diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.25-26.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.25-26.patch new file mode 100644 index 0000000000..f4c60ef9d1 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.25-26.patch @@ -0,0 +1,2603 @@ +diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +index ce6a1a0..8a3c408 100644 +--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt ++++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +@@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents. + Example: + interrupts-extended = <&intc1 5 1>, <&intc2 1 0>; + +-A device node may contain either "interrupts" or "interrupts-extended", but not +-both. If both properties are present, then the operating system should log an +-error and use only the data in "interrupts". +- + 2) Interrupt controller nodes + ----------------------------- + +diff --git a/Makefile b/Makefile +index eb96e40..63a5ee8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 25 ++SUBLEVEL = 26 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h +index 71a06b2..3e635ee 100644 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -43,16 +43,6 @@ struct cpu_context_save { + __u32 extra[2]; /* Xscale 'acc' register, etc */ + }; + +-struct arm_restart_block { +- union { +- /* For user cache flushing */ +- struct { +- unsigned long start; +- unsigned long end; +- } cache; +- }; +-}; +- + /* + * low level task data that entry.S needs immediate access to. + * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
+@@ -78,7 +68,6 @@ struct thread_info { + unsigned long thumbee_state; /* ThumbEE Handler Base register */ + #endif + struct restart_block restart_block; +- struct arm_restart_block arm_restart_block; + }; + + #define INIT_THREAD_INFO(tsk) \ +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 9265b8b..3f31443 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -510,8 +510,6 @@ static int bad_syscall(int n, struct pt_regs *regs) + return regs->ARM_r0; + } + +-static long do_cache_op_restart(struct restart_block *); +- + static inline int + __do_cache_op(unsigned long start, unsigned long end) + { +@@ -520,24 +518,8 @@ __do_cache_op(unsigned long start, unsigned long end) + do { + unsigned long chunk = min(PAGE_SIZE, end - start); + +- if (signal_pending(current)) { +- struct thread_info *ti = current_thread_info(); +- +- ti->restart_block = (struct restart_block) { +- .fn = do_cache_op_restart, +- }; +- +- ti->arm_restart_block = (struct arm_restart_block) { +- { +- .cache = { +- .start = start, +- .end = end, +- }, +- }, +- }; +- +- return -ERESTART_RESTARTBLOCK; +- } ++ if (fatal_signal_pending(current)) ++ return 0; + + ret = flush_cache_user_range(start, start + chunk); + if (ret) +@@ -550,15 +532,6 @@ __do_cache_op(unsigned long start, unsigned long end) + return 0; + } + +-static long do_cache_op_restart(struct restart_block *unused) +-{ +- struct arm_restart_block *restart_block; +- +- restart_block = ¤t_thread_info()->arm_restart_block; +- return __do_cache_op(restart_block->cache.start, +- restart_block->cache.end); +-} +- + static inline int + do_cache_op(unsigned long start, unsigned long end, int flags) + { +diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S +index 74f6033..fdedc31 100644 +--- a/arch/arm/mm/proc-v7.S ++++ b/arch/arm/mm/proc-v7.S +@@ -211,7 +211,6 @@ __v7_pj4b_setup: + /* Auxiliary Debug Modes Control 1 Register */ + #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ + #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ +-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ + #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ + + /* Auxiliary Debug Modes Control 2 Register */ +@@ -234,7 +233,6 @@ __v7_pj4b_setup: + /* Auxiliary Debug Modes Control 1 Register */ + mrc p15, 1, r0, c15, c1, 1 + orr r0, r0, #PJ4B_CLEAN_LINE +- orr r0, r0, #PJ4B_BCK_OFF_STREX + orr r0, r0, #PJ4B_INTER_PARITY + bic r0, r0, #PJ4B_STATIC_BP + mcr p15, 1, r0, c15, c1, 1 +diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S +index d19b1cf..b34b95f 100644 +--- a/arch/arm/mm/proc-xscale.S ++++ b/arch/arm/mm/proc-xscale.S +@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend) + mrc p15, 0, r5, c15, c1, 0 @ CP access reg + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c3, c0, 0 @ domain ID +- mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg ++ mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg + mrc p15, 0, r9, c1, c0, 0 @ control reg + bic r4, r4, #2 @ clear frequency change bit + stmia r0, {r4 - r9} @ store cp regs +@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume) + mcr p15, 0, r6, c13, c0, 0 @ PID + mcr p15, 0, r7, c3, c0, 0 @ domain ID + mcr p15, 0, r1, c2, c0, 0 @ translation table base addr +- mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg ++ mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg + mov r0, r9 @ control register + b cpu_resume_mmu + ENDPROC(cpu_xscale_do_resume) +diff --git a/arch/mips/loongson/common/Makefile 
b/arch/mips/loongson/common/Makefile +index 9e4484c..9005a8d6 100644 +--- a/arch/mips/loongson/common/Makefile ++++ b/arch/mips/loongson/common/Makefile +@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o + # Serial port support + # + obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +-obj-$(CONFIG_SERIAL_8250) += serial.o ++loongson-serial-$(CONFIG_SERIAL_8250) := serial.o ++obj-y += $(loongson-serial-m) $(loongson-serial-y) + obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o + obj-$(CONFIG_LOONGSON_MC146818) += rtc.o + +diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c +index 6854ed5..83a1dfd 100644 +--- a/arch/mips/oprofile/backtrace.c ++++ b/arch/mips/oprofile/backtrace.c +@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, + /* This marks the end of the previous function, + which means we overran. */ + break; +- stack_size = (unsigned) stack_adjustment; ++ stack_size = (unsigned long) stack_adjustment; + } else if (is_ra_save_ins(&ip)) { + int ra_slot = ip.i_format.simmediate; + if (ra_slot < 0) +diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c +index beedaf0..d558b85 100644 +--- a/arch/powerpc/platforms/powernv/pci-ioda.c ++++ b/arch/powerpc/platforms/powernv/pci-ioda.c +@@ -902,7 +902,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, + unsigned int is_64, struct msi_msg *msg) + { + struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); +- struct pci_dn *pdn = pci_get_pdn(dev); + struct irq_data *idata; + struct irq_chip *ichip; + unsigned int xive_num = hwirq - phb->msi_base; +@@ -918,7 +917,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, + return -ENXIO; + + /* Force 32-bit MSI on some broken devices */ +- if (pdn && pdn->force_32bit_msi) ++ if (dev->no_64bit_msi) + is_64 = 0; + + /* Assign XIVE to PE */ +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c +index 8518817..52c1162 100644 +--- a/arch/powerpc/platforms/powernv/pci.c ++++ b/arch/powerpc/platforms/powernv/pci.c +@@ -1,3 +1,4 @@ ++ + /* + * Support PCI/PCIe on PowerNV platforms + * +@@ -50,9 +51,8 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type) + { + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; +- struct pci_dn *pdn = pci_get_pdn(pdev); + +- if (pdn && pdn->force_32bit_msi && !phb->msi32_support) ++ if (pdev->no_64bit_msi && !phb->msi32_support) + return -ENODEV; + + return (phb && phb->msi_bmp.bitmap) ? 
0 : -ENODEV; +diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c +index 0c882e8..6849d85 100644 +--- a/arch/powerpc/platforms/pseries/msi.c ++++ b/arch/powerpc/platforms/pseries/msi.c +@@ -428,7 +428,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) + */ + again: + if (type == PCI_CAP_ID_MSI) { +- if (pdn->force_32bit_msi) { ++ if (pdev->no_64bit_msi) { + rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); + if (rc < 0) { + /* +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index b079098..bc5fbc2 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -288,10 +288,10 @@ static inline void disable_surveillance(void) + args.token = rtas_token("set-indicator"); + if (args.token == RTAS_UNKNOWN_SERVICE) + return; +- args.nargs = 3; +- args.nret = 1; ++ args.nargs = cpu_to_be32(3); ++ args.nret = cpu_to_be32(1); + args.rets = &args.args[3]; +- args.args[0] = SURVEILLANCE_TOKEN; ++ args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); + args.args[1] = 0; + args.args[2] = 0; + enter_rtas(__pa(&args)); +diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h +index a34ad07..4c7c12d 100644 +--- a/arch/sparc/include/uapi/asm/swab.h ++++ b/arch/sparc/include/uapi/asm/swab.h +@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr) + { + __u16 ret; + +- __asm__ __volatile__ ("lduha [%1] %2, %0" ++ __asm__ __volatile__ ("lduha [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab16p __arch_swab16p +@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr) + { + __u32 ret; + +- __asm__ __volatile__ ("lduwa [%1] %2, %0" ++ __asm__ __volatile__ ("lduwa [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab32p __arch_swab32p +@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr) + { + __u64 ret; + +- __asm__ __volatile__ ("ldxa [%1] %2, %0" ++ __asm__ __volatile__ ("ldxa [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab64p __arch_swab64p +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index 5f12968..1717156 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -203,6 +203,7 @@ + #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ + #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ + #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ ++#define X86_FEATURE_VMMCALL (8*32+15) /* Prefer vmmcall to vmcall */ + + + /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ +diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h +index c7678e4..e62cf89 100644 +--- a/arch/x86/include/asm/kvm_para.h ++++ b/arch/x86/include/asm/kvm_para.h +@@ -2,6 +2,7 @@ + #define _ASM_X86_KVM_PARA_H + + #include ++#include + #include + + extern void kvmclock_init(void); +@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void) + } + #endif /* CONFIG_KVM_GUEST */ + +-/* This instruction is vmcall. On non-VT architectures, it will generate a +- * trap that we will then rewrite to the appropriate instruction. 
++#ifdef CONFIG_DEBUG_RODATA ++#define KVM_HYPERCALL \ ++ ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL) ++#else ++/* On AMD processors, vmcall will generate a trap that we will ++ * then rewrite to the appropriate instruction. + */ + #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" ++#endif + + /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall + * instruction. The hypervisor may replace it with something else but only the +diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h +index f48b17d..3a52ee0 100644 +--- a/arch/x86/include/asm/page_32_types.h ++++ b/arch/x86/include/asm/page_32_types.h +@@ -20,7 +20,6 @@ + #define THREAD_SIZE_ORDER 1 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + +-#define STACKFAULT_STACK 0 + #define DOUBLEFAULT_STACK 1 + #define NMI_STACK 0 + #define DEBUG_STACK 0 +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h +index 8de6d9c..d54d1ee 100644 +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -14,12 +14,11 @@ + #define IRQ_STACK_ORDER 2 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +-#define STACKFAULT_STACK 1 +-#define DOUBLEFAULT_STACK 2 +-#define NMI_STACK 3 +-#define DEBUG_STACK 4 +-#define MCE_STACK 5 +-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#define DOUBLEFAULT_STACK 1 ++#define NMI_STACK 2 ++#define DEBUG_STACK 3 ++#define MCE_STACK 4 ++#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */ + + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index e1940c0..e870ea9 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -144,7 +144,7 @@ struct thread_info { + /* Only used for 64 bit */ + #define _TIF_DO_NOTIFY_MASK \ + (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ +- _TIF_USER_RETURN_NOTIFY) ++ _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) + + /* flags to check in __switch_to() */ + #define _TIF_WORK_CTXSW \ +diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h +index 58d66fe..b409b17 100644 +--- a/arch/x86/include/asm/traps.h ++++ b/arch/x86/include/asm/traps.h +@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void); + + #ifdef CONFIG_TRACING + asmlinkage void trace_page_fault(void); ++#define trace_stack_segment stack_segment + #define trace_divide_error divide_error + #define trace_bounds bounds + #define trace_invalid_op invalid_op +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index c67ffa6..c005fdd 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -508,6 +508,13 @@ static void early_init_amd(struct cpuinfo_x86 *c) + } + #endif + ++ /* ++ * This is only needed to tell the kernel whether to use VMCALL ++ * and VMMCALL. VMMCALL is never executed except under virt, so ++ * we can set it unconditionally. 
++ */ ++ set_cpu_cap(c, X86_FEATURE_VMMCALL); ++ + /* F16h erratum 793, CVE-2013-6885 */ + if (c->x86 == 0x16 && c->x86_model <= 0xf) { + u64 val; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 3f27f5fd..e6bddd5 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -144,6 +144,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); + + static int __init x86_xsave_setup(char *s) + { ++ if (strlen(s)) ++ return 0; + setup_clear_cpu_cap(X86_FEATURE_XSAVE); + setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_AVX); +diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c +index addb207..66e274a 100644 +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c +@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = { + [ DEBUG_STACK-1 ] = "#DB", + [ NMI_STACK-1 ] = "NMI", + [ DOUBLEFAULT_STACK-1 ] = "#DF", +- [ STACKFAULT_STACK-1 ] = "#SS", + [ MCE_STACK-1 ] = "#MC", + #if DEBUG_STKSZ > EXCEPTION_STKSZ + [ N_EXCEPTION_STACKS ... +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 03cd2a8..02553d6 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1053,9 +1053,15 @@ ENTRY(native_iret) + jnz native_irq_return_ldt + #endif + ++.global native_irq_return_iret + native_irq_return_iret: ++ /* ++ * This may fault. Non-paranoid faults on return to userspace are ++ * handled by fixup_bad_iret. These include #SS, #GP, and #NP. ++ * Double-faults due to espfix64 are handled in do_double_fault. ++ * Other faults here are fatal. ++ */ + iretq +- _ASM_EXTABLE(native_irq_return_iret, bad_iret) + + #ifdef CONFIG_X86_ESPFIX64 + native_irq_return_ldt: +@@ -1083,25 +1089,6 @@ native_irq_return_ldt: + jmp native_irq_return_iret + #endif + +- .section .fixup,"ax" +-bad_iret: +- /* +- * The iret traps when the %cs or %ss being restored is bogus. +- * We've lost the original trap vector and error code. +- * #GPF is the most likely one to get for an invalid selector. +- * So pretend we completed the iret and took the #GPF in user mode. +- * +- * We are now running with the kernel GS after exception recovery. +- * But error_entry expects us to have user GS to match the user %cs, +- * so swap back. +- */ +- pushq $0 +- +- SWAPGS +- jmp general_protection +- +- .previous +- + /* edi: workmask, edx: work */ + retint_careful: + CFI_RESTORE_STATE +@@ -1147,37 +1134,6 @@ ENTRY(retint_kernel) + CFI_ENDPROC + END(common_interrupt) + +- /* +- * If IRET takes a fault on the espfix stack, then we +- * end up promoting it to a doublefault. In that case, +- * modify the stack to make it look like we just entered +- * the #GP handler from user space, similar to bad_iret. +- */ +-#ifdef CONFIG_X86_ESPFIX64 +- ALIGN +-__do_double_fault: +- XCPT_FRAME 1 RDI+8 +- movq RSP(%rdi),%rax /* Trap on the espfix stack? */ +- sarq $PGDIR_SHIFT,%rax +- cmpl $ESPFIX_PGD_ENTRY,%eax +- jne do_double_fault /* No, just deliver the fault */ +- cmpl $__KERNEL_CS,CS(%rdi) +- jne do_double_fault +- movq RIP(%rdi),%rax +- cmpq $native_irq_return_iret,%rax +- jne do_double_fault /* This shouldn't happen... 
*/ +- movq PER_CPU_VAR(kernel_stack),%rax +- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ +- movq %rax,RSP(%rdi) +- movq $0,(%rax) /* Missing (lost) #GP error code */ +- movq $general_protection,RIP(%rdi) +- retq +- CFI_ENDPROC +-END(__do_double_fault) +-#else +-# define __do_double_fault do_double_fault +-#endif +- + /* + * End of kprobes section + */ +@@ -1379,7 +1335,7 @@ zeroentry overflow do_overflow + zeroentry bounds do_bounds + zeroentry invalid_op do_invalid_op + zeroentry device_not_available do_device_not_available +-paranoiderrorentry double_fault __do_double_fault ++paranoiderrorentry double_fault do_double_fault + zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun + errorentry invalid_TSS do_invalid_TSS + errorentry segment_not_present do_segment_not_present +@@ -1549,7 +1505,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ + + paranoidzeroentry_ist debug do_debug DEBUG_STACK + paranoidzeroentry_ist int3 do_int3 DEBUG_STACK +-paranoiderrorentry stack_segment do_stack_segment ++errorentry stack_segment do_stack_segment + #ifdef CONFIG_XEN + zeroentry xen_debug do_debug + zeroentry xen_int3 do_int3 +@@ -1659,16 +1615,15 @@ error_sti: + + /* + * There are two places in the kernel that can potentially fault with +- * usergs. Handle them here. The exception handlers after iret run with +- * kernel gs again, so don't set the user space flag. B stepping K8s +- * sometimes report an truncated RIP for IRET exceptions returning to +- * compat mode. Check for these here too. ++ * usergs. Handle them here. B stepping K8s sometimes report a ++ * truncated RIP for IRET exceptions returning to compat mode. Check ++ * for these here too. + */ + error_kernelspace: + incl %ebx + leaq native_irq_return_iret(%rip),%rcx + cmpq %rcx,RIP+8(%rsp) +- je error_swapgs ++ je error_bad_iret + movl %ecx,%eax /* zero extend */ + cmpq %rax,RIP+8(%rsp) + je bstep_iret +@@ -1679,7 +1634,15 @@ error_kernelspace: + bstep_iret: + /* Fix truncated RIP */ + movq %rcx,RIP+8(%rsp) +- jmp error_swapgs ++ /* fall through */ ++ ++error_bad_iret: ++ SWAPGS ++ mov %rsp,%rdi ++ call fixup_bad_iret ++ mov %rax,%rsp ++ decl %ebx /* Return to usergs */ ++ jmp error_sti + CFI_ENDPROC + END(error_entry) + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 57409f6..f9d976e 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -218,32 +218,40 @@ DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL + DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) + DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) + DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) +-#ifdef CONFIG_X86_32 + DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) +-#endif + DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) + + #ifdef CONFIG_X86_64 + /* Runs on IST stack */ +-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) +-{ +- enum ctx_state prev_state; +- +- prev_state = exception_enter(); +- if (notify_die(DIE_TRAP, "stack segment", regs, error_code, +- X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { +- preempt_conditional_sti(regs); +- do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); +- preempt_conditional_cli(regs); +- } +- exception_exit(prev_state); +-} +- + dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) + { + static const char str[] = "double fault"; + struct 
task_struct *tsk = current;
+ 
++#ifdef CONFIG_X86_ESPFIX64
++	extern unsigned char native_irq_return_iret[];
++
++	/*
++	 * If IRET takes a non-IST fault on the espfix64 stack, then we
++	 * end up promoting it to a doublefault. In that case, modify
++	 * the stack to make it look like we just entered the #GP
++	 * handler from user space, similar to bad_iret.
++	 */
++	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
++		regs->cs == __KERNEL_CS &&
++		regs->ip == (unsigned long)native_irq_return_iret)
++	{
++		struct pt_regs *normal_regs = task_pt_regs(current);
++
++		/* Fake a #GP(0) from userspace. */
++		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
++		normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
++		regs->ip = (unsigned long)general_protection;
++		regs->sp = (unsigned long)&normal_regs->orig_ax;
++		return;
++	}
++#endif
++
+ 	exception_enter();
+ 	/* Return not checked because double check cannot be ignored */
+ 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+@@ -376,6 +384,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+ 	*regs = *eregs;
+ 	return regs;
+ }
++
++struct bad_iret_stack {
++	void *error_entry_ret;
++	struct pt_regs regs;
++};
++
++asmlinkage __visible
++struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
++{
++	/*
++	 * This is called from entry_64.S early in handling a fault
++	 * caused by a bad iret to user mode. To handle the fault
++	 * correctly, we want to move our stack frame to task_pt_regs
++	 * and we want to pretend that the exception came from the
++	 * iret target.
++	 */
++	struct bad_iret_stack *new_stack =
++		container_of(task_pt_regs(current),
++			     struct bad_iret_stack, regs);
++
++	/* Copy the IRET target to the new stack. */
++	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
++
++	/* Copy the remainder of the stack from the current stack. */
++	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
++
++	BUG_ON(!user_mode_vm(&new_stack->regs));
++	return new_stack;
++}
+ #endif
+ 
+ /*
+@@ -748,7 +785,7 @@ void __init trap_init(void)
+ 	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
+ 	set_intr_gate(X86_TRAP_TS, invalid_TSS);
+ 	set_intr_gate(X86_TRAP_NP, segment_not_present);
+-	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++	set_intr_gate(X86_TRAP_SS, stack_segment);
+ 	set_intr_gate(X86_TRAP_GP, general_protection);
+ 	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
+ 	set_intr_gate(X86_TRAP_MF, coprocessor_error);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index f35c66c..2308a40 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1110,7 +1110,7 @@ void mark_rodata_ro(void)
+ 	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+ 	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+ 	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+-	unsigned long all_end = PFN_ALIGN(&_end);
++	unsigned long all_end;
+ 
+ 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ 		(end - start) >> 10);
+@@ -1121,7 +1121,16 @@ void mark_rodata_ro(void)
+ 	/*
+ 	 * The rodata/data/bss/brk section (but not the kernel text!)
+ 	 * should also be not-executable.
++	 *
++	 * We align all_end to PMD_SIZE because the existing mapping
++	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
++	 * split the PMD and the remainder between _brk_end and the end
++	 * of the PMD will remain mapped executable.
++ * ++ * Any PMD which was setup after the one which covers _brk_end ++ * has been zapped already via cleanup_highmem(). + */ ++ all_end = roundup((unsigned long)_brk_end, PMD_SIZE); + set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); + + rodata_test(); +diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl +index 0b0b124..23210ba 100644 +--- a/arch/x86/tools/calc_run_size.pl ++++ b/arch/x86/tools/calc_run_size.pl +@@ -19,7 +19,16 @@ while (<>) { + if ($file_offset == 0) { + $file_offset = $offset; + } elsif ($file_offset != $offset) { +- die ".bss and .brk lack common file offset\n"; ++ # BFD linker shows the same file offset in ELF. ++ # Gold linker shows them as consecutive. ++ next if ($file_offset + $mem_size == $offset + $size); ++ ++ printf STDERR "file_offset: 0x%lx\n", $file_offset; ++ printf STDERR "mem_size: 0x%lx\n", $mem_size; ++ printf STDERR "offset: 0x%lx\n", $offset; ++ printf STDERR "size: 0x%lx\n", $size; ++ ++ die ".bss and .brk are non-contiguous\n"; + } + } + } +diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c +index bf497af..7d19f86 100644 +--- a/drivers/clocksource/sun4i_timer.c ++++ b/drivers/clocksource/sun4i_timer.c +@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node) + /* Make sure timer is stopped before playing with interrupts */ + sun4i_clkevt_time_stop(0); + ++ sun4i_clockevent.cpumask = cpu_possible_mask; ++ sun4i_clockevent.irq = irq; ++ ++ clockevents_config_and_register(&sun4i_clockevent, rate, ++ TIMER_SYNC_TICKS, 0xffffffff); ++ + ret = setup_irq(irq, &sun4i_timer_irq); + if (ret) + pr_warn("failed to setup irq %d\n", irq); +@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node) + /* Enable timer0 interrupt */ + val = readl(timer_base + TIMER_IRQ_EN_REG); + writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); +- +- sun4i_clockevent.cpumask = cpu_possible_mask; +- sun4i_clockevent.irq = irq; +- +- clockevents_config_and_register(&sun4i_clockevent, rate, +- TIMER_SYNC_TICKS, 0xffffffff); + } + CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", + sun4i_timer_init); +diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c +index 813db8d..3334f91 100644 +--- a/drivers/gpu/drm/radeon/r600_dpm.c ++++ b/drivers/gpu/drm/radeon/r600_dpm.c +@@ -1209,7 +1209,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = +- ppt->usMaximumPowerDeliveryLimit; ++ le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); + pt = &ppt->power_tune_table; + } else { + ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c +index 089c9ff..b3f0293 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c +@@ -202,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) + if (rdev->flags & RADEON_IS_AGP) + return false; + ++ /* ++ * Older chips have a HW limitation, they can only generate 40 bits ++ * of address for "64-bit" MSIs which breaks on some platforms, notably ++ * IBM POWER servers, so we limit them ++ */ ++ if (rdev->family < CHIP_BONAIRE) { ++ dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); ++ rdev->pdev->no_64bit_msi = 1; ++ } ++ + /* force MSI on */ + if 
(radeon_msi == 1) + return true; +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index c5c194c..a96cfc3 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -112,9 +112,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) + attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; + /* + * FIXME: Use devattr.max_sge - 2 for max_send_sge as +- * work-around for RDMA_READ.. ++ * work-around for RDMA_READs with ConnectX-2. ++ * ++ * Also, still make sure to have at least two SGEs for ++ * outgoing control PDU responses. + */ +- attr.cap.max_send_sge = device->dev_attr.max_sge - 2; ++ attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); + isert_conn->max_sge = attr.cap.max_send_sge; + + attr.cap.max_recv_sge = 1; +@@ -220,12 +223,16 @@ isert_create_device_ib_res(struct isert_device *device) + struct isert_cq_desc *cq_desc; + struct ib_device_attr *dev_attr; + int ret = 0, i, j; ++ int max_rx_cqe, max_tx_cqe; + + dev_attr = &device->dev_attr; + ret = isert_query_device(ib_dev, dev_attr); + if (ret) + return ret; + ++ max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); ++ max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); ++ + /* asign function handlers */ + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + device->use_fastreg = 1; +@@ -261,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device) + isert_cq_rx_callback, + isert_cq_event_callback, + (void *)&cq_desc[i], +- ISER_MAX_RX_CQ_LEN, i); ++ max_rx_cqe, i); + if (IS_ERR(device->dev_rx_cq[i])) { + ret = PTR_ERR(device->dev_rx_cq[i]); + device->dev_rx_cq[i] = NULL; +@@ -273,7 +280,7 @@ isert_create_device_ib_res(struct isert_device *device) + isert_cq_tx_callback, + isert_cq_event_callback, + (void *)&cq_desc[i], +- ISER_MAX_TX_CQ_LEN, i); ++ max_tx_cqe, i); + if (IS_ERR(device->dev_tx_cq[i])) { + ret = PTR_ERR(device->dev_tx_cq[i]); + device->dev_tx_cq[i] = NULL; +@@ -718,14 +725,25 @@ wake_up: + complete(&isert_conn->conn_wait); + } + +-static void ++static int + isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) + { +- struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; ++ struct isert_conn *isert_conn; ++ ++ if (!cma_id->qp) { ++ struct isert_np *isert_np = cma_id->context; ++ ++ isert_np->np_cm_id = NULL; ++ return -1; ++ } ++ ++ isert_conn = (struct isert_conn *)cma_id->context; + + isert_conn->disconnect = disconnect; + INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); + schedule_work(&isert_conn->conn_logout_work); ++ ++ return 0; + } + + static int +@@ -740,6 +758,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + switch (event->event) { + case RDMA_CM_EVENT_CONNECT_REQUEST: + ret = isert_connect_request(cma_id, event); ++ if (ret) ++ pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", ++ event->event, ret); + break; + case RDMA_CM_EVENT_ESTABLISHED: + isert_connected_handler(cma_id); +@@ -749,7 +770,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ + disconnect = true; + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ +- isert_disconnected_handler(cma_id, disconnect); ++ ret = isert_disconnected_handler(cma_id, disconnect); + break; + case RDMA_CM_EVENT_CONNECT_ERROR: + default: +@@ -757,12 +778,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + break; + } + +- if 
(ret != 0) { +- pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", +- event->event, ret); +- dump_stack(); +- } +- + return ret; + } + +@@ -970,7 +985,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + } + if (!login->login_failed) { + if (login->login_complete) { +- if (isert_conn->conn_device->use_fastreg) { ++ if (!conn->sess->sess_ops->SessionType && ++ isert_conn->conn_device->use_fastreg) { + ret = isert_conn_create_fastreg_pool(isert_conn); + if (ret) { + pr_err("Conn: %p failed to create" +@@ -1937,7 +1953,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + isert_cmd->tx_desc.num_sge = 2; + } + +- isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); ++ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + + pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + +@@ -2456,7 +2472,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, +- &isert_cmd->tx_desc.send_wr, true); ++ &isert_cmd->tx_desc.send_wr, false); + + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + +@@ -2768,7 +2784,8 @@ isert_free_np(struct iscsi_np *np) + { + struct isert_np *isert_np = (struct isert_np *)np->np_context; + +- rdma_destroy_id(isert_np->np_cm_id); ++ if (isert_np->np_cm_id) ++ rdma_destroy_id(isert_np->np_cm_id); + + np->np_context = NULL; + kfree(isert_np); +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c +index d1078ce..0097b8d 100644 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c +@@ -2091,6 +2091,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) + if (!qp_init) + goto out; + ++retry: + ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, + ch->rq_size + srp_sq_size, 0); + if (IS_ERR(ch->cq)) { +@@ -2114,6 +2115,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) + ch->qp = ib_create_qp(sdev->pd, qp_init); + if (IS_ERR(ch->qp)) { + ret = PTR_ERR(ch->qp); ++ if (ret == -ENOMEM) { ++ srp_sq_size /= 2; ++ if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { ++ ib_destroy_cq(ch->cq); ++ goto retry; ++ } ++ } + printk(KERN_ERR "failed to create_qp ret= %d\n", ret); + goto err_destroy_cq; + } +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 603fe0d..517829f 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -1003,9 +1003,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + } + + ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; +- usb_fill_bulk_urb(xpad->bulk_out, udev, +- usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), +- xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); ++ if (usb_endpoint_is_bulk_out(ep_irq_in)) { ++ usb_fill_bulk_urb(xpad->bulk_out, udev, ++ usb_sndbulkpipe(udev, ++ ep_irq_in->bEndpointAddress), ++ xpad->bdata, XPAD_PKT_LEN, ++ xpad_bulk_out, xpad); ++ } else { ++ usb_fill_int_urb(xpad->bulk_out, udev, ++ usb_sndintpipe(udev, ++ ep_irq_in->bEndpointAddress), ++ xpad->bdata, XPAD_PKT_LEN, ++ xpad_bulk_out, xpad, 0); ++ } + + /* + * Submit the int URB immediately rather than waiting for open +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index 1e76eb8..a3769cf 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -140,6 +140,10 @@ static const struct 
min_max_quirk min_max_pnpid_table[] = { + (const char * const []){"LEN2001", NULL}, + 1024, 5022, 2508, 4832 + }, ++ { ++ (const char * const []){"LEN2006", NULL}, ++ 1264, 5675, 1171, 4688 ++ }, + { } + }; + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index cc38948..1537982 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -2450,9 +2450,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) + if (!rtnl_trylock()) + goto re_arm; + +- if (slave_state_changed) { ++ if (slave_state_changed) + bond_slave_state_change(bond); +- } else if (do_failover) { ++ if (do_failover) { + /* the bond_select_active_slave must hold RTNL + * and curr_slave_lock for write. + */ +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index fc59bc6..cc11f7f 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -384,7 +384,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) + BUG_ON(idx >= priv->echo_skb_max); + + if (priv->echo_skb[idx]) { +- kfree_skb(priv->echo_skb[idx]); ++ dev_kfree_skb_any(priv->echo_skb[idx]); + priv->echo_skb[idx] = NULL; + } + } +diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c +index 7fbe859..f34f7fa 100644 +--- a/drivers/net/can/usb/esd_usb2.c ++++ b/drivers/net/can/usb/esd_usb2.c +@@ -1141,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) + } + } + unlink_all_urbs(dev); ++ kfree(dev); + } + } + +diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c +index bf0d55e..6adbef8 100644 +--- a/drivers/net/ieee802154/fakehard.c ++++ b/drivers/net/ieee802154/fakehard.c +@@ -376,17 +376,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) + + err = wpan_phy_register(phy); + if (err) +- goto out; ++ goto err_phy_reg; + + err = register_netdev(dev); +- if (err < 0) +- goto out; ++ if (err) ++ goto err_netdev_reg; + + dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); + return 0; + +-out: +- unregister_netdev(dev); ++err_netdev_reg: ++ wpan_phy_unregister(phy); ++err_phy_reg: ++ free_netdev(dev); ++ wpan_phy_free(phy); + return err; + } + +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c +index 1aff970..1dc628f 100644 +--- a/drivers/net/ppp/pptp.c ++++ b/drivers/net/ppp/pptp.c +@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, + int len = sizeof(struct sockaddr_pppox); + struct sockaddr_pppox sp; + +- sp.sa_family = AF_PPPOX; ++ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); ++ ++ sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_PPTP; + sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index d510f1d..db21af8 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -769,6 +769,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ ++ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ + + /* 4. 
Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 09facba..390c2de 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -647,6 +647,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
+ 		ah->enabled_cals |= TX_CL_CAL;
+ 	else
+ 		ah->enabled_cals &= ~TX_CL_CAL;
++
++	if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
++		if (ah->is_clk_25mhz) {
++			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
++			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
++			REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
++		} else {
++			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
++			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
++			REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
++		}
++		udelay(100);
++	}
+ }
+ 
+ static void ar9003_hw_prog_ini(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 9078a6c..dcc1494 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -858,19 +858,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
+ 	udelay(RTC_PLL_SETTLE_DELAY);
+ 
+ 	REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+-
+-	if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+-		if (ah->is_clk_25mhz) {
+-			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+-			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+-			REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
+-		} else {
+-			REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+-			REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+-			REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
+-		}
+-		udelay(100);
+-	}
+ }
+ 
+ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 5642ccc..22d49d5 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
+ 	skb_trim(skb, frame_length);
+ }
+ 
+-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
++/*
++ * H/W needs L2 padding between the header and the payload if header size
++ * is not 4 bytes aligned.
++ */
++void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+-	unsigned int payload_length = skb->len - header_length;
+-	unsigned int header_align = ALIGN_SIZE(skb, 0);
+-	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
+-	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
++	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+ 
+-	/*
+-	 * Adjust the header alignment if the payload needs to be moved more
+-	 * than the header.
+-	 */
+-	if (payload_align > header_align)
+-		header_align += 4;
+-
+-	/* There is nothing to do if no alignment is needed */
+-	if (!header_align)
++	if (!l2pad)
+ 		return;
+ 
+-	/* Reserve the amount of space needed in front of the frame */
+-	skb_push(skb, header_align);
+-
+-	/*
+-	 * Move the header.
+- */ +- memmove(skb->data, skb->data + header_align, header_length); +- +- /* Move the payload, if present and if required */ +- if (payload_length && payload_align) +- memmove(skb->data + header_length + l2pad, +- skb->data + header_length + l2pad + payload_align, +- payload_length); +- +- /* Trim the skb to the correct size */ +- skb_trim(skb, header_length + l2pad + payload_length); ++ skb_push(skb, l2pad); ++ memmove(skb->data, skb->data + l2pad, hdr_len); + } + +-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) ++void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) + { +- /* +- * L2 padding is only present if the skb contains more than just the +- * IEEE 802.11 header. +- */ +- unsigned int l2pad = (skb->len > header_length) ? +- L2PAD_SIZE(header_length) : 0; ++ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; + + if (!l2pad) + return; + +- memmove(skb->data + l2pad, skb->data, header_length); ++ memmove(skb->data + l2pad, skb->data, hdr_len); + skb_pull(skb, l2pad); + } + +diff --git a/drivers/of/address.c b/drivers/of/address.c +index 1a54f1f..005c657 100644 +--- a/drivers/of/address.c ++++ b/drivers/of/address.c +@@ -401,6 +401,21 @@ static struct of_bus *of_match_bus(struct device_node *np) + return NULL; + } + ++static int of_empty_ranges_quirk(void) ++{ ++ if (IS_ENABLED(CONFIG_PPC)) { ++ /* To save cycles, we cache the result */ ++ static int quirk_state = -1; ++ ++ if (quirk_state < 0) ++ quirk_state = ++ of_machine_is_compatible("Power Macintosh") || ++ of_machine_is_compatible("MacRISC"); ++ return quirk_state; ++ } ++ return false; ++} ++ + static int of_translate_one(struct device_node *parent, struct of_bus *bus, + struct of_bus *pbus, __be32 *addr, + int na, int ns, int pna, const char *rprop) +@@ -426,12 +441,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, + * This code is only enabled on powerpc. 
--gcl + */ + ranges = of_get_property(parent, rprop, &rlen); +-#if !defined(CONFIG_PPC) +- if (ranges == NULL) { ++ if (ranges == NULL && !of_empty_ranges_quirk()) { + pr_err("OF: no ranges; cannot translate\n"); + return 1; + } +-#endif /* !defined(CONFIG_PPC) */ + if (ranges == NULL || rlen == 0) { + offset = of_read_number(addr, na); + memset(addr, 0, pna * 4); +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index fb02fc2..ced17f2 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -599,6 +599,20 @@ error_attrs: + return ret; + } + ++static int msi_verify_entries(struct pci_dev *dev) ++{ ++ struct msi_desc *entry; ++ ++ list_for_each_entry(entry, &dev->msi_list, list) { ++ if (!dev->no_64bit_msi || !entry->msg.address_hi) ++ continue; ++ dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" ++ " tried to assign one above 4G\n"); ++ return -EIO; ++ } ++ return 0; ++} ++ + /** + * msi_capability_init - configure device's MSI capability structure + * @dev: pointer to the pci_dev data structure of MSI device function +@@ -652,6 +666,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) + return ret; + } + ++ ret = msi_verify_entries(dev); ++ if (ret) { ++ msi_mask_irq(entry, mask, ~mask); ++ free_msi_irqs(dev); ++ return ret; ++ } ++ + ret = populate_msi_sysfs(dev); + if (ret) { + msi_mask_irq(entry, mask, ~mask); +@@ -767,6 +788,11 @@ static int msix_capability_init(struct pci_dev *dev, + if (ret) + goto out_avail; + ++ /* Check if all MSI entries honor device restrictions */ ++ ret = msi_verify_entries(dev); ++ if (ret) ++ goto out_free; ++ + /* + * Some devices require MSI-X to be enabled before we can touch the + * MSI-X registers. We need to mask all the vectors to prevent +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 6e34498..34dff3a 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -395,15 +395,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) + { + struct pci_dev *dev = child->self; + u16 mem_base_lo, mem_limit_lo; +- unsigned long base, limit; ++ u64 base64, limit64; ++ dma_addr_t base, limit; + struct pci_bus_region region; + struct resource *res; + + res = child->resource[2]; + pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); + pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); +- base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; +- limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; ++ base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; ++ limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; + + if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + u32 mem_base_hi, mem_limit_hi; +@@ -417,18 +418,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) + * this, just assume they are not being used. 
+ */ + if (mem_base_hi <= mem_limit_hi) { +-#if BITS_PER_LONG == 64 +- base |= ((unsigned long) mem_base_hi) << 32; +- limit |= ((unsigned long) mem_limit_hi) << 32; +-#else +- if (mem_base_hi || mem_limit_hi) { +- dev_err(&dev->dev, "can't handle 64-bit " +- "address space for bridge\n"); +- return; +- } +-#endif ++ base64 |= (u64) mem_base_hi << 32; ++ limit64 |= (u64) mem_limit_hi << 32; + } + } ++ ++ base = (dma_addr_t) base64; ++ limit = (dma_addr_t) limit64; ++ ++ if (base != base64) { ++ dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", ++ (unsigned long long) base64); ++ return; ++ } ++ + if (base <= limit) { + res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | + IORESOURCE_MEM | IORESOURCE_PREFETCH; +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +index 9b94850..cc6b13b 100644 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +@@ -411,6 +411,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + struct fc_frame_header *fh; + struct fcoe_rcv_info *fr; + struct fcoe_percpu_s *bg; ++ struct sk_buff *tmp_skb; + unsigned short oxid; + + interface = container_of(ptype, struct bnx2fc_interface, +@@ -423,6 +424,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + goto err; + } + ++ tmp_skb = skb_share_check(skb, GFP_ATOMIC); ++ if (!tmp_skb) ++ goto err; ++ ++ skb = tmp_skb; ++ + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); + goto err; +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 49014a1..c1d04d4 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -202,6 +202,7 @@ static struct { + {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, + {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, + {"INSITE", "I325VM", NULL, BLIST_KEY}, ++ {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, + {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, + {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, + {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index e63d270..e543b80 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -394,9 +394,6 @@ static void pump_transfers(unsigned long data) + chip = dws->cur_chip; + spi = message->spi; + +- if (unlikely(!chip->clk_div)) +- chip->clk_div = dws->max_freq / chip->speed_hz; +- + if (message->state == ERROR_STATE) { + message->status = -EIO; + goto early_exit; +@@ -437,7 +434,7 @@ static void pump_transfers(unsigned long data) + if (transfer->speed_hz) { + speed = chip->speed_hz; + +- if (transfer->speed_hz != speed) { ++ if ((transfer->speed_hz != speed) || (!chip->clk_div)) { + speed = transfer->speed_hz; + if (speed > dws->max_freq) { + printk(KERN_ERR "MRST SPI0: unsupported" +@@ -659,7 +656,6 @@ static int dw_spi_setup(struct spi_device *spi) + dev_err(&spi->dev, "No max speed HZ parameter\n"); + return -EINVAL; + } +- chip->speed_hz = spi->max_speed_hz; + + chip->tmode = 0; /* Tx & Rx */ + /* Default SPI mode is SCPOL = 0, SCPH = 0 */ +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index fed699f..2185a71 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -57,6 +57,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom 
- Abocom */
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{}	/* Terminating entry */
+ };
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9232c773..e6463ef 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2230,7 +2230,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 	 * and let it call back once the write buffers are ready.
+ 	 */
+ 	target_add_to_state_list(cmd);
+-	if (cmd->data_direction != DMA_TO_DEVICE) {
++	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
+ 		target_execute_cmd(cmd);
+ 		return 0;
+ 	}
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index c854593..b195fdb 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Microsoft Wireless Laser Mouse 6000 Receiver */
++	{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 75cb1ff..73c43e5 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	if (xhci_compliance_mode_recovery_timer_quirk_check())
+ 		pdev->no_d3cold = true;
+ 
+-	return xhci_suspend(xhci);
++	return xhci_suspend(xhci, do_wakeup);
+ }
+ 
+ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 8abda5c..1d5ba3c 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -205,7 +205,15 @@ static int xhci_plat_suspend(struct device *dev)
+ 	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 
+-	return xhci_suspend(xhci);
++	/*
++	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
++	 * to do wakeup during suspend. Since xhci_plat_suspend is currently
++	 * only designed for system suspend, device_may_wakeup() is enough
++	 * to determine whether host is allowed to do wakeup. Need to
++	 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
++	 * also applies to runtime suspend.
++	 */
++	return xhci_suspend(xhci, device_may_wakeup(dev));
+ }
+ 
+ static int xhci_plat_resume(struct device *dev)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 0e6665a..1710a86 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1180,9 +1180,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+ 				false);
+ 		xhci_ring_cmd_db(xhci);
+ 	} else {
+-		/* Clear our internal halted state and restart the ring(s) */
++		/* Clear our internal halted state */
+ 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 82b563f..17e3987 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -35,6 +35,8 @@
+ #define DRIVER_AUTHOR "Sarah Sharp"
+ #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+ 
++#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
++
+ /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+ static int link_quirk;
+ module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+@@ -842,13 +844,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ 	xhci_set_cmd_ring_deq(xhci);
+ }
+ 
++static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
++{
++	int port_index;
++	__le32 __iomem **port_array;
++	unsigned long flags;
++	u32 t1, t2;
++
++	spin_lock_irqsave(&xhci->lock, flags);
++
++	/* disable usb3 ports Wake bits */
++	port_index = xhci->num_usb3_ports;
++	port_array = xhci->usb3_ports;
++	while (port_index--) {
++		t1 = readl(port_array[port_index]);
++		t1 = xhci_port_state_to_neutral(t1);
++		t2 = t1 & ~PORT_WAKE_BITS;
++		if (t1 != t2)
++			writel(t2, port_array[port_index]);
++	}
++
++	/* disable usb2 ports Wake bits */
++	port_index = xhci->num_usb2_ports;
++	port_array = xhci->usb2_ports;
++	while (port_index--) {
++		t1 = readl(port_array[port_index]);
++		t1 = xhci_port_state_to_neutral(t1);
++		t2 = t1 & ~PORT_WAKE_BITS;
++		if (t1 != t2)
++			writel(t2, port_array[port_index]);
++	}
++
++	spin_unlock_irqrestore(&xhci->lock, flags);
++}
++
+ /*
+  * Stop HC (not bus-specific)
+  *
+  * This is called when the machine transition into S3/S4 mode.
+  *
+  */
+-int xhci_suspend(struct xhci_hcd *xhci)
++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ {
+ 	int			rc = 0;
+ 	unsigned int		delay = XHCI_MAX_HALT_USEC;
+@@ -859,6 +895,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ 	    xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ 		return -EINVAL;
+ 
++	/* Clear root port wake on bits if wakeup not allowed. */
++	if (!do_wakeup)
++		xhci_disable_port_wake_on_bits(xhci);
++
+ 	/* Don't poll the roothubs on bus suspend.
*/ + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 8faef64..96e9e78 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1760,7 +1760,7 @@ void xhci_shutdown(struct usb_hcd *hcd); + int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); + + #ifdef CONFIG_PM +-int xhci_suspend(struct xhci_hcd *xhci); ++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); + int xhci_resume(struct xhci_hcd *xhci, bool hibernated); + #else + #define xhci_suspend NULL +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 3beae72..5741e94 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ ++ { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index a523ada..debcdef 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -483,6 +483,39 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) }, ++ { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, ++ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 6786b70..e52409c9 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -926,8 +926,8 @@ + #define BAYER_CONTOUR_CABLE_PID 0x6001 + + /* +- * The following are the values for the Matrix Orbital FTDI Range +- * Anything in this range will use an FT232RL. ++ * Matrix Orbital Intelligent USB displays. ++ * http://www.matrixorbital.com + */ + #define MTXORB_VID 0x1B3D + #define MTXORB_FTDI_RANGE_0100_PID 0x0100 +@@ -1186,8 +1186,39 @@ + #define MTXORB_FTDI_RANGE_01FD_PID 0x01FD + #define MTXORB_FTDI_RANGE_01FE_PID 0x01FE + #define MTXORB_FTDI_RANGE_01FF_PID 0x01FF +- +- ++#define MTXORB_FTDI_RANGE_4701_PID 0x4701 ++#define MTXORB_FTDI_RANGE_9300_PID 0x9300 ++#define MTXORB_FTDI_RANGE_9301_PID 0x9301 ++#define MTXORB_FTDI_RANGE_9302_PID 0x9302 ++#define MTXORB_FTDI_RANGE_9303_PID 0x9303 ++#define MTXORB_FTDI_RANGE_9304_PID 0x9304 ++#define MTXORB_FTDI_RANGE_9305_PID 0x9305 ++#define MTXORB_FTDI_RANGE_9306_PID 0x9306 ++#define MTXORB_FTDI_RANGE_9307_PID 0x9307 ++#define MTXORB_FTDI_RANGE_9308_PID 0x9308 ++#define MTXORB_FTDI_RANGE_9309_PID 0x9309 ++#define MTXORB_FTDI_RANGE_930A_PID 0x930A ++#define MTXORB_FTDI_RANGE_930B_PID 0x930B ++#define MTXORB_FTDI_RANGE_930C_PID 0x930C ++#define MTXORB_FTDI_RANGE_930D_PID 0x930D ++#define MTXORB_FTDI_RANGE_930E_PID 0x930E ++#define MTXORB_FTDI_RANGE_930F_PID 0x930F ++#define MTXORB_FTDI_RANGE_9310_PID 0x9310 ++#define MTXORB_FTDI_RANGE_9311_PID 0x9311 ++#define MTXORB_FTDI_RANGE_9312_PID 0x9312 ++#define MTXORB_FTDI_RANGE_9313_PID 0x9313 ++#define MTXORB_FTDI_RANGE_9314_PID 0x9314 ++#define MTXORB_FTDI_RANGE_9315_PID 0x9315 ++#define MTXORB_FTDI_RANGE_9316_PID 0x9316 ++#define MTXORB_FTDI_RANGE_9317_PID 0x9317 ++#define MTXORB_FTDI_RANGE_9318_PID 0x9318 ++#define MTXORB_FTDI_RANGE_9319_PID 0x9319 ++#define MTXORB_FTDI_RANGE_931A_PID 0x931A ++#define MTXORB_FTDI_RANGE_931B_PID 0x931B ++#define MTXORB_FTDI_RANGE_931C_PID 0x931C ++#define MTXORB_FTDI_RANGE_931D_PID 0x931D ++#define MTXORB_FTDI_RANGE_931E_PID 0x931E ++#define MTXORB_FTDI_RANGE_931F_PID 0x931F + + /* + * The Mobility Lab (TML) +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c +index 265c677..49101fe 100644 +--- a/drivers/usb/serial/keyspan.c ++++ b/drivers/usb/serial/keyspan.c +@@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb) + if ((data[0] & 0x80) == 0) { + /* no errors on individual bytes, only + possible overrun err */ +- if (data[0] & RXERROR_OVERRUN) +- err = TTY_OVERRUN; +- else +- err = 0; ++ if (data[0] & RXERROR_OVERRUN) { ++ tty_insert_flip_char(&port->port, 0, ++ TTY_OVERRUN); ++ } + for (i = 1; i < urb->actual_length ; ++i) +- tty_insert_flip_char(&port->port, data[i], err); ++ tty_insert_flip_char(&port->port, data[i], ++ TTY_NORMAL); + } else { + /* some bytes had errors, every byte has status */ + dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); + for (i = 0; i + 1 < urb->actual_length; i += 2) { +- int stat = data[i], flag = 0; +- if (stat & RXERROR_OVERRUN) +- flag |= TTY_OVERRUN; +- if (stat & RXERROR_FRAMING) +- flag |= TTY_FRAME; +- if (stat & RXERROR_PARITY) +- flag |= TTY_PARITY; ++ 
int stat = data[i]; ++ int flag = TTY_NORMAL; ++ ++ if (stat & RXERROR_OVERRUN) { ++ tty_insert_flip_char(&port->port, 0, ++ TTY_OVERRUN); ++ } + /* XXX should handle break (0x10) */ ++ if (stat & RXERROR_PARITY) ++ flag = TTY_PARITY; ++ else if (stat & RXERROR_FRAMING) ++ flag = TTY_FRAME; ++ + tty_insert_flip_char(&port->port, data[i+1], + flag); + } +@@ -666,14 +672,19 @@ static void usa49_indat_callback(struct urb *urb) + } else { + /* some bytes had errors, every byte has status */ + for (i = 0; i + 1 < urb->actual_length; i += 2) { +- int stat = data[i], flag = 0; +- if (stat & RXERROR_OVERRUN) +- flag |= TTY_OVERRUN; +- if (stat & RXERROR_FRAMING) +- flag |= TTY_FRAME; +- if (stat & RXERROR_PARITY) +- flag |= TTY_PARITY; ++ int stat = data[i]; ++ int flag = TTY_NORMAL; ++ ++ if (stat & RXERROR_OVERRUN) { ++ tty_insert_flip_char(&port->port, 0, ++ TTY_OVERRUN); ++ } + /* XXX should handle break (0x10) */ ++ if (stat & RXERROR_PARITY) ++ flag = TTY_PARITY; ++ else if (stat & RXERROR_FRAMING) ++ flag = TTY_FRAME; ++ + tty_insert_flip_char(&port->port, data[i+1], + flag); + } +@@ -730,15 +741,19 @@ static void usa49wg_indat_callback(struct urb *urb) + */ + for (x = 0; x + 1 < len && + i + 1 < urb->actual_length; x += 2) { +- int stat = data[i], flag = 0; ++ int stat = data[i]; ++ int flag = TTY_NORMAL; + +- if (stat & RXERROR_OVERRUN) +- flag |= TTY_OVERRUN; +- if (stat & RXERROR_FRAMING) +- flag |= TTY_FRAME; +- if (stat & RXERROR_PARITY) +- flag |= TTY_PARITY; ++ if (stat & RXERROR_OVERRUN) { ++ tty_insert_flip_char(&port->port, 0, ++ TTY_OVERRUN); ++ } + /* XXX should handle break (0x10) */ ++ if (stat & RXERROR_PARITY) ++ flag = TTY_PARITY; ++ else if (stat & RXERROR_FRAMING) ++ flag = TTY_FRAME; ++ + tty_insert_flip_char(&port->port, data[i+1], + flag); + i += 2; +@@ -790,25 +805,31 @@ static void usa90_indat_callback(struct urb *urb) + if ((data[0] & 0x80) == 0) { + /* no errors on individual bytes, only + possible overrun err*/ +- if (data[0] & RXERROR_OVERRUN) +- err = TTY_OVERRUN; +- else +- err = 0; ++ if (data[0] & RXERROR_OVERRUN) { ++ tty_insert_flip_char(&port->port, 0, ++ TTY_OVERRUN); ++ } + for (i = 1; i < urb->actual_length ; ++i) + tty_insert_flip_char(&port->port, +- data[i], err); ++ data[i], TTY_NORMAL); + } else { + /* some bytes had errors, every byte has status */ + dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); + for (i = 0; i + 1 < urb->actual_length; i += 2) { +- int stat = data[i], flag = 0; +- if (stat & RXERROR_OVERRUN) +- flag |= TTY_OVERRUN; +- if (stat & RXERROR_FRAMING) +- flag |= TTY_FRAME; +- if (stat & RXERROR_PARITY) +- flag |= TTY_PARITY; ++ int stat = data[i]; ++ int flag = TTY_NORMAL; ++ ++ if (stat & RXERROR_OVERRUN) { ++ tty_insert_flip_char( ++ &port->port, 0, ++ TTY_OVERRUN); ++ } + /* XXX should handle break (0x10) */ ++ if (stat & RXERROR_PARITY) ++ flag = TTY_PARITY; ++ else if (stat & RXERROR_FRAMING) ++ flag = TTY_FRAME; ++ + tty_insert_flip_char(&port->port, + data[i+1], flag); + } +diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c +index a7fe664b..70a098d 100644 +--- a/drivers/usb/serial/ssu100.c ++++ b/drivers/usb/serial/ssu100.c +@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, + if (*tty_flag == TTY_NORMAL) + *tty_flag = TTY_FRAME; + } +- if (lsr & UART_LSR_OE){ ++ if (lsr & UART_LSR_OE) { + port->icount.overrun++; +- if (*tty_flag == TTY_NORMAL) +- *tty_flag = TTY_OVERRUN; ++ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); + } + } + +@@ -511,12 +510,8 @@ static 
void ssu100_process_read_urb(struct urb *urb) + if ((len >= 4) && + (packet[0] == 0x1b) && (packet[1] == 0x1b) && + ((packet[2] == 0x00) || (packet[2] == 0x01))) { +- if (packet[2] == 0x00) { ++ if (packet[2] == 0x00) + ssu100_update_lsr(port, packet[3], &flag); +- if (flag == TTY_OVERRUN) +- tty_insert_flip_char(&port->port, 0, +- TTY_OVERRUN); +- } + if (packet[2] == 0x01) + ssu100_update_msr(port, packet[3]); + +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index e48d4a6..5d0b7b8 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -1200,6 +1200,7 @@ static int + vhost_scsi_set_endpoint(struct vhost_scsi *vs, + struct vhost_scsi_target *t) + { ++ struct se_portal_group *se_tpg; + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tpg; + struct tcm_vhost_tpg **vs_tpg; +@@ -1247,6 +1248,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, + ret = -EEXIST; + goto out; + } ++ /* ++ * In order to ensure individual vhost-scsi configfs ++ * groups cannot be removed while in use by vhost ioctl, ++ * go ahead and take an explicit se_tpg->tpg_group.cg_item ++ * dependency now. ++ */ ++ se_tpg = &tpg->se_tpg; ++ ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, ++ &se_tpg->tpg_group.cg_item); ++ if (ret) { ++ pr_warn("configfs_depend_item() failed: %d\n", ret); ++ kfree(vs_tpg); ++ mutex_unlock(&tpg->tv_tpg_mutex); ++ goto out; ++ } + tpg->tv_tpg_vhost_count++; + tpg->vhost_scsi = vs; + vs_tpg[tpg->tport_tpgt] = tpg; +@@ -1289,6 +1305,7 @@ static int + vhost_scsi_clear_endpoint(struct vhost_scsi *vs, + struct vhost_scsi_target *t) + { ++ struct se_portal_group *se_tpg; + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tpg; + struct vhost_virtqueue *vq; +@@ -1337,6 +1354,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, + vs->vs_tpg[target] = NULL; + match = true; + mutex_unlock(&tpg->tv_tpg_mutex); ++ /* ++ * Release se_tpg->tpg_group.cg_item configfs dependency now ++ * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. ++ */ ++ se_tpg = &tpg->se_tpg; ++ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, ++ &se_tpg->tpg_group.cg_item); + } + if (match) { + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { +diff --git a/fs/aio.c b/fs/aio.c +index f45ddaa..2f7e8c2 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt; + static const struct file_operations aio_ring_fops; + static const struct address_space_operations aio_ctx_aops; + ++/* Backing dev info for aio fs. 
++ * -no dirty page accounting or writeback happens ++ */ ++static struct backing_dev_info aio_fs_backing_dev_info = { ++ .name = "aiofs", ++ .state = 0, ++ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, ++}; ++ + static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) + { + struct qstr this = QSTR_INIT("[aio]", 5); +@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) + + inode->i_mapping->a_ops = &aio_ctx_aops; + inode->i_mapping->private_data = ctx; ++ inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; + inode->i_size = PAGE_SIZE * nr_pages; + + path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); +@@ -221,6 +231,9 @@ static int __init aio_setup(void) + if (IS_ERR(aio_mnt)) + panic("Failed to create aio fs mount."); + ++ if (bdi_init(&aio_fs_backing_dev_info)) ++ panic("Failed to init aio fs backing dev info."); ++ + kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); + kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); + +@@ -282,11 +295,6 @@ static const struct file_operations aio_ring_fops = { + .mmap = aio_ring_mmap, + }; + +-static int aio_set_page_dirty(struct page *page) +-{ +- return 0; +-} +- + #if IS_ENABLED(CONFIG_MIGRATION) + static int aio_migratepage(struct address_space *mapping, struct page *new, + struct page *old, enum migrate_mode mode) +@@ -358,7 +366,7 @@ out: + #endif + + static const struct address_space_operations aio_ctx_aops = { +- .set_page_dirty = aio_set_page_dirty, ++ .set_page_dirty = __set_page_dirty_no_writeback, + #if IS_ENABLED(CONFIG_MIGRATION) + .migratepage = aio_migratepage, + #endif +@@ -413,7 +421,6 @@ static int aio_setup_ring(struct kioctx *ctx) + pr_debug("pid(%d) page[%d]->count=%d\n", + current->pid, i, page_count(page)); + SetPageUptodate(page); +- SetPageDirty(page); + unlock_page(page); + + ctx->ring_pages[i] = page; +diff --git a/fs/locks.c b/fs/locks.c +index 4dd39b9..2c61c4e 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2235,16 +2235,28 @@ void locks_remove_flock(struct file *filp) + + while ((fl = *before) != NULL) { + if (fl->fl_file == filp) { +- if (IS_FLOCK(fl)) { +- locks_delete_lock(before); +- continue; +- } + if (IS_LEASE(fl)) { + lease_modify(before, F_UNLCK); + continue; + } +- /* What? */ +- BUG(); ++ ++ /* ++ * There's a leftover lock on the list of a type that ++ * we didn't expect to see. Most likely a classic ++ * POSIX lock that ended up not getting released ++ * properly, or that raced onto the list somehow. Log ++ * some info about it and then just remove it from ++ * the list. 
++ */ ++ WARN(!IS_FLOCK(fl), ++ "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n", ++ MAJOR(inode->i_sb->s_dev), ++ MINOR(inode->i_sb->s_dev), inode->i_ino, ++ fl->fl_type, fl->fl_flags, ++ fl->fl_start, fl->fl_end); ++ ++ locks_delete_lock(before); ++ continue; + } + before = &fl->fl_next; + } +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 2ffebf2..27d7f27 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -113,7 +113,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c) + if (atomic_read(&c->io_count) == 0) + break; + ret = nfs_wait_bit_killable(&c->flags); +- } while (atomic_read(&c->io_count) != 0); ++ } while (atomic_read(&c->io_count) != 0 && !ret); + finish_wait(wq, &q.wait); + return ret; + } +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c +index cc8c5b3..f42bbe5 100644 +--- a/fs/nfsd/nfs4callback.c ++++ b/fs/nfsd/nfs4callback.c +@@ -784,8 +784,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) + { + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { + rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); +- dprintk("%s slot is busy\n", __func__); +- return false; ++ /* Race breaker */ ++ if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { ++ dprintk("%s slot is busy\n", __func__); ++ return false; ++ } ++ rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); + } + return true; + } +diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c +index f8f060f..6040da8 100644 +--- a/fs/nfsd/nfscache.c ++++ b/fs/nfsd/nfscache.c +@@ -224,13 +224,6 @@ hash_refile(struct svc_cacherep *rp) + hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits)); + } + +-static inline bool +-nfsd_cache_entry_expired(struct svc_cacherep *rp) +-{ +- return rp->c_state != RC_INPROG && +- time_after(jiffies, rp->c_timestamp + RC_EXPIRE); +-} +- + /* + * Walk the LRU list and prune off entries that are older than RC_EXPIRE. + * Also prune the oldest ones when the total exceeds the max number of entries. +@@ -242,8 +235,14 @@ prune_cache_entries(void) + long freed = 0; + + list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) { +- if (!nfsd_cache_entry_expired(rp) && +- num_drc_entries <= max_drc_entries) ++ /* ++ * Don't free entries attached to calls that are still ++ * in-progress, but do keep scanning the list. ++ */ ++ if (rp->c_state == RC_INPROG) ++ continue; ++ if (num_drc_entries <= max_drc_entries && ++ time_before(jiffies, rp->c_timestamp + RC_EXPIRE)) + break; + nfsd_reply_cache_free_locked(rp); + freed++; +diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h +index 479eb68..f417fef 100644 +--- a/fs/nfsd/nfsd.h ++++ b/fs/nfsd/nfsd.h +@@ -328,12 +328,15 @@ void nfsd_lockd_shutdown(void); + (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) + + #ifdef CONFIG_NFSD_V4_SECURITY_LABEL +-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ +- (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL) ++#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL + #else +-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 ++#define NFSD4_2_SECURITY_ATTRS 0 + #endif + ++#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ ++ (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ ++ NFSD4_2_SECURITY_ATTRS) ++ + static inline u32 nfsd_suppattrs0(u32 minorversion) + { + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index be5fd38..5d858e0 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -18,8 +18,11 @@ + * position @h. 
For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. + */ +-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) +-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) ++#define GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++ ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + + extern unsigned int __sw_hweight8(unsigned int w); + extern unsigned int __sw_hweight16(unsigned int w); +diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h +index 8bbd7bc..03fa332 100644 +--- a/include/linux/iio/events.h ++++ b/include/linux/iio/events.h +@@ -72,7 +72,7 @@ struct iio_event_data { + + #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) + +-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) ++#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) + + #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) + +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h +index 0068708..0a21fbe 100644 +--- a/include/linux/inetdevice.h ++++ b/include/linux/inetdevice.h +@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) + static __inline__ __be32 inet_make_mask(int logmask) + { + if (logmask) +- return htonl(~((1<<(32-logmask))-1)); ++ return htonl(~((1U<<(32-logmask))-1)); + return 0; + } + +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 33aa2ca..0e5e16c 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -324,6 +324,7 @@ struct pci_dev { + unsigned int is_added:1; + unsigned int is_busmaster:1; /* device is busmaster */ + unsigned int no_msi:1; /* device may not use msi */ ++ unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ + unsigned int block_cfg_access:1; /* config space access is blocked */ + unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ +diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h +index 2883a7a..98f2ade 100644 +--- a/include/sound/soc-dpcm.h ++++ b/include/sound/soc-dpcm.h +@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime { + /* state and update */ + enum snd_soc_dpcm_update runtime_update; + enum snd_soc_dpcm_state state; ++ ++ int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ + }; + + /* can this BE stop and free */ +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index 307d87c..1139b22 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -1621,7 +1621,6 @@ bool uprobe_deny_signal(void) + if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { + utask->state = UTASK_SSTEP_TRAPPED; + set_tsk_thread_flag(t, TIF_UPROBE); +- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); + } + } + +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c +index b851cc5..fbda6b5 100644 +--- a/net/batman-adv/hard-interface.c ++++ b/net/batman-adv/hard-interface.c +@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) + return true; + + /* no more parents..stop recursion */ +- if (net_dev->iflink == net_dev->ifindex) ++ if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex) + return false; + + /* recurse over the parent device */ +diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c +index f2e1573..8f7bd56 100644 +--- a/net/ipv4/fib_rules.c ++++ b/net/ipv4/fib_rules.c +@@ -62,6 +62,10 
@@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) + else + res->tclassid = 0; + #endif ++ ++ if (err == -ESRCH) ++ err = -ENETUNREACH; ++ + return err; + } + EXPORT_SYMBOL_GPL(__fib_lookup); +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index e21934b..0d33f94 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) + &ipv6_hdr(skb)->daddr)) + continue; + #endif ++ } else { ++ continue; + } + + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) +diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c +index 00b2a6d..d65aea2 100644 +--- a/net/ipx/af_ipx.c ++++ b/net/ipx/af_ipx.c +@@ -1763,6 +1763,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + struct ipxhdr *ipx = NULL; + struct sk_buff *skb; + int copied, rc; ++ bool locked = true; + + lock_sock(sk); + /* put the autobinding in */ +@@ -1789,6 +1790,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + if (sock_flag(sk, SOCK_ZAPPED)) + goto out; + ++ release_sock(sk); ++ locked = false; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, + flags & MSG_DONTWAIT, &rc); + if (!skb) +@@ -1822,7 +1825,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + out_free: + skb_free_datagram(sk, skb); + out: +- release_sock(sk); ++ if (locked) ++ release_sock(sk); + return rc; + } + +diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c +index 0fcbe90..12528e9 100644 +--- a/sound/soc/codecs/sgtl5000.c ++++ b/sound/soc/codecs/sgtl5000.c +@@ -1369,8 +1369,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) + + /* enable small pop, introduce 400ms delay in turning off */ + snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL, +- SGTL5000_SMALL_POP, +- SGTL5000_SMALL_POP); ++ SGTL5000_SMALL_POP, 1); + + /* disable short cut detector */ + snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0); +diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h +index 2f8c889..bd7a344 100644 +--- a/sound/soc/codecs/sgtl5000.h ++++ b/sound/soc/codecs/sgtl5000.h +@@ -275,7 +275,7 @@ + #define SGTL5000_BIAS_CTRL_MASK 0x000e + #define SGTL5000_BIAS_CTRL_SHIFT 1 + #define SGTL5000_BIAS_CTRL_WIDTH 3 +-#define SGTL5000_SMALL_POP 0x0001 ++#define SGTL5000_SMALL_POP 0 + + /* + * SGTL5000_CHIP_MIC_CTRL +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c +index 53c03af..0502e3f 100644 +--- a/sound/soc/codecs/wm_adsp.c ++++ b/sound/soc/codecs/wm_adsp.c +@@ -1341,6 +1341,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) + file, blocks, pos - firmware->size); + + out_fw: ++ regmap_async_complete(regmap); + release_firmware(firmware); + wm_adsp_buf_free(&buf_list); + out: +diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c +index 1967f44..9d0c59c 100644 +--- a/sound/soc/sh/fsi.c ++++ b/sound/soc/sh/fsi.c +@@ -1785,8 +1785,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = { + static struct snd_pcm_hardware fsi_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | +- SNDRV_PCM_INFO_MMAP_VALID | +- SNDRV_PCM_INFO_PAUSE, ++ SNDRV_PCM_INFO_MMAP_VALID, + .buffer_bytes_max = 64 * 1024, + .period_bytes_min = 32, + .period_bytes_max = 8192, +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c +index 743de5e..37fcd93 100644 +--- a/sound/soc/sh/rcar/core.c ++++ b/sound/soc/sh/rcar/core.c +@@ -626,8 +626,7 @@ static void rsnd_dai_remove(struct platform_device *pdev, + static struct snd_pcm_hardware 
rsnd_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | +- SNDRV_PCM_INFO_MMAP_VALID | +- SNDRV_PCM_INFO_PAUSE, ++ SNDRV_PCM_INFO_MMAP_VALID, + .buffer_bytes_max = 64 * 1024, + .period_bytes_min = 32, + .period_bytes_max = 8192, +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 02733de..e28704e 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1258,13 +1258,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream) + dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture); + } + ++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd); ++ ++/* Set FE's runtime_update state; the state is protected via PCM stream lock ++ * for avoiding the race with trigger callback. ++ * If the state is unset and a trigger is pending while the previous operation, ++ * process the pending trigger action here. ++ */ ++static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe, ++ int stream, enum snd_soc_dpcm_update state) ++{ ++ struct snd_pcm_substream *substream = ++ snd_soc_dpcm_get_substream(fe, stream); ++ ++ snd_pcm_stream_lock_irq(substream); ++ if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) { ++ dpcm_fe_dai_do_trigger(substream, ++ fe->dpcm[stream].trigger_pending - 1); ++ fe->dpcm[stream].trigger_pending = 0; ++ } ++ fe->dpcm[stream].runtime_update = state; ++ snd_pcm_stream_unlock_irq(substream); ++} ++ + static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) + { + struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_pcm_runtime *runtime = fe_substream->runtime; + int stream = fe_substream->stream, ret = 0; + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); + + ret = dpcm_be_dai_startup(fe, fe_substream->stream); + if (ret < 0) { +@@ -1286,13 +1309,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) + dpcm_set_fe_runtime(fe_substream); + snd_pcm_limit_hw_rates(runtime); + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + return 0; + + unwind: + dpcm_be_dai_startup_unwind(fe, fe_substream->stream); + be_err: +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + return ret; + } + +@@ -1339,7 +1362,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) + struct snd_soc_pcm_runtime *fe = substream->private_data; + int stream = substream->stream; + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); + + /* shutdown the BEs */ + dpcm_be_dai_shutdown(fe, substream->stream); +@@ -1353,7 +1376,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) + dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP); + + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + return 0; + } + +@@ -1401,7 +1424,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) + int err, stream = substream->stream; + + mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); + + dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", 
fe->dai_link->name); + +@@ -1416,7 +1439,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) + err = dpcm_be_dai_hw_free(fe, stream); + + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + + mutex_unlock(&fe->card->mutex); + return 0; +@@ -1509,7 +1532,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, + int ret, stream = substream->stream; + + mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); + + memcpy(&fe->dpcm[substream->stream].hw_params, params, + sizeof(struct snd_pcm_hw_params)); +@@ -1532,7 +1555,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; + + out: +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + mutex_unlock(&fe->card->mutex); + return ret; + } +@@ -1646,7 +1669,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, + } + EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); + +-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) ++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + { + struct snd_soc_pcm_runtime *fe = substream->private_data; + int stream = substream->stream, ret; +@@ -1720,6 +1743,23 @@ out: + return ret; + } + ++static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) ++{ ++ struct snd_soc_pcm_runtime *fe = substream->private_data; ++ int stream = substream->stream; ++ ++ /* if FE's runtime_update is already set, we're in race; ++ * process this trigger later at exit ++ */ ++ if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) { ++ fe->dpcm[stream].trigger_pending = cmd + 1; ++ return 0; /* delayed, assuming it's successful */ ++ } ++ ++ /* we're alone, let's trigger */ ++ return dpcm_fe_dai_do_trigger(substream, cmd); ++} ++ + int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) + { + struct snd_soc_dpcm *dpcm; +@@ -1763,7 +1803,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) + + dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name); + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); + + /* there is no point preparing this FE if there are no BEs */ + if (list_empty(&fe->dpcm[stream].be_clients)) { +@@ -1790,7 +1830,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; + + out: +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + mutex_unlock(&fe->card->mutex); + + return ret; +@@ -1937,11 +1977,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream) + { + int ret; + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); + ret = dpcm_run_update_startup(fe, stream); + if (ret < 0) + dev_err(fe->dev, "ASoC: failed to startup some BEs\n"); +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + + return ret; + } +@@ -1950,11 +1990,11 @@ static int 
dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream) + { + int ret; + +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); + ret = dpcm_run_update_shutdown(fe, stream); + if (ret < 0) + dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n"); +- fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; ++ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); + + return ret; + } +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index c64a3d9..827d404 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1142,6 +1142,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + mdelay(20); ++ ++ /* Marantz/Denon devices with USB DAC functionality need a delay ++ * after each class compliant request ++ */ ++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) && ++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) { ++ ++ switch (le16_to_cpu(dev->descriptor.idProduct)) { ++ case 0x3005: /* Marantz HD-DAC1 */ ++ case 0x3006: /* Marantz SA-14S1 */ ++ mdelay(20); ++ break; ++ } ++ } + } + + /* diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.26-27.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.26-27.patch new file mode 100644 index 0000000000..e788ebdba2 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.26-27.patch @@ -0,0 +1,914 @@ +diff --git a/Makefile b/Makefile +index 63a5ee8..944db23 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 26 ++SUBLEVEL = 27 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S +index 47afd08..fe7e97a 100644 +--- a/arch/powerpc/kernel/vdso32/getcpu.S ++++ b/arch/powerpc/kernel/vdso32/getcpu.S +@@ -30,8 +30,8 @@ + V_FUNCTION_BEGIN(__kernel_getcpu) + .cfi_startproc + mfspr r5,SPRN_USPRG3 +- cmpdi cr0,r3,0 +- cmpdi cr1,r4,0 ++ cmpwi cr0,r3,0 ++ cmpwi cr1,r4,0 + clrlwi r6,r5,16 + rlwinm r7,r5,16,31-15,31-0 + beq cr0,1f +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 14fe7cb..b5bb498 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -75,7 +75,7 @@ suffix-$(CONFIG_KERNEL_XZ) := xz + suffix-$(CONFIG_KERNEL_LZO) := lzo + suffix-$(CONFIG_KERNEL_LZ4) := lz4 + +-RUN_SIZE = $(shell objdump -h vmlinux | \ ++RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ + perl $(srctree)/arch/x86/tools/calc_run_size.pl) + quiet_cmd_mkpiggy = MKPIGGY $@ + cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index 79f9f84..fb345c4 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) + continue; + if (event->attr.config1 & ~er->valid_mask) + return -EINVAL; ++ /* Check if the extra msrs can be safely accessed*/ ++ if (!er->extra_msr_access) ++ return -ENXIO; + + reg->idx = er->idx; + reg->config = event->attr.config1; +diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h +index 4972c24..7876c34 100644 +--- a/arch/x86/kernel/cpu/perf_event.h ++++ b/arch/x86/kernel/cpu/perf_event.h +@@ -293,14 +293,16 @@ struct extra_reg { + u64 config_mask; + u64 valid_mask; + int 
idx; /* per_xxx->regs[] reg index */ ++ bool extra_msr_access; + }; + + #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ +- .event = (e), \ +- .msr = (ms), \ +- .config_mask = (m), \ +- .valid_mask = (vm), \ +- .idx = EXTRA_REG_##i, \ ++ .event = (e), \ ++ .msr = (ms), \ ++ .config_mask = (m), \ ++ .valid_mask = (vm), \ ++ .idx = EXTRA_REG_##i, \ ++ .extra_msr_access = true, \ + } + + #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c +index 5ee8064..d4c0a0e 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -2183,6 +2183,41 @@ static void intel_snb_check_microcode(void) + } + } + ++/* ++ * Under certain circumstances, access certain MSR may cause #GP. ++ * The function tests if the input MSR can be safely accessed. ++ */ ++static bool check_msr(unsigned long msr, u64 mask) ++{ ++ u64 val_old, val_new, val_tmp; ++ ++ /* ++ * Read the current value, change it and read it back to see if it ++ * matches, this is needed to detect certain hardware emulators ++ * (qemu/kvm) that don't trap on the MSR access and always return 0s. ++ */ ++ if (rdmsrl_safe(msr, &val_old)) ++ return false; ++ ++ /* ++ * Only change the bits which can be updated by wrmsrl. ++ */ ++ val_tmp = val_old ^ mask; ++ if (wrmsrl_safe(msr, val_tmp) || ++ rdmsrl_safe(msr, &val_new)) ++ return false; ++ ++ if (val_new != val_tmp) ++ return false; ++ ++ /* Here it's sure that the MSR can be safely accessed. ++ * Restore the old value and return. ++ */ ++ wrmsrl(msr, val_old); ++ ++ return true; ++} ++ + static __init void intel_sandybridge_quirk(void) + { + x86_pmu.check_microcode = intel_snb_check_microcode; +@@ -2272,7 +2307,8 @@ __init int intel_pmu_init(void) + union cpuid10_ebx ebx; + struct event_constraint *c; + unsigned int unused; +- int version; ++ struct extra_reg *er; ++ int version, i; + + if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { + switch (boot_cpu_data.x86) { +@@ -2578,6 +2614,34 @@ __init int intel_pmu_init(void) + } + } + ++ /* ++ * Access LBR MSR may cause #GP under certain circumstances. ++ * E.g. KVM doesn't support LBR MSR ++ * Check all LBT MSR here. ++ * Disable LBR access if any LBR MSRs can not be accessed. ++ */ ++ if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL)) ++ x86_pmu.lbr_nr = 0; ++ for (i = 0; i < x86_pmu.lbr_nr; i++) { ++ if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && ++ check_msr(x86_pmu.lbr_to + i, 0xffffUL))) ++ x86_pmu.lbr_nr = 0; ++ } ++ ++ /* ++ * Access extra MSR may cause #GP under certain circumstances. ++ * E.g. KVM doesn't support offcore event ++ * Check all extra_regs here. 
++ */ ++ if (x86_pmu.extra_regs) { ++ for (er = x86_pmu.extra_regs; er->msr; er++) { ++ er->extra_msr_access = check_msr(er->msr, 0x1ffUL); ++ /* Disable LBR select mapping */ ++ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) ++ x86_pmu.lbr_sel_map = NULL; ++ } ++ } ++ + /* Support full width counters using alternative MSR range */ + if (x86_pmu.intel_cap.full_width_write) { + x86_pmu.max_period = x86_pmu.cntval_mask; +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index e662f14..cc5f102 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -322,6 +322,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ ++ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ ++ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ ++ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ +@@ -493,6 +496,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { + * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 + */ + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, ++ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi }, + + /* Enmotus */ + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, +diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c +index fb0b40a..ee2780d 100644 +--- a/drivers/ata/sata_fsl.c ++++ b/drivers/ata/sata_fsl.c +@@ -1503,7 +1503,7 @@ static int sata_fsl_probe(struct platform_device *ofdev) + host_priv->csr_base = csr_base; + + irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); +- if (irq < 0) { ++ if (!irq) { + dev_err(&ofdev->dev, "invalid irq from platform\n"); + goto error_exit_with_cleanup; + } +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index b6fb3eb..c514690 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -3817,7 +3817,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) + ironlake_fdi_disable(crtc); + + ironlake_disable_pch_transcoder(dev_priv, pipe); +- intel_set_pch_fifo_underrun_reporting(dev, pipe, true); + + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ +@@ -3883,7 +3882,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) + + if (intel_crtc->config.has_pch_encoder) { + lpt_disable_pch_transcoder(dev_priv); +- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); + intel_ddi_fdi_disable(crtc); + } + +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c +index 67c9ff3..af49b24 100644 +--- a/drivers/gpu/drm/i915/intel_lvds.c ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -905,6 +905,17 @@ void intel_lvds_init(struct drm_device *dev) + int pipe; + u8 pin; + ++ /* ++ * Unlock registers and just leave them unlocked. Do this before ++ * checking quirk lists to avoid bogus WARNINGs. 
++ */ ++ if (HAS_PCH_SPLIT(dev)) { ++ I915_WRITE(PCH_PP_CONTROL, ++ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); ++ } else { ++ I915_WRITE(PP_CONTROL, ++ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); ++ } + if (!intel_lvds_supported(dev)) + return; + +@@ -1099,17 +1110,6 @@ out: + DRM_DEBUG_KMS("detected %s-link lvds configuration\n", + lvds_encoder->is_dual_link ? "dual" : "single"); + +- /* +- * Unlock registers and just +- * leave them unlocked +- */ +- if (HAS_PCH_SPLIT(dev)) { +- I915_WRITE(PCH_PP_CONTROL, +- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); +- } else { +- I915_WRITE(PP_CONTROL, +- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); +- } + lvds_connector->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { + DRM_DEBUG_KMS("lid notifier registration failed\n"); +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c +index 0bc9106..6bffe82 100644 +--- a/drivers/gpu/drm/radeon/radeon_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_kms.c +@@ -740,6 +740,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, + + /* Get associated drm_crtc: */ + drmcrtc = &rdev->mode_info.crtcs[crtc]->base; ++ if (!drmcrtc) ++ return -EINVAL; + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, +diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c +index af0b583..e3c6a96 100644 +--- a/drivers/i2c/busses/i2c-davinci.c ++++ b/drivers/i2c/busses/i2c-davinci.c +@@ -411,11 +411,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) + if (dev->cmd_err & DAVINCI_I2C_STR_NACK) { + if (msg->flags & I2C_M_IGNORE_NAK) + return msg->len; +- if (stop) { +- w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); +- w |= DAVINCI_I2C_MDR_STP; +- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); +- } ++ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); ++ w |= DAVINCI_I2C_MDR_STP; ++ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); + return -EREMOTEIO; + } + return -EIO; +diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c +index 90dcc2e..a686057 100644 +--- a/drivers/i2c/busses/i2c-omap.c ++++ b/drivers/i2c/busses/i2c-omap.c +@@ -926,14 +926,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id) + if (stat & OMAP_I2C_STAT_NACK) { + err |= OMAP_I2C_STAT_NACK; + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); +- break; + } + + if (stat & OMAP_I2C_STAT_AL) { + dev_err(dev->dev, "Arbitration lost\n"); + err |= OMAP_I2C_STAT_AL; + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); +- break; + } + + /* +@@ -958,11 +956,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id) + if (dev->fifo_size) + num_bytes = dev->buf_len; + +- omap_i2c_receive_data(dev, num_bytes, true); +- +- if (dev->errata & I2C_OMAP_ERRATA_I207) ++ if (dev->errata & I2C_OMAP_ERRATA_I207) { + i2c_omap_errata_i207(dev, stat); ++ num_bytes = (omap_i2c_read_reg(dev, ++ OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F; ++ } + ++ omap_i2c_receive_data(dev, num_bytes, true); + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); + continue; + } +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c +index 8741cae..7026ab0 100644 +--- a/drivers/media/i2c/smiapp/smiapp-core.c ++++ b/drivers/media/i2c/smiapp/smiapp-core.c +@@ -2138,7 +2138,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev, + ret = smiapp_set_compose(subdev, fh, sel); + break; + default: +- BUG(); ++ ret = -EINVAL; + } + 
+ mutex_unlock(&sensor->mutex); +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 9373f1f..086eac5 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -8548,7 +8548,8 @@ static int tg3_init_rings(struct tg3 *tp) + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + +- if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { ++ if (tnapi->prodring.rx_std && ++ tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + tg3_free_rings(tp); + return -ENOMEM; + } +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 5ca8c47..206e79d 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -1613,6 +1613,8 @@ void igb_power_up_link(struct igb_adapter *adapter) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); ++ ++ igb_setup_link(&adapter->hw); + } + + /** +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index c4c00d9f..96fc7fe 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -213,7 +213,7 @@ + /* Various constants */ + + /* Coalescing */ +-#define MVNETA_TXDONE_COAL_PKTS 16 ++#define MVNETA_TXDONE_COAL_PKTS 1 + #define MVNETA_RX_COAL_PKTS 32 + #define MVNETA_RX_COAL_USEC 100 + +@@ -1612,6 +1612,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_desc *tx_desc; ++ int len = skb->len; + struct netdev_queue *nq; + int frags = 0; + u32 tx_cmd; +@@ -1675,7 +1676,7 @@ out: + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; +- stats->tx_bytes += skb->len; ++ stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +index 57428a0..1e8a4b4 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +@@ -1456,7 +1456,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + + switch (op) { + case RES_OP_RESERVE: +- count = get_param_l(&in_param); ++ count = get_param_l(&in_param) & 0xffffff; + align = get_param_h(&in_param); + err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); + if (err) +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 5441b49..5988910 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2106,9 +2106,8 @@ static int vxlan_init(struct net_device *dev) + spin_lock(&vn->sock_lock); + vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET, + vxlan->dst_port); +- if (vs) { ++ if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) { + /* If we have a socket with same port already, reuse it */ +- atomic_inc(&vs->refcnt); + vxlan_vs_add_dev(vs, vxlan); + } else { + /* otherwise make new socket outside of RTNL */ +@@ -2574,12 +2573,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + + spin_lock(&vn->sock_lock); + vs = vxlan_find_sock(net, ipv6 ? 
AF_INET6 : AF_INET, port);
+- if (vs) {
+- if (vs->rcv == rcv)
+- atomic_inc(&vs->refcnt);
+- else
++ if (vs && ((vs->rcv != rcv) ||
++ !atomic_add_unless(&vs->refcnt, 1, 0)))
+ vs = ERR_PTR(-EBUSY);
+- }
+ spin_unlock(&vn->sock_lock);
+
+ if (!vs)
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index e30d800..19db057 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ len = skb_frag_size(frag);
+ offset = frag->page_offset;
+
+- /* Data must not cross a page boundary. */
+- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
+-
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ while (len > 0) {
+ unsigned long bytes;
+
+- BUG_ON(offset >= PAGE_SIZE);
+-
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1710a86..faa8b98 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1998,22 +1998,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ ep->stopped_td = td;
+ return 0;
+ } else {
+- if (trb_comp_code == COMP_STALL) {
+- /* The transfer is completed from the driver's
+- * perspective, but we need to issue a set dequeue
+- * command for this stalled endpoint to move the dequeue
+- * pointer past the TD. We can't do that here because
+- * the halt condition must be cleared first. Let the
+- * USB class driver clear the stall later.
+- */
+- ep->stopped_td = td;
+- ep->stopped_stream = ep_ring->stream_id;
+- } else if (xhci_requires_manual_halt_cleanup(xhci,
+- ep_ctx, trb_comp_code)) {
+- /* Other types of errors halt the endpoint, but the
+- * class driver doesn't call usb_reset_endpoint() unless
+- * the error is -EPIPE. Clear the halted status in the
+- * xHCI hardware manually.
++ if (trb_comp_code == COMP_STALL ||
++ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
++ trb_comp_code)) {
++ /* Issue a reset endpoint command to clear the host side
++ * halt, followed by a set dequeue command to move the
++ * dequeue pointer past the TD.
++ * The class driver clears the device side halt later.
+ */
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, ep_ring->stream_id,
+@@ -2133,9 +2124,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ else
+ td->urb->actual_length = 0;
+
+- xhci_cleanup_halted_endpoint(xhci,
+- slot_id, ep_index, 0, td, event_trb);
+- return finish_td(xhci, td, event_trb, event, ep, status, true);
++ return finish_td(xhci, td, event_trb, event, ep, status, false);
+ }
+ /*
+ * Did we transfer any data, despite the errors that might have
+@@ -2689,17 +2678,8 @@ cleanup:
+ if (ret) {
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+- /* Leave the TD around for the reset endpoint function
+- * to use(but only if it's not a control endpoint,
+- * since we already queued the Set TR dequeue pointer
+- * command for stalled control endpoints).
+- */ +- if (usb_endpoint_xfer_control(&urb->ep->desc) || +- (trb_comp_code != COMP_STALL && +- trb_comp_code != COMP_BABBLE)) +- xhci_urb_free_priv(xhci, urb_priv); +- else +- kfree(urb_priv); ++ ++ xhci_urb_free_priv(xhci, urb_priv); + + usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); + if ((urb->actual_length != urb->transfer_buffer_length && +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 17e3987..16f4f8d 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2925,63 +2925,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, + } + } + +-/* Deal with stalled endpoints. The core should have sent the control message +- * to clear the halt condition. However, we need to make the xHCI hardware +- * reset its sequence number, since a device will expect a sequence number of +- * zero after the halt condition is cleared. ++/* Called when clearing halted device. The core should have sent the control ++ * message to clear the device halt condition. The host side of the halt should ++ * already be cleared with a reset endpoint command issued when the STALL tx ++ * event was received. ++ * + * Context: in_interrupt + */ ++ + void xhci_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) + { + struct xhci_hcd *xhci; +- struct usb_device *udev; +- unsigned int ep_index; +- unsigned long flags; +- int ret; +- struct xhci_virt_ep *virt_ep; + + xhci = hcd_to_xhci(hcd); +- udev = (struct usb_device *) ep->hcpriv; +- /* Called with a root hub endpoint (or an endpoint that wasn't added +- * with xhci_add_endpoint() +- */ +- if (!ep->hcpriv) +- return; +- ep_index = xhci_get_endpoint_index(&ep->desc); +- virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; +- if (!virt_ep->stopped_td) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, +- "Endpoint 0x%x not halted, refusing to reset.", +- ep->desc.bEndpointAddress); +- return; +- } +- if (usb_endpoint_xfer_control(&ep->desc)) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, +- "Control endpoint stall already handled."); +- return; +- } + +- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, +- "Queueing reset endpoint command"); +- spin_lock_irqsave(&xhci->lock, flags); +- ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); + /* +- * Can't change the ring dequeue pointer until it's transitioned to the +- * stopped state, which is only upon a successful reset endpoint +- * command. Better hope that last command worked! ++ * We might need to implement the config ep cmd in xhci 4.8.1 note: ++ * The Reset Endpoint Command may only be issued to endpoints in the ++ * Halted state. If software wishes reset the Data Toggle or Sequence ++ * Number of an endpoint that isn't in the Halted state, then software ++ * may issue a Configure Endpoint Command with the Drop and Add bits set ++ * for the target endpoint. that is in the Stopped state. 
+ */ +- if (!ret) { +- xhci_cleanup_stalled_ring(xhci, udev, ep_index); +- kfree(virt_ep->stopped_td); +- xhci_ring_cmd_db(xhci); +- } +- virt_ep->stopped_td = NULL; +- virt_ep->stopped_stream = 0; +- spin_unlock_irqrestore(&xhci->lock, flags); + +- if (ret) +- xhci_warn(xhci, "FIXME allocate a new ring segment\n"); ++ /* For now just print debug to follow the situation */ ++ xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", ++ ep->desc.bEndpointAddress); + } + + static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, +diff --git a/mm/frontswap.c b/mm/frontswap.c +index c30eec5..f2a3571 100644 +--- a/mm/frontswap.c ++++ b/mm/frontswap.c +@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page) + the (older) page from frontswap + */ + inc_frontswap_failed_stores(); +- if (dup) ++ if (dup) { + __frontswap_clear(sis, offset); ++ frontswap_ops->invalidate_page(type, offset); ++ } + } + if (frontswap_writethrough_enabled) + /* report failure so swap also writes to swap device */ +diff --git a/mm/memory.c b/mm/memory.c +index 492e36f..48d7365 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -808,20 +808,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, + if (!pte_file(pte)) { + swp_entry_t entry = pte_to_swp_entry(pte); + +- if (swap_duplicate(entry) < 0) +- return entry.val; +- +- /* make sure dst_mm is on swapoff's mmlist. */ +- if (unlikely(list_empty(&dst_mm->mmlist))) { +- spin_lock(&mmlist_lock); +- if (list_empty(&dst_mm->mmlist)) +- list_add(&dst_mm->mmlist, +- &src_mm->mmlist); +- spin_unlock(&mmlist_lock); +- } +- if (likely(!non_swap_entry(entry))) ++ if (likely(!non_swap_entry(entry))) { ++ if (swap_duplicate(entry) < 0) ++ return entry.val; ++ ++ /* make sure dst_mm is on swapoff's mmlist. */ ++ if (unlikely(list_empty(&dst_mm->mmlist))) { ++ spin_lock(&mmlist_lock); ++ if (list_empty(&dst_mm->mmlist)) ++ list_add(&dst_mm->mmlist, ++ &src_mm->mmlist); ++ spin_unlock(&mmlist_lock); ++ } + rss[MM_SWAPENTS]++; +- else if (is_migration_entry(entry)) { ++ } else if (is_migration_entry(entry)) { + page = migration_entry_to_page(entry); + + if (PageAnon(page)) +diff --git a/mm/mmap.c b/mm/mmap.c +index dfe90657..b91ac80 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -745,8 +745,11 @@ again: remove_next = 1 + (end > next->vm_end); + * shrinking vma had, to cover any anon pages imported. + */ + if (exporter && exporter->anon_vma && !importer->anon_vma) { +- if (anon_vma_clone(importer, exporter)) +- return -ENOMEM; ++ int error; ++ ++ error = anon_vma_clone(importer, exporter); ++ if (error) ++ return error; + importer->anon_vma = exporter->anon_vma; + } + } +@@ -2428,7 +2431,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + if (err) + goto out_free_vma; + +- if (anon_vma_clone(new, vma)) ++ err = anon_vma_clone(new, vma); ++ if (err) + goto out_free_mpol; + + if (new->vm_file) +diff --git a/mm/rmap.c b/mm/rmap.c +index cdbd312..cab9820 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) + { + struct anon_vma_chain *avc; + struct anon_vma *anon_vma; ++ int error; + + /* Don't bother if the parent process has no anon_vma here. */ + if (!pvma->anon_vma) +@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) + * First, attach the new VMA to the parent VMA's anon_vmas, + * so rmap can find non-COWed pages in child processes. 
+ */ +- if (anon_vma_clone(vma, pvma)) +- return -ENOMEM; ++ error = anon_vma_clone(vma, pvma); ++ if (error) ++ return error; + + /* Then add our own anon_vma. */ + anon_vma = anon_vma_alloc(); +diff --git a/mm/vmpressure.c b/mm/vmpressure.c +index d4042e7..c5afd57 100644 +--- a/mm/vmpressure.c ++++ b/mm/vmpressure.c +@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work) + unsigned long scanned; + unsigned long reclaimed; + ++ spin_lock(&vmpr->sr_lock); + /* + * Several contexts might be calling vmpressure(), so it is + * possible that the work was rescheduled again before the old +@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work) + * here. No need for any locks here since we don't care if + * vmpr->reclaimed is in sync. + */ +- if (!vmpr->scanned) ++ scanned = vmpr->scanned; ++ if (!scanned) { ++ spin_unlock(&vmpr->sr_lock); + return; ++ } + +- spin_lock(&vmpr->sr_lock); +- scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + vmpr->scanned = 0; + vmpr->reclaimed = 0; +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index b0db904..4617586 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1453,6 +1453,7 @@ static int do_setlink(const struct sk_buff *skb, + goto errout; + } + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { ++ put_net(net); + err = -EPERM; + goto errout; + } +diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c +index 8c8493e..278836f 100644 +--- a/net/ipv4/gre_offload.c ++++ b/net/ipv4/gre_offload.c +@@ -271,6 +271,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) + err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); + + rcu_read_unlock(); ++ ++ skb_set_inner_mac_header(skb, nhoff + grehlen); ++ + return err; + } + +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index e4a8f76..b0a9cb4 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -369,6 +369,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { + .validate = vti_tunnel_validate, + .newlink = vti_newlink, + .changelink = vti_changelink, ++ .dellink = ip_tunnel_dellink, + .get_size = vti_get_size, + .fill_info = vti_fill_info, + }; +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index b27f6d3..4a230b1 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -508,11 +508,11 @@ static int ip6gre_rcv(struct sk_buff *skb) + + skb->protocol = gre_proto; + /* WCCP version 1 and 2 protocol decoding. 
+- * - Change protocol to IP ++ * - Change protocol to IPv6 + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header + */ + if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { +- skb->protocol = htons(ETH_P_IP); ++ skb->protocol = htons(ETH_P_IPV6); + if ((*(h + offset) & 0xF0) != 0x40) + offset += 4; + } +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 9a5339f..28456c9 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -825,6 +825,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, + return vti6_tnl_create2(dev); + } + ++static void vti6_dellink(struct net_device *dev, struct list_head *head) ++{ ++ struct net *net = dev_net(dev); ++ struct vti6_net *ip6n = net_generic(net, vti6_net_id); ++ ++ if (dev != ip6n->fb_tnl_dev) ++ unregister_netdevice_queue(dev, head); ++} ++ + static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[]) + { +@@ -900,6 +909,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = { + .setup = vti6_dev_setup, + .validate = vti6_validate, + .newlink = vti6_newlink, ++ .dellink = vti6_dellink, + .changelink = vti6_changelink, + .get_size = vti6_get_size, + .fill_info = vti6_fill_info, +@@ -945,6 +955,7 @@ static int __net_init vti6_init_net(struct net *net) + if (!ip6n->fb_tnl_dev) + goto err_alloc_dev; + dev_net_set(ip6n->fb_tnl_dev, net); ++ ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops; + + err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); + if (err < 0) +diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c +index 7c7df47..f056f9ed 100644 +--- a/net/mac80211/aes_ccm.c ++++ b/net/mac80211/aes_ccm.c +@@ -54,6 +54,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, + + memset(&aead_req, 0, sizeof(aead_req)); + ++ if (data_len == 0) ++ return -EINVAL; ++ + sg_init_one(&pt, data, data_len); + sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); + sg_init_table(ct, 2); +diff --git a/net/sctp/output.c b/net/sctp/output.c +index 8267b06..740ca5f 100644 +--- a/net/sctp/output.c ++++ b/net/sctp/output.c +@@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet) + sk = chunk->skb->sk; + + /* Allocate the new skb. */ +- nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); ++ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); + if (!nskb) + goto nomem; + + /* Make sure the outbound skb has enough header room reserved. */ +- skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); ++ skb_reserve(nskb, packet->overhead + MAX_HEADER); + + /* Set the owning socket so that we know where to get the + * destination IP address. 
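The sctp_packet_transmit() hunk above swaps LL_MAX_HEADER for MAX_HEADER both when sizing the transmit skb and when reserving its headroom. LL_MAX_HEADER only covers the link-layer header, while MAX_HEADER additionally budgets for tunnel encapsulation, so a packet that is later routed out through a tunnel device no longer runs out of headroom. A minimal sketch of the reserve-then-push pattern the fix relies on; the helper name is illustrative, not part of the patch:

static struct sk_buff *tx_skb_with_headroom(size_t payload, size_t overhead)
{
	/* size the buffer for the worst-case header stack up front */
	struct sk_buff *skb = alloc_skb(payload + MAX_HEADER, GFP_ATOMIC);

	if (!skb)
		return NULL;
	/* then reserve that space, so each later skb_push() for the
	 * transport, IP, and link (or tunnel) headers stays in bounds */
	skb_reserve(skb, overhead + MAX_HEADER);
	return skb;
}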
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c +index eaf64ea..1a05efa 100644 +--- a/sound/pci/hda/patch_analog.c ++++ b/sound/pci/hda/patch_analog.c +@@ -333,6 +333,7 @@ static const struct hda_fixup ad1986a_fixups[] = { + + static const struct snd_pci_quirk ad1986a_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC), ++ SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD), + SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD), + SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK), + SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 4c826a4..910f2db 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -4554,6 +4554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ++ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index b901f46..c7aa71e 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -364,6 +364,8 @@ static void snd_usbmidi_error_timer(unsigned long data) + if (in && in->error_resubmit) { + in->error_resubmit = 0; + for (j = 0; j < INPUT_URBS; ++j) { ++ if (atomic_read(&in->urbs[j]->use_count)) ++ continue; + in->urbs[j]->dev = umidi->dev; + snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC); + } diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.27-28.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.27-28.patch new file mode 100644 index 0000000000..0beea302b8 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.27-28.patch @@ -0,0 +1,1961 @@ +diff --git a/Makefile b/Makefile +index 944db23..a2e572b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 27 ++SUBLEVEL = 28 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi +index 0d8530c..34841fc 100644 +--- a/arch/arm/boot/dts/armada-370.dtsi ++++ b/arch/arm/boot/dts/armada-370.dtsi +@@ -106,11 +106,6 @@ + reg = <0x11100 0x20>; + }; + +- system-controller@18200 { +- compatible = "marvell,armada-370-xp-system-controller"; +- reg = <0x18200 0x100>; +- }; +- + pinctrl { + compatible = "marvell,mv88f6710-pinctrl"; + reg = <0x18000 0x38>; +@@ -167,6 +162,11 @@ + interrupts = <91>; + }; + ++ system-controller@18200 { ++ compatible = "marvell,armada-370-xp-system-controller"; ++ reg = <0x18200 0x100>; ++ }; ++ + gateclk: clock-gating-control@18220 { + compatible = "marvell,armada-370-gating-clock"; + reg = <0x18220 0x4>; +diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S +index 8c1ba4f..3505799 100644 +--- a/arch/arm/mach-tegra/reset-handler.S ++++ b/arch/arm/mach-tegra/reset-handler.S +@@ -51,6 +51,7 @@ ENTRY(tegra_resume) + THUMB( it ne ) + bne cpu_resume @ no + ++ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6 + /* Are we on Tegra20? 
*/ + cmp r6, #TEGRA20 + beq 1f @ Yes +diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h +index 6cddbb0..e0ec201 100644 +--- a/arch/arm64/include/asm/hwcap.h ++++ b/arch/arm64/include/asm/hwcap.h +@@ -30,6 +30,7 @@ + #define COMPAT_HWCAP_IDIVA (1 << 17) + #define COMPAT_HWCAP_IDIVT (1 << 18) + #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT) ++#define COMPAT_HWCAP_LPAE (1 << 20) + #define COMPAT_HWCAP_EVTSTRM (1 << 21) + + #ifndef __ASSEMBLY__ +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c +index c8e9eff..071c382 100644 +--- a/arch/arm64/kernel/setup.c ++++ b/arch/arm64/kernel/setup.c +@@ -67,7 +67,8 @@ EXPORT_SYMBOL_GPL(elf_hwcap); + COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ + COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ + COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ +- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) ++ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ ++ COMPAT_HWCAP_LPAE) + unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; + #endif + +diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c +index db02052..5426c9e 100644 +--- a/arch/s390/kernel/compat_linux.c ++++ b/arch/s390/kernel/compat_linux.c +@@ -245,7 +245,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) + struct group_info *group_info; + int retval; + +- if (!capable(CAP_SETGID)) ++ if (!may_setgroups()) + return -EPERM; + if ((unsigned)gidsetsize > NGROUPS_MAX) + return -EINVAL; +diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h +index 46727eb..6e1aaf7 100644 +--- a/arch/x86/include/uapi/asm/ldt.h ++++ b/arch/x86/include/uapi/asm/ldt.h +@@ -28,6 +28,13 @@ struct user_desc { + unsigned int seg_not_present:1; + unsigned int useable:1; + #ifdef __x86_64__ ++ /* ++ * Because this bit is not present in 32-bit user code, user ++ * programs can pass uninitialized values here. Therefore, in ++ * any context in which a user_desc comes from a 32-bit program, ++ * the kernel must act as though lm == 0, regardless of the ++ * actual value. ++ */ + unsigned int lm:1; + #endif + }; +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index 713f1b3..0b1e1d5 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -280,7 +280,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) + static void __init paravirt_ops_setup(void) + { + pv_info.name = "KVM"; +- pv_info.paravirt_enabled = 1; ++ ++ /* ++ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM ++ * guest kernel works like a bare metal kernel with additional ++ * features, and paravirt_enabled is about features that are ++ * missing. 
++ */
++	pv_info.paravirt_enabled = 0;
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ 		pv_cpu_ops.io_delay = kvm_io_delay;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index e604109..c8e98cd 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -263,7 +263,6 @@ void __init kvmclock_init(void)
+ #endif
+ 	kvm_get_preset_lpj();
+ 	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
+-	pv_info.paravirt_enabled = 1;
+ 	pv_info.name = "KVM";
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 9c0280f9..e2d26ce 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -286,24 +286,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 
+ 	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+ 
+-	/*
+-	 * Reload esp0, LDT and the page table pointer:
+-	 */
++	/* Reload esp0 and ss1. */
+ 	load_sp0(tss, next);
+ 
+-	/*
+-	 * Switch DS and ES.
+-	 * This won't pick up thread selector changes, but I guess that is ok.
+-	 */
+-	savesegment(es, prev->es);
+-	if (unlikely(next->es | prev->es))
+-		loadsegment(es, next->es);
+-
+-	savesegment(ds, prev->ds);
+-	if (unlikely(next->ds | prev->ds))
+-		loadsegment(ds, next->ds);
+-
+-
+ 	/* We must save %fs and %gs before load_TLS() because
+ 	 * %fs and %gs may be cleared by load_TLS().
+ 	 *
+@@ -312,41 +297,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	savesegment(fs, fsindex);
+ 	savesegment(gs, gsindex);
+ 
++	/*
++	 * Load TLS before restoring any segments so that segment loads
++	 * reference the correct GDT entries.
++	 */
+ 	load_TLS(next, cpu);
+ 
+ 	/*
+-	 * Leave lazy mode, flushing any hypercalls made here.
+-	 * This must be done before restoring TLS segments so
+-	 * the GDT and LDT are properly updated, and must be
+-	 * done before math_state_restore, so the TS bit is up
+-	 * to date.
++	 * Leave lazy mode, flushing any hypercalls made here. This
++	 * must be done after loading TLS entries in the GDT but before
++	 * loading segments that might reference them, and it must
++	 * be done before math_state_restore, so the TS bit is up to
++	 * date.
+ 	 */
+ 	arch_end_context_switch(next_p);
+ 
++	/* Switch DS and ES.
++	 *
++	 * Reading them only returns the selectors, but writing them (if
++	 * nonzero) loads the full descriptor from the GDT or LDT. The
++	 * LDT for next is loaded in switch_mm, and the GDT is loaded
++	 * above.
++	 *
++	 * We therefore need to write new values to the segment
++	 * registers on every context switch unless both the new and old
++	 * values are zero.
++	 *
++	 * Note that we don't need to do anything for CS and SS, as
++	 * those are saved and restored as part of pt_regs.
++	 */
++	savesegment(es, prev->es);
++	if (unlikely(next->es | prev->es))
++		loadsegment(es, next->es);
++
++	savesegment(ds, prev->ds);
++	if (unlikely(next->ds | prev->ds))
++		loadsegment(ds, next->ds);
++
+ 	/*
+ 	 * Switch FS and GS.
+ 	 *
+-	 * Segment register != 0 always requires a reload. Also
+-	 * reload when it has changed. When prev process used 64bit
+-	 * base always reload to avoid an information leak.
++	 * These are even more complicated than DS and ES: they have
++	 * 64-bit bases that are controlled by arch_prctl. Those bases
++	 * only differ from the values in the GDT or LDT if the selector
++	 * is 0.
++	 *
++	 * Loading the segment register resets the hidden base part of
++	 * the register to 0 or the value from the GDT / LDT. If the
++	 * next base address is zero, writing 0 to the segment register is
++	 * much faster than using wrmsr to explicitly zero the base.
++	 *
++	 * The thread_struct.fs and thread_struct.gs values are 0
++	 * if the fs and gs bases respectively are not overridden
++	 * from the values implied by fsindex and gsindex. They
++	 * are nonzero, and store the nonzero base addresses, if
++	 * the bases are overridden.
++	 *
++	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
++	 * be impossible.
++	 *
++	 * Therefore we need to reload the segment registers if either
++	 * the old or new selector is nonzero, and we need to override
++	 * the base address if the next thread expects it to be overridden.
++	 *
++	 * This code is unnecessarily slow in the case where the old and
++	 * new indexes are zero and the new base is nonzero -- it will
++	 * unnecessarily write 0 to the selector before writing the new
++	 * base address.
++	 *
++	 * Note: This all depends on arch_prctl being the only way that
++	 * user code can override the segment base. Once wrfsbase and
++	 * wrgsbase are enabled, most of this code will need to change.
+ 	 */
+ 	if (unlikely(fsindex | next->fsindex | prev->fs)) {
+ 		loadsegment(fs, next->fsindex);
++
+ 		/*
+-		 * Check if the user used a selector != 0; if yes
+-		 * clear 64bit base, since overloaded base is always
+-		 * mapped to the Null selector
++		 * If user code wrote a nonzero value to FS, then it also
++		 * cleared the overridden base address.
++		 *
++		 * XXX: if user code wrote 0 to FS and cleared the base
++		 * address itself, we won't notice and we'll incorrectly
++		 * restore the prior base address next time we reschedule
++		 * the process.
+ 		 */
+ 		if (fsindex)
+ 			prev->fs = 0;
+ 	}
+-	/* when next process has a 64bit base use it */
+ 	if (next->fs)
+ 		wrmsrl(MSR_FS_BASE, next->fs);
+ 	prev->fsindex = fsindex;
+ 
+ 	if (unlikely(gsindex | next->gsindex | prev->gs)) {
+ 		load_gs_index(next->gsindex);
++
++		/* This works (and fails) the same way as fsindex above. */
+ 		if (gsindex)
+ 			prev->gs = 0;
+ 	}
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index f7fec09..4e942f3 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -27,6 +27,37 @@ static int get_free_idx(void)
+ 	return -ESRCH;
+ }
+ 
++static bool tls_desc_okay(const struct user_desc *info)
++{
++	if (LDT_empty(info))
++		return true;
++
++	/*
++	 * espfix is required for 16-bit data segments, but espfix
++	 * only works for LDT segments.
++	 */
++	if (!info->seg_32bit)
++		return false;
++
++	/* Only allow data segments in the TLS array. */
++	if (info->contents > 1)
++		return false;
++
++	/*
++	 * Non-present segments with DPL 3 present an interesting attack
++	 * surface. The kernel should handle such segments correctly,
++	 * but TLS is very difficult to protect in a sandbox, so prevent
++	 * such segments from being created.
++	 *
++	 * If userspace needs to remove a TLS entry, it can still delete
++	 * it outright.
++ */ ++ if (info->seg_not_present) ++ return false; ++ ++ return true; ++} ++ + static void set_tls_desc(struct task_struct *p, int idx, + const struct user_desc *info, int n) + { +@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (copy_from_user(&info, u_info, sizeof(info))) + return -EFAULT; + ++ if (!tls_desc_okay(&info)) ++ return -EINVAL; ++ + if (idx == -1) + idx = info.entry_number; + +@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + { + struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; + const struct user_desc *info; ++ int i; + + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + (pos % sizeof(struct user_desc)) != 0 || +@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + else + info = infobuf; + ++ for (i = 0; i < count / sizeof(struct user_desc); i++) ++ if (!tls_desc_okay(info + i)) ++ return -EINVAL; ++ + set_tls_desc(target, + GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), + info, count / sizeof(struct user_desc)); +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 6a3ad80..1de4bee 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err) + { + struct af_alg_completion *completion = req->data; + ++ if (err == -EINPROGRESS) ++ return; ++ + completion->err = err; + complete(&completion->completion); + } +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c +index 4195a01..8e51b3a 100644 +--- a/drivers/md/bitmap.c ++++ b/drivers/md/bitmap.c +@@ -883,7 +883,6 @@ void bitmap_unplug(struct bitmap *bitmap) + { + unsigned long i; + int dirty, need_write; +- int wait = 0; + + if (!bitmap || !bitmap->storage.filemap || + test_bit(BITMAP_STALE, &bitmap->flags)) +@@ -901,16 +900,13 @@ void bitmap_unplug(struct bitmap *bitmap) + clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); + write_page(bitmap, bitmap->storage.filemap[i], 0); + } +- if (dirty) +- wait = 1; +- } +- if (wait) { /* if any writes were performed, we need to wait on them */ +- if (bitmap->storage.file) +- wait_event(bitmap->write_wait, +- atomic_read(&bitmap->pending_writes)==0); +- else +- md_super_wait(bitmap->mddev); + } ++ if (bitmap->storage.file) ++ wait_event(bitmap->write_wait, ++ atomic_read(&bitmap->pending_writes)==0); ++ else ++ md_super_wait(bitmap->mddev); ++ + if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) + bitmap_file_kick(bitmap); + } +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index a1cebf7..03c872f 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -532,6 +532,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, + end_io(&b->bio, r); + } + ++static void inline_endio(struct bio *bio, int error) ++{ ++ bio_end_io_t *end_fn = bio->bi_private; ++ ++ /* ++ * Reset the bio to free any attached resources ++ * (e.g. bio integrity profiles). ++ */ ++ bio_reset(bio); ++ ++ end_fn(bio, error); ++} ++ + static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, + bio_end_io_t *end_io) + { +@@ -543,7 +556,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, + b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; + b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; + b->bio.bi_bdev = b->c->bdev; +- b->bio.bi_end_io = end_io; ++ b->bio.bi_end_io = inline_endio; ++ /* ++ * Use of .bi_private isn't a problem here because ++ * the dm_buffer's inline bio is local to bufio. 
++ */ ++ b->bio.bi_private = end_io; + + /* + * We assume that if len >= PAGE_SIZE ptr is page-aligned. +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index 2331543..ff284b7 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -946,10 +946,14 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) + } + + } else { +- clear_dirty(cache, mg->new_oblock, mg->cblock); +- if (mg->requeue_holder) ++ if (mg->requeue_holder) { ++ clear_dirty(cache, mg->new_oblock, mg->cblock); + cell_defer(cache, mg->new_ocell, true); +- else { ++ } else { ++ /* ++ * The block was promoted via an overwrite, so it's dirty. ++ */ ++ set_dirty(cache, mg->new_oblock, mg->cblock); + bio_endio(mg->new_ocell->holder, 0); + cell_defer(cache, mg->new_ocell, false); + } +@@ -1060,7 +1064,8 @@ static void issue_copy(struct dm_cache_migration *mg) + + avoid = is_discarded_oblock(cache, mg->new_oblock); + +- if (!avoid && bio_writes_complete_block(cache, bio)) { ++ if (writeback_mode(&cache->features) && ++ !avoid && bio_writes_complete_block(cache, bio)) { + issue_overwrite(mg, bio); + return; + } +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 9533f83..4a8d19d 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -709,7 +709,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, + for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) + crypto_xor(data + i * 8, buf, 8); + out: +- memset(buf, 0, sizeof(buf)); ++ memzero_explicit(buf, sizeof(buf)); + return r; + } + +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index 37f2648..f7e052c 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -916,6 +916,24 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, + } + } + ++static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); ++ ++static void check_for_space(struct pool *pool) ++{ ++ int r; ++ dm_block_t nr_free; ++ ++ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) ++ return; ++ ++ r = dm_pool_get_free_block_count(pool->pmd, &nr_free); ++ if (r) ++ return; ++ ++ if (nr_free) ++ set_pool_mode(pool, PM_WRITE); ++} ++ + /* + * A non-zero return indicates read_only or fail_io mode. + * Many callers don't care about the return value. 
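The dm-crypt hunk above replaces memset() with memzero_explicit() when wiping the TCW whitening buffer. The distinction matters because buf is a stack array that is never read again after the wipe, so a plain memset() is a dead store the compiler may legally delete, leaving key material behind in the discarded stack frame; memzero_explicit() adds a barrier that forces the stores to be emitted. A contrived sketch of the pattern; compute_secret() and the buffer size are hypothetical:

static int use_secret_safely(void)
{
	u8 key[32];
	int r;

	r = compute_secret(key, sizeof(key));	/* hypothetical helper */

	/* memset(key, 0, sizeof(key)) could be elided here as a dead
	 * store, leaking the secret in the dead stack frame ... */
	memzero_explicit(key, sizeof(key));	/* ... this wipe cannot be */
	return r;
}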
+@@ -930,6 +948,8 @@ static int commit(struct pool *pool) + r = dm_pool_commit_metadata(pool->pmd); + if (r) + metadata_operation_failed(pool, "dm_pool_commit_metadata", r); ++ else ++ check_for_space(pool); + + return r; + } +@@ -948,8 +968,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) + } + } + +-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); +- + static int alloc_data_block(struct thin_c *tc, dm_block_t *result) + { + int r; +@@ -1592,7 +1610,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) + pool->process_bio = process_bio_read_only; + pool->process_discard = process_discard; + pool->process_prepared_mapping = process_prepared_mapping; +- pool->process_prepared_discard = process_prepared_discard_passdown; ++ pool->process_prepared_discard = process_prepared_discard; + + if (!pool->pf.error_if_no_space && no_space_timeout) + queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 786b689..f4e22bc 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -564,7 +564,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count + { + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + +- return smm->ll.nr_blocks; ++ *count = smm->ll.nr_blocks; ++ ++ return 0; + } + + static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count) +diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c +index 11c19e5..48579e5 100644 +--- a/drivers/mfd/tc6393xb.c ++++ b/drivers/mfd/tc6393xb.c +@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev) + return 0; + } + ++static int tc6393xb_ohci_suspend(struct platform_device *dev) ++{ ++ struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent); ++ ++ /* We can't properly store/restore OHCI state, so fail here */ ++ if (tcpd->resume_restore) ++ return -EBUSY; ++ ++ return tc6393xb_ohci_disable(dev); ++} ++ + static int tc6393xb_fb_enable(struct platform_device *dev) + { + struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent); +@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = { + .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources), + .resources = tc6393xb_ohci_resources, + .enable = tc6393xb_ohci_enable, +- .suspend = tc6393xb_ohci_disable, ++ .suspend = tc6393xb_ohci_suspend, + .resume = tc6393xb_ohci_enable, + .disable = tc6393xb_ohci_disable, + }, +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 7b5424f..df72c47 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -260,7 +260,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, + int ret; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + +- ret = snprintf(buf, PAGE_SIZE, "%d", ++ ret = snprintf(buf, PAGE_SIZE, "%d\n", + get_disk_ro(dev_to_disk(dev)) ^ + md->read_only); + mmc_blk_put(md); +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index 55cd110..caed9d5 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -632,6 +632,13 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) + + WARN_ON(!(data->flags & MMC_DATA_READ)); + ++ /* ++ * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is ++ * in the FIFO region, so we 
really shouldn't access it). ++ */ ++ if (host->verid < DW_MMC_240A) ++ return; ++ + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + goto disable; +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c +index f49666b..257e9ca 100644 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c +@@ -88,8 +88,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) + return; + scratch_32 &= ~((1 << 21) | (1 << 30)); + +- /* Set RTD3 function disabled */ +- scratch_32 |= ((1 << 29) | (1 << 28)); + pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); + + /* Set L1 Entrance Timer */ +diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c +index 1e9d6ad..7563b3d 100644 +--- a/drivers/scsi/NCR5380.c ++++ b/drivers/scsi/NCR5380.c +@@ -2655,14 +2655,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) { + * + * Purpose : abort a command + * +- * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the +- * host byte of the result field to, if zero DID_ABORTED is ++ * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the ++ * host byte of the result field to, if zero DID_ABORTED is + * used. + * +- * Returns : 0 - success, -1 on failure. ++ * Returns : SUCCESS - success, FAILED on failure. + * +- * XXX - there is no way to abort the command that is currently +- * connected, you have to wait for it to complete. If this is ++ * XXX - there is no way to abort the command that is currently ++ * connected, you have to wait for it to complete. If this is + * a problem, we could implement longjmp() / setjmp(), setjmp() + * called where the loop started in NCR5380_main(). + * +@@ -2712,7 +2712,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { + * aborted flag and get back into our main loop. + */ + +- return 0; ++ return SUCCESS; + } + #endif + +diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c +index 5f31017..31ace4b 100644 +--- a/drivers/scsi/aha1740.c ++++ b/drivers/scsi/aha1740.c +@@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy) + * quiet as possible... + */ + +- return 0; ++ return SUCCESS; + } + + static struct scsi_host_template aha1740_template = { +diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c +index 0f3cdbc..30073d4 100644 +--- a/drivers/scsi/atari_NCR5380.c ++++ b/drivers/scsi/atari_NCR5380.c +@@ -2613,7 +2613,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) + * host byte of the result field to, if zero DID_ABORTED is + * used. + * +- * Returns : 0 - success, -1 on failure. ++ * Returns : SUCCESS - success, FAILED on failure. + * + * XXX - there is no way to abort the command that is currently + * connected, you have to wait for it to complete. 
If this is +diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c +index f37f3e3..28fe6fe 100644 +--- a/drivers/scsi/esas2r/esas2r_main.c ++++ b/drivers/scsi/esas2r/esas2r_main.c +@@ -1057,7 +1057,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd) + + cmd->scsi_done(cmd); + +- return 0; ++ return SUCCESS; + } + + spin_lock_irqsave(&a->queue_lock, flags); +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c +index 816db12..52587ce 100644 +--- a/drivers/scsi/megaraid.c ++++ b/drivers/scsi/megaraid.c +@@ -1967,7 +1967,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) + cmd->device->id, cmd->device->lun); + + if(list_empty(&adapter->pending_list)) +- return FALSE; ++ return FAILED; + + list_for_each_safe(pos, next, &adapter->pending_list) { + +@@ -1990,7 +1990,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) + (aor==SCB_ABORT) ? "ABORTING":"RESET", + scb->idx); + +- return FALSE; ++ return FAILED; + } + else { + +@@ -2015,12 +2015,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) + list_add_tail(SCSI_LIST(cmd), + &adapter->completed_list); + +- return TRUE; ++ return SUCCESS; + } + } + } + +- return FALSE; ++ return FAILED; + } + + static inline int +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 3b7ad10..c80afde 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -953,7 +953,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, + cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); + + cmd->sync_cmd = 1; +- cmd->cmd_status = 0xFF; ++ cmd->cmd_status = ENODATA; + + instance->instancet->issue_dcmd(instance, cmd); + +diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c +index 636bbe0..fc57c8a 100644 +--- a/drivers/scsi/sun3_NCR5380.c ++++ b/drivers/scsi/sun3_NCR5380.c +@@ -2597,15 +2597,15 @@ static void NCR5380_reselect (struct Scsi_Host *instance) + * Purpose : abort a command + * + * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the +- * host byte of the result field to, if zero DID_ABORTED is ++ * host byte of the result field to, if zero DID_ABORTED is + * used. + * +- * Returns : 0 - success, -1 on failure. ++ * Returns : SUCCESS - success, FAILED on failure. + * +- * XXX - there is no way to abort the command that is currently +- * connected, you have to wait for it to complete. If this is ++ * XXX - there is no way to abort the command that is currently ++ * connected, you have to wait for it to complete. If this is + * a problem, we could implement longjmp() / setjmp(), setjmp() +- * called where the loop started in NCR5380_main(). ++ * called where the loop started in NCR5380_main(). 
+ */ + + static int NCR5380_abort(struct scsi_cmnd *cmd) +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c +index 71b0ec0..284733e 100644 +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -1824,10 +1824,10 @@ static int __init thermal_init(void) + + exit_netlink: + genetlink_exit(); +-unregister_governors: +- thermal_unregister_governors(); + unregister_class: + class_unregister(&thermal_class); ++unregister_governors: ++ thermal_unregister_governors(); + error: + idr_destroy(&thermal_tz_idr); + idr_destroy(&thermal_cdev_idr); +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 370ef74..0db8ded 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3978,12 +3978,6 @@ again: + if (ret) + break; + +- /* opt_discard */ +- if (btrfs_test_opt(root, DISCARD)) +- ret = btrfs_error_discard_extent(root, start, +- end + 1 - start, +- NULL); +- + clear_extent_dirty(unpin, start, end, GFP_NOFS); + btrfs_error_unpin_extent_range(root, start, end); + cond_resched(); +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 3ff98e2..d2f1c01 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -5503,7 +5503,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, + update_global_block_rsv(fs_info); + } + +-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) ++static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end, ++ const bool return_free_space) + { + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_block_group_cache *cache = NULL; +@@ -5527,7 +5528,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) + + if (start < cache->last_byte_to_unpin) { + len = min(len, cache->last_byte_to_unpin - start); +- btrfs_add_free_space(cache, start, len); ++ if (return_free_space) ++ btrfs_add_free_space(cache, start, len); + } + + start += len; +@@ -5590,7 +5592,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, + end + 1 - start, NULL); + + clear_extent_dirty(unpin, start, end, GFP_NOFS); +- unpin_extent_range(root, start, end); ++ unpin_extent_range(root, start, end, true); + cond_resched(); + } + +@@ -8886,7 +8888,7 @@ out: + + int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) + { +- return unpin_extent_range(root, start, end); ++ return unpin_extent_range(root, start, end, false); + } + + int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c +index 996ad56b..82845a6 100644 +--- a/fs/btrfs/extent_map.c ++++ b/fs/btrfs/extent_map.c +@@ -290,8 +290,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, + if (!em) + goto out; + +- if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) +- list_move(&em->list, &tree->modified_extents); + em->generation = gen; + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + em->mod_start = em->start; +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c +index 2f6735d..31b148f 100644 +--- a/fs/ecryptfs/crypto.c ++++ b/fs/ecryptfs/crypto.c +@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, + break; + case 2: + dst[dst_byte_offset++] |= (src_byte); +- dst[dst_byte_offset] = 0; + current_bit_offset = 0; + break; + } +diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c +index b1eaa7a..03df502 100644 +--- a/fs/ecryptfs/file.c ++++ b/fs/ecryptfs/file.c +@@ -191,23 +191,11 @@ static int ecryptfs_open(struct inode 
*inode, struct file *file) + { + int rc = 0; + struct ecryptfs_crypt_stat *crypt_stat = NULL; +- struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct dentry *ecryptfs_dentry = file->f_path.dentry; + /* Private value of ecryptfs_dentry allocated in + * ecryptfs_lookup() */ + struct ecryptfs_file_info *file_info; + +- mount_crypt_stat = &ecryptfs_superblock_to_private( +- ecryptfs_dentry->d_sb)->mount_crypt_stat; +- if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) +- && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR) +- || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC) +- || (file->f_flags & O_APPEND))) { +- printk(KERN_WARNING "Mount has encrypted view enabled; " +- "files may only be read\n"); +- rc = -EPERM; +- goto out; +- } + /* Released in ecryptfs_release or end of function if failure */ + file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); + ecryptfs_set_file_private(file, file_info); +diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c +index 1b119d3..34eb843 100644 +--- a/fs/ecryptfs/main.c ++++ b/fs/ecryptfs/main.c +@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags + { + struct super_block *s; + struct ecryptfs_sb_info *sbi; ++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct ecryptfs_dentry_info *root_info; + const char *err = "Getting sb failed"; + struct inode *inode; +@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags + err = "Error parsing options"; + goto out; + } ++ mount_crypt_stat = &sbi->mount_crypt_stat; + + s = sget(fs_type, NULL, set_anon_super, flags, NULL); + if (IS_ERR(s)) { +@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags + + /** + * Set the POSIX ACL flag based on whether they're enabled in the lower +- * mount. Force a read-only eCryptfs mount if the lower mount is ro. +- * Allow a ro eCryptfs mount even when the lower mount is rw. ++ * mount. + */ + s->s_flags = flags & ~MS_POSIXACL; +- s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); ++ s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL; ++ ++ /** ++ * Force a read-only eCryptfs mount when: ++ * 1) The lower mount is ro ++ * 2) The ecryptfs_encrypted_view mount option is specified ++ */ ++ if (path.dentry->d_sb->s_flags & MS_RDONLY || ++ mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) ++ s->s_flags |= MS_RDONLY; + + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; + s->s_blocksize = path.dentry->d_sb->s_blocksize; +diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c +index f488bba..735d752 100644 +--- a/fs/isofs/rock.c ++++ b/fs/isofs/rock.c +@@ -30,6 +30,7 @@ struct rock_state { + int cont_size; + int cont_extent; + int cont_offset; ++ int cont_loops; + struct inode *inode; + }; + +@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode) + rs->inode = inode; + } + ++/* Maximum number of Rock Ridge continuation entries */ ++#define RR_MAX_CE_ENTRIES 32 ++ + /* + * Returns 0 if the caller should continue scanning, 1 if the scan must end + * and -ve on error. 
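The rock.c changes here harden the Rock Ridge parser against crafted ISO images in two independent ways: rock_continue() now gives up after RR_MAX_CE_ENTRIES continuation entries, so a CE chain that loops back on itself can no longer pin the kernel inside the parser, and the ER handler checks that the advertised identifier length actually fits inside the entry before trusting it. The loop cap is the usual bounded-chain-walking defence; a generic sketch, with struct node and follow() as hypothetical stand-ins for the on-disk records:

#define MAX_CHAIN_HOPS 32		/* same spirit as RR_MAX_CE_ENTRIES */

static int walk_chain(struct node *n)	/* struct node, follow(): hypothetical */
{
	int hops = 0;

	while (n) {
		/* refuse unbounded chains: corrupt or hostile input */
		if (++hops > MAX_CHAIN_HOPS)
			return -EIO;
		n = follow(n);		/* loads the next record, NULL at end */
	}
	return 0;
}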
+@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs) + goto out; + } + ret = -EIO; ++ if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) ++ goto out; + bh = sb_bread(rs->inode->i_sb, rs->cont_extent); + if (bh) { + memcpy(rs->buffer, bh->b_data + rs->cont_offset, +@@ -356,6 +362,9 @@ repeat: + rs.cont_size = isonum_733(rr->u.CE.size); + break; + case SIG('E', 'R'): ++ /* Invalid length of ER tag id? */ ++ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len) ++ goto out; + ISOFS_SB(inode->i_sb)->s_rock = 1; + printk(KERN_DEBUG "ISO 9660 Extensions: "); + { +diff --git a/fs/namespace.c b/fs/namespace.c +index d9bf3ef..039f380 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1295,6 +1295,8 @@ void umount_tree(struct mount *mnt, int how) + } + if (last) { + last->mnt_hash.next = unmounted.first; ++ if (unmounted.first) ++ unmounted.first->pprev = &last->mnt_hash.next; + unmounted.first = tmp_list.first; + unmounted.first->pprev = &unmounted.first; + } +@@ -1439,6 +1441,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) + goto dput_and_out; + if (mnt->mnt.mnt_flags & MNT_LOCKED) + goto dput_and_out; ++ retval = -EPERM; ++ if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) ++ goto dput_and_out; + + retval = do_umount(mnt, flags); + dput_and_out: +@@ -1964,7 +1969,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags, + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && + !(mnt_flags & MNT_NODEV)) { +- return -EPERM; ++ /* Was the nodev implicitly added in mount? */ ++ if ((mnt->mnt_ns->user_ns != &init_user_ns) && ++ !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) { ++ mnt_flags |= MNT_NODEV; ++ } else { ++ return -EPERM; ++ } + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && + !(mnt_flags & MNT_NOSUID)) { +diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c +index 60426cc..2f970de 100644 +--- a/fs/ncpfs/ioctl.c ++++ b/fs/ncpfs/ioctl.c +@@ -448,7 +448,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg + result = -EIO; + } + } +- result = 0; + } + mutex_unlock(&server->root_setup_lock); + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index bd01803..58258ad 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -7589,6 +7589,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) + + dprintk("--> %s\n", __func__); + ++ /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ ++ pnfs_get_layout_hdr(NFS_I(inode)->layout); ++ + lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); + if (!lgp->args.layout.pages) { + nfs4_layoutget_release(lgp); +@@ -7601,9 +7604,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) + lgp->res.seq_res.sr_slot = NULL; + nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); + +- /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ +- pnfs_get_layout_hdr(NFS_I(inode)->layout); +- + task = rpc_run_task(&task_setup_data); + if (IS_ERR(task)) + return ERR_CAST(task); +diff --git a/fs/proc/base.c b/fs/proc/base.c +index b976062..489ba8c 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -2555,6 +2555,57 @@ static const struct file_operations proc_projid_map_operations = { + .llseek = seq_lseek, + .release = proc_id_map_release, + }; ++ ++static int proc_setgroups_open(struct inode *inode, struct file *file) ++{ ++ struct user_namespace *ns = NULL; ++ struct task_struct *task; ++ int ret; ++ ++ ret = -ESRCH; ++ task = get_proc_task(inode); ++ if (task) { ++ rcu_read_lock(); ++ ns = get_user_ns(task_cred_xxx(task, 
user_ns)); ++ rcu_read_unlock(); ++ put_task_struct(task); ++ } ++ if (!ns) ++ goto err; ++ ++ if (file->f_mode & FMODE_WRITE) { ++ ret = -EACCES; ++ if (!ns_capable(ns, CAP_SYS_ADMIN)) ++ goto err_put_ns; ++ } ++ ++ ret = single_open(file, &proc_setgroups_show, ns); ++ if (ret) ++ goto err_put_ns; ++ ++ return 0; ++err_put_ns: ++ put_user_ns(ns); ++err: ++ return ret; ++} ++ ++static int proc_setgroups_release(struct inode *inode, struct file *file) ++{ ++ struct seq_file *seq = file->private_data; ++ struct user_namespace *ns = seq->private; ++ int ret = single_release(inode, file); ++ put_user_ns(ns); ++ return ret; ++} ++ ++static const struct file_operations proc_setgroups_operations = { ++ .open = proc_setgroups_open, ++ .write = proc_setgroups_write, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = proc_setgroups_release, ++}; + #endif /* CONFIG_USER_NS */ + + static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, +@@ -2663,6 +2714,7 @@ static const struct pid_entry tgid_base_stuff[] = { + REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), + REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), + REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), ++ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), + #endif + #ifdef CONFIG_CHECKPOINT_RESTORE + REG("timers", S_IRUGO, proc_timers_operations), +@@ -2998,6 +3050,7 @@ static const struct pid_entry tid_base_stuff[] = { + REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), + REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), + REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), ++ REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), + #endif + }; + +diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c +index d7c6dbe..d89f324 100644 +--- a/fs/udf/symlink.c ++++ b/fs/udf/symlink.c +@@ -80,11 +80,17 @@ static int udf_symlink_filler(struct file *file, struct page *page) + struct inode *inode = page->mapping->host; + struct buffer_head *bh = NULL; + unsigned char *symlink; +- int err = -EIO; ++ int err; + unsigned char *p = kmap(page); + struct udf_inode_info *iinfo; + uint32_t pos; + ++ /* We don't support symlinks longer than one block */ ++ if (inode->i_size > inode->i_sb->s_blocksize) { ++ err = -ENAMETOOLONG; ++ goto out_unmap; ++ } ++ + iinfo = UDF_I(inode); + pos = udf_block_map(inode, 0); + +@@ -94,8 +100,10 @@ static int udf_symlink_filler(struct file *file, struct page *page) + } else { + bh = sb_bread(inode->i_sb, pos); + +- if (!bh) +- goto out; ++ if (!bh) { ++ err = -EIO; ++ goto out_unlock_inode; ++ } + + symlink = bh->b_data; + } +@@ -109,9 +117,10 @@ static int udf_symlink_filler(struct file *file, struct page *page) + unlock_page(page); + return 0; + +-out: ++out_unlock_inode: + up_read(&iinfo->i_data_sem); + SetPageError(page); ++out_unmap: + kunmap(page); + unlock_page(page); + return err; +diff --git a/include/linux/audit.h b/include/linux/audit.h +index ec1464d..419b7d7 100644 +--- a/include/linux/audit.h ++++ b/include/linux/audit.h +@@ -47,6 +47,7 @@ struct sk_buff; + + struct audit_krule { + int vers_ops; ++ u32 pflags; + u32 flags; + u32 listnr; + u32 action; +@@ -64,6 +65,9 @@ struct audit_krule { + u64 prio; + }; + ++/* Flag to indicate legacy AUDIT_LOGINUID unset usage */ ++#define AUDIT_LOGINUID_LEGACY 0x1 ++ + struct audit_field { + u32 type; + u32 val; +diff --git a/include/linux/cred.h b/include/linux/cred.h +index 04421e8..6c58dd7 100644 +--- a/include/linux/cred.h ++++ b/include/linux/cred.h +@@ -68,6 +68,7 @@ 
extern void groups_free(struct group_info *); + extern int set_current_groups(struct group_info *); + extern int set_groups(struct cred *, struct group_info *); + extern int groups_search(const struct group_info *, kgid_t); ++extern bool may_setgroups(void); + + /* access the groups "array" with this macro */ + #define GROUP_AT(gi, i) \ +diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h +index 4836ba3..e92abf9 100644 +--- a/include/linux/user_namespace.h ++++ b/include/linux/user_namespace.h +@@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */ + } extent[UID_GID_MAP_MAX_EXTENTS]; + }; + ++#define USERNS_SETGROUPS_ALLOWED 1UL ++ ++#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED ++ + struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; +@@ -27,6 +31,7 @@ struct user_namespace { + kuid_t owner; + kgid_t group; + unsigned int proc_inum; ++ unsigned long flags; + + /* Register of per-UID persistent keyrings for this namespace */ + #ifdef CONFIG_PERSISTENT_KEYRINGS +@@ -63,6 +68,9 @@ extern struct seq_operations proc_projid_seq_operations; + extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); + extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); + extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); ++extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); ++extern int proc_setgroups_show(struct seq_file *m, void *v); ++extern bool userns_may_setgroups(const struct user_namespace *ns); + #else + + static inline struct user_namespace *get_user_ns(struct user_namespace *ns) +@@ -87,6 +95,10 @@ static inline void put_user_ns(struct user_namespace *ns) + { + } + ++static inline bool userns_may_setgroups(const struct user_namespace *ns) ++{ ++ return true; ++} + #endif + + #endif /* _LINUX_USER_H */ +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index 92062fd..598c1dc 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -429,6 +429,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, + if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) { + f->type = AUDIT_LOGINUID_SET; + f->val = 0; ++ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; + } + + err = audit_field_valid(entry, f); +@@ -604,6 +605,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) + data->buflen += data->values[i] = + audit_pack_string(&bufp, krule->filterkey); + break; ++ case AUDIT_LOGINUID_SET: ++ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) { ++ data->fields[i] = AUDIT_LOGINUID; ++ data->values[i] = AUDIT_UID_UNSET; ++ break; ++ } ++ /* fallthrough if set */ + default: + data->values[i] = f->val; + } +@@ -620,6 +628,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) + int i; + + if (a->flags != b->flags || ++ a->pflags != b->pflags || + a->listnr != b->listnr || + a->action != b->action || + a->field_count != b->field_count) +@@ -738,6 +747,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old) + new = &entry->rule; + new->vers_ops = old->vers_ops; + new->flags = old->flags; ++ new->pflags = old->pflags; + new->listnr = old->listnr; + new->action = old->action; + for (i = 0; i < AUDIT_BITMASK_SIZE; i++) +diff --git a/kernel/groups.c b/kernel/groups.c +index 90cf1c3..67b4ba3 100644 +--- a/kernel/groups.c ++++ b/kernel/groups.c +@@ -6,6 +6,7 @@ + #include + #include + 
#include ++#include + #include + + /* init to 2 - one for init_task, one to ensure it is never freed */ +@@ -223,6 +224,14 @@ out: + return i; + } + ++bool may_setgroups(void) ++{ ++ struct user_namespace *user_ns = current_user_ns(); ++ ++ return ns_capable(user_ns, CAP_SETGID) && ++ userns_may_setgroups(user_ns); ++} ++ + /* + * SMP: Our groups are copy-on-write. We can set them safely + * without another task interfering. +@@ -233,7 +242,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) + struct group_info *group_info; + int retval; + +- if (!ns_capable(current_user_ns(), CAP_SETGID)) ++ if (!may_setgroups()) + return -EPERM; + if ((unsigned)gidsetsize > NGROUPS_MAX) + return -EINVAL; +diff --git a/kernel/pid.c b/kernel/pid.c +index 9b9a266..82430c8 100644 +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -341,6 +341,8 @@ out: + + out_unlock: + spin_unlock_irq(&pidmap_lock); ++ put_pid_ns(ns); ++ + out_free: + while (++i <= ns->level) + free_pidmap(pid->numbers + i); +diff --git a/kernel/uid16.c b/kernel/uid16.c +index 602e5bb..d58cc4d 100644 +--- a/kernel/uid16.c ++++ b/kernel/uid16.c +@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) + struct group_info *group_info; + int retval; + +- if (!ns_capable(current_user_ns(), CAP_SETGID)) ++ if (!may_setgroups()) + return -EPERM; + if ((unsigned)gidsetsize > NGROUPS_MAX) + return -EINVAL; +diff --git a/kernel/user.c b/kernel/user.c +index c006131..c2bbb50 100644 +--- a/kernel/user.c ++++ b/kernel/user.c +@@ -51,6 +51,7 @@ struct user_namespace init_user_ns = { + .owner = GLOBAL_ROOT_UID, + .group = GLOBAL_ROOT_GID, + .proc_inum = PROC_USER_INIT_INO, ++ .flags = USERNS_INIT_FLAGS, + #ifdef CONFIG_PERSISTENT_KEYRINGS + .persistent_keyring_register_sem = + __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem), +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index 80a57af..153971e 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -24,6 +24,7 @@ + #include + + static struct kmem_cache *user_ns_cachep __read_mostly; ++static DEFINE_MUTEX(userns_state_mutex); + + static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int cap_setid, +@@ -99,6 +100,11 @@ int create_user_ns(struct cred *new) + ns->owner = owner; + ns->group = group; + ++ /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */ ++ mutex_lock(&userns_state_mutex); ++ ns->flags = parent_ns->flags; ++ mutex_unlock(&userns_state_mutex); ++ + set_cred_user_ns(new, ns); + + #ifdef CONFIG_PERSISTENT_KEYRINGS +@@ -581,9 +587,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent + return false; + } + +- +-static DEFINE_MUTEX(id_map_mutex); +- + static ssize_t map_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, + int cap_setid, +@@ -600,7 +603,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, + ssize_t ret = -EINVAL; + + /* +- * The id_map_mutex serializes all writes to any given map. ++ * The userns_state_mutex serializes all writes to any given map. + * + * Any map is only ever written once. + * +@@ -618,7 +621,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, + * order and smp_rmb() is guaranteed that we don't have crazy + * architectures returning stale data. 
+ */ +- mutex_lock(&id_map_mutex); ++ mutex_lock(&userns_state_mutex); + + ret = -EPERM; + /* Only allow one successful write to the map */ +@@ -745,7 +748,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, + *ppos = count; + ret = count; + out: +- mutex_unlock(&id_map_mutex); ++ mutex_unlock(&userns_state_mutex); + if (page) + free_page(page); + return ret; +@@ -804,17 +807,21 @@ static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int cap_setid, + struct uid_gid_map *new_map) + { +- /* Allow mapping to your own filesystem ids */ +- if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) { ++ const struct cred *cred = file->f_cred; ++ /* Don't allow mappings that would allow anything that wouldn't ++ * be allowed without the establishment of unprivileged mappings. ++ */ ++ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) && ++ uid_eq(ns->owner, cred->euid)) { + u32 id = new_map->extent[0].lower_first; + if (cap_setid == CAP_SETUID) { + kuid_t uid = make_kuid(ns->parent, id); +- if (uid_eq(uid, file->f_cred->fsuid)) ++ if (uid_eq(uid, cred->euid)) + return true; +- } +- else if (cap_setid == CAP_SETGID) { ++ } else if (cap_setid == CAP_SETGID) { + kgid_t gid = make_kgid(ns->parent, id); +- if (gid_eq(gid, file->f_cred->fsgid)) ++ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) && ++ gid_eq(gid, cred->egid)) + return true; + } + } +@@ -834,6 +841,100 @@ static bool new_idmap_permitted(const struct file *file, + return false; + } + ++int proc_setgroups_show(struct seq_file *seq, void *v) ++{ ++ struct user_namespace *ns = seq->private; ++ unsigned long userns_flags = ACCESS_ONCE(ns->flags); ++ ++ seq_printf(seq, "%s\n", ++ (userns_flags & USERNS_SETGROUPS_ALLOWED) ? ++ "allow" : "deny"); ++ return 0; ++} ++ ++ssize_t proc_setgroups_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct seq_file *seq = file->private_data; ++ struct user_namespace *ns = seq->private; ++ char kbuf[8], *pos; ++ bool setgroups_allowed; ++ ssize_t ret; ++ ++ /* Only allow a very narrow range of strings to be written */ ++ ret = -EINVAL; ++ if ((*ppos != 0) || (count >= sizeof(kbuf))) ++ goto out; ++ ++ /* What was written? */ ++ ret = -EFAULT; ++ if (copy_from_user(kbuf, buf, count)) ++ goto out; ++ kbuf[count] = '\0'; ++ pos = kbuf; ++ ++ /* What is being requested? */ ++ ret = -EINVAL; ++ if (strncmp(pos, "allow", 5) == 0) { ++ pos += 5; ++ setgroups_allowed = true; ++ } ++ else if (strncmp(pos, "deny", 4) == 0) { ++ pos += 4; ++ setgroups_allowed = false; ++ } ++ else ++ goto out; ++ ++ /* Verify there is not trailing junk on the line */ ++ pos = skip_spaces(pos); ++ if (*pos != '\0') ++ goto out; ++ ++ ret = -EPERM; ++ mutex_lock(&userns_state_mutex); ++ if (setgroups_allowed) { ++ /* Enabling setgroups after setgroups has been disabled ++ * is not allowed. ++ */ ++ if (!(ns->flags & USERNS_SETGROUPS_ALLOWED)) ++ goto out_unlock; ++ } else { ++ /* Permanently disabling setgroups after setgroups has ++ * been enabled by writing the gid_map is not allowed. 
++ */ ++ if (ns->gid_map.nr_extents != 0) ++ goto out_unlock; ++ ns->flags &= ~USERNS_SETGROUPS_ALLOWED; ++ } ++ mutex_unlock(&userns_state_mutex); ++ ++ /* Report a successful write */ ++ *ppos = count; ++ ret = count; ++out: ++ return ret; ++out_unlock: ++ mutex_unlock(&userns_state_mutex); ++ goto out; ++} ++ ++bool userns_may_setgroups(const struct user_namespace *ns) ++{ ++ bool allowed; ++ ++ mutex_lock(&userns_state_mutex); ++ /* It is not safe to use setgroups until a gid mapping in ++ * the user namespace has been established. ++ */ ++ allowed = ns->gid_map.nr_extents != 0; ++ /* Is setgroups allowed? */ ++ allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED); ++ mutex_unlock(&userns_state_mutex); ++ ++ return allowed; ++} ++ + static void *userns_get(struct task_struct *task) + { + struct user_namespace *user_ns; +diff --git a/net/mac80211/key.c b/net/mac80211/key.c +index 6ff65a1..d78b37a 100644 +--- a/net/mac80211/key.c ++++ b/net/mac80211/key.c +@@ -652,7 +652,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, + int i; + + mutex_lock(&local->key_mtx); +- for (i = 0; i < NUM_DEFAULT_KEYS; i++) { ++ for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) { + key = key_mtx_dereference(local, sta->gtk[i]); + if (!key) + continue; +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 095c160..1e4dc4e 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1679,14 +1679,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + +- if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) +- goto out; +- + if (is_multicast_ether_addr(hdr->addr1)) { + rx->local->dot11MulticastReceivedFrameCount++; +- goto out; ++ goto out_no_led; + } + ++ if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) ++ goto out; ++ + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + if (skb_linearize(rx->skb)) +@@ -1777,9 +1777,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + status->rx_flags |= IEEE80211_RX_FRAGMENTED; + + out: ++ ieee80211_led_rx(rx->local); ++ out_no_led: + if (rx->sta) + rx->sta->rx_packets++; +- ieee80211_led_rx(rx->local); + return RX_CONTINUE; + } + +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c +index 9e1e005..c4c8df4 100644 +--- a/security/keys/encrypted-keys/encrypted.c ++++ b/security/keys/encrypted-keys/encrypted.c +@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void) + ret = encrypted_shash_alloc(); + if (ret < 0) + return ret; ++ ret = aes_get_sizes(); ++ if (ret < 0) ++ goto out; + ret = register_key_type(&key_type_encrypted); + if (ret < 0) + goto out; +- return aes_get_sizes(); ++ return 0; + out: + encrypted_shash_release(); + return ret; +diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c +index 1b3ff2f..5177850 100644 +--- a/tools/testing/selftests/mount/unprivileged-remount-test.c ++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c +@@ -6,6 +6,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -32,11 +34,14 @@ + # define CLONE_NEWPID 0x20000000 + #endif + ++#ifndef MS_REC ++# define MS_REC 16384 ++#endif + #ifndef MS_RELATIME +-#define MS_RELATIME (1 << 21) ++# define MS_RELATIME (1 << 21) + #endif + #ifndef MS_STRICTATIME +-#define MS_STRICTATIME (1 << 24) ++# define MS_STRICTATIME (1 << 24) + #endif + + static void die(char *fmt, ...) 
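The selftest updates that follow track the /proc/<pid>/setgroups knob introduced above: before an unprivileged process may write "0 <gid> 1" to its gid_map, it must first write "deny" to its setgroups file, and on kernels that predate the knob the file simply does not exist, which is why the test's maybe_write_file() treats ENOENT as success. A minimal standalone version of that sequence, with error handling trimmed; the function name is hypothetical:

static void deny_setgroups_then_map_gid(gid_t gid)
{
	int fd;

	/* must precede the gid_map write, or the unprivileged mapping
	 * is rejected with EPERM; ENOENT means a pre-setgroups kernel */
	fd = open("/proc/self/setgroups", O_WRONLY);
	if (fd >= 0) {
		write(fd, "deny", 4);
		close(fd);
	}

	/* map gid 0 inside the namespace to our gid outside it */
	fd = open("/proc/self/gid_map", O_WRONLY);
	if (fd >= 0) {
		dprintf(fd, "0 %d 1", (int)gid);
		close(fd);
	}
}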
+@@ -48,17 +53,14 @@ static void die(char *fmt, ...) + exit(EXIT_FAILURE); + } + +-static void write_file(char *filename, char *fmt, ...) ++static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap) + { + char buf[4096]; + int fd; + ssize_t written; + int buf_len; +- va_list ap; + +- va_start(ap, fmt); + buf_len = vsnprintf(buf, sizeof(buf), fmt, ap); +- va_end(ap); + if (buf_len < 0) { + die("vsnprintf failed: %s\n", + strerror(errno)); +@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...) + + fd = open(filename, O_WRONLY); + if (fd < 0) { ++ if ((errno == ENOENT) && enoent_ok) ++ return; + die("open of %s failed: %s\n", + filename, strerror(errno)); + } +@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...) + } + } + ++static void maybe_write_file(char *filename, char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ vmaybe_write_file(true, filename, fmt, ap); ++ va_end(ap); ++ ++} ++ ++static void write_file(char *filename, char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ vmaybe_write_file(false, filename, fmt, ap); ++ va_end(ap); ++ ++} ++ ++static int read_mnt_flags(const char *path) ++{ ++ int ret; ++ struct statvfs stat; ++ int mnt_flags; ++ ++ ret = statvfs(path, &stat); ++ if (ret != 0) { ++ die("statvfs of %s failed: %s\n", ++ path, strerror(errno)); ++ } ++ if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \ ++ ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \ ++ ST_SYNCHRONOUS | ST_MANDLOCK)) { ++ die("Unrecognized mount flags\n"); ++ } ++ mnt_flags = 0; ++ if (stat.f_flag & ST_RDONLY) ++ mnt_flags |= MS_RDONLY; ++ if (stat.f_flag & ST_NOSUID) ++ mnt_flags |= MS_NOSUID; ++ if (stat.f_flag & ST_NODEV) ++ mnt_flags |= MS_NODEV; ++ if (stat.f_flag & ST_NOEXEC) ++ mnt_flags |= MS_NOEXEC; ++ if (stat.f_flag & ST_NOATIME) ++ mnt_flags |= MS_NOATIME; ++ if (stat.f_flag & ST_NODIRATIME) ++ mnt_flags |= MS_NODIRATIME; ++ if (stat.f_flag & ST_RELATIME) ++ mnt_flags |= MS_RELATIME; ++ if (stat.f_flag & ST_SYNCHRONOUS) ++ mnt_flags |= MS_SYNCHRONOUS; ++ if (stat.f_flag & ST_MANDLOCK) ++ mnt_flags |= ST_MANDLOCK; ++ ++ return mnt_flags; ++} ++ + static void create_and_enter_userns(void) + { + uid_t uid; +@@ -100,13 +163,10 @@ static void create_and_enter_userns(void) + strerror(errno)); + } + ++ maybe_write_file("/proc/self/setgroups", "deny"); + write_file("/proc/self/uid_map", "0 %d 1", uid); + write_file("/proc/self/gid_map", "0 %d 1", gid); + +- if (setgroups(0, NULL) != 0) { +- die("setgroups failed: %s\n", +- strerror(errno)); +- } + if (setgid(0) != 0) { + die ("setgid(0) failed %s\n", + strerror(errno)); +@@ -118,7 +178,8 @@ static void create_and_enter_userns(void) + } + + static +-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) ++bool test_unpriv_remount(const char *fstype, const char *mount_options, ++ int mount_flags, int remount_flags, int invalid_flags) + { + pid_t child; + +@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) + strerror(errno)); + } + +- if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) { +- die("mount of /tmp failed: %s\n", +- strerror(errno)); ++ if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) { ++ die("mount of %s with options '%s' on /tmp failed: %s\n", ++ fstype, ++ mount_options? 
mount_options : "", ++ strerror(errno)); + } + + create_and_enter_userns(); +@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) + + static bool test_unpriv_remount_simple(int mount_flags) + { +- return test_unpriv_remount(mount_flags, mount_flags, 0); ++ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0); + } + + static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags) + { +- return test_unpriv_remount(mount_flags, mount_flags, invalid_flags); ++ return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, ++ invalid_flags); ++} ++ ++static bool test_priv_mount_unpriv_remount(void) ++{ ++ pid_t child; ++ int ret; ++ const char *orig_path = "/dev"; ++ const char *dest_path = "/tmp"; ++ int orig_mnt_flags, remount_mnt_flags; ++ ++ child = fork(); ++ if (child == -1) { ++ die("fork failed: %s\n", ++ strerror(errno)); ++ } ++ if (child != 0) { /* parent */ ++ pid_t pid; ++ int status; ++ pid = waitpid(child, &status, 0); ++ if (pid == -1) { ++ die("waitpid failed: %s\n", ++ strerror(errno)); ++ } ++ if (pid != child) { ++ die("waited for %d got %d\n", ++ child, pid); ++ } ++ if (!WIFEXITED(status)) { ++ die("child did not terminate cleanly\n"); ++ } ++ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; ++ } ++ ++ orig_mnt_flags = read_mnt_flags(orig_path); ++ ++ create_and_enter_userns(); ++ ret = unshare(CLONE_NEWNS); ++ if (ret != 0) { ++ die("unshare(CLONE_NEWNS) failed: %s\n", ++ strerror(errno)); ++ } ++ ++ ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL); ++ if (ret != 0) { ++ die("recursive bind mount of %s onto %s failed: %s\n", ++ orig_path, dest_path, strerror(errno)); ++ } ++ ++ ret = mount(dest_path, dest_path, "none", ++ MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL); ++ if (ret != 0) { ++ /* system("cat /proc/self/mounts"); */ ++ die("remount of /tmp failed: %s\n", ++ strerror(errno)); ++ } ++ ++ remount_mnt_flags = read_mnt_flags(dest_path); ++ if (orig_mnt_flags != remount_mnt_flags) { ++ die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n", ++ dest_path, orig_path); ++ } ++ exit(EXIT_SUCCESS); + } + + int main(int argc, char **argv) + { +- if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) { ++ if (!test_unpriv_remount_simple(MS_RDONLY)) { + die("MS_RDONLY malfunctions\n"); + } +- if (!test_unpriv_remount_simple(MS_NODEV)) { ++ if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) { + die("MS_NODEV malfunctions\n"); + } +- if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) { ++ if (!test_unpriv_remount_simple(MS_NOSUID)) { + die("MS_NOSUID malfunctions\n"); + } +- if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) { ++ if (!test_unpriv_remount_simple(MS_NOEXEC)) { + die("MS_NOEXEC malfunctions\n"); + } +- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV, +- MS_NOATIME|MS_NODEV)) ++ if (!test_unpriv_remount_atime(MS_RELATIME, ++ MS_NOATIME)) + { + die("MS_RELATIME malfunctions\n"); + } +- if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV, +- MS_NOATIME|MS_NODEV)) ++ if (!test_unpriv_remount_atime(MS_STRICTATIME, ++ MS_NOATIME)) + { + die("MS_STRICTATIME malfunctions\n"); + } +- if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV, +- MS_STRICTATIME|MS_NODEV)) ++ if (!test_unpriv_remount_atime(MS_NOATIME, ++ MS_STRICTATIME)) + { +- die("MS_RELATIME malfunctions\n"); ++ die("MS_NOATIME malfunctions\n"); + } +- if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV, +- 
MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
++				       MS_NOATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
+-				       MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
++				       MS_NOATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
+-				       MS_STRICTATIME|MS_NODEV))
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
++				       MS_STRICTATIME))
+ 	{
+-		die("MS_RELATIME malfunctions\n");
++		die("MS_NOATIME|MS_NODIRATIME malfunctions\n");
+ 	}
+-	if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
+-				 MS_NOATIME|MS_NODEV))
++	if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
+ 	{
+ 		die("Default atime malfunctions\n");
+ 	}
++	if (!test_priv_mount_unpriv_remount()) {
++		die("Mount flags unexpectedly changed after remount\n");
++	}
+ 	return EXIT_SUCCESS;
+ }
diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.28-29.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.28-29.patch
new file mode 100644
index 0000000000..73bfc7ea19
--- /dev/null
+++ b/projects/imx6/patches/linux/linux-000-patch-3.14.28-29.patch
@@ -0,0 +1,2545 @@
+diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
+index 69b3cac..5d86756 100644
+--- a/Documentation/ramoops.txt
++++ b/Documentation/ramoops.txt
+@@ -14,11 +14,19 @@ survive after a restart.
+ 
+ 1. Ramoops concepts
+ 
+-Ramoops uses a predefined memory area to store the dump. The start and size of
+-the memory area are set using two variables:
++Ramoops uses a predefined memory area to store the dump. The start and size
++and type of the memory area are set using three variables:
+  * "mem_address" for the start
+  * "mem_size" for the size. The memory size will be rounded down to a
+    power of two.
++ * "mem_type" to specify the memory type (default is pgprot_writecombine).
++
++Typically the default value of mem_type=0 should be used as that sets the pstore
++mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
++pgprot_noncached, which only works on some platforms. This is because pstore
++depends on atomic operations. At least on ARM, pgprot_noncached causes the
++memory to be mapped strongly ordered, and atomic operations on strongly ordered
++memory are implementation defined, and won't work on many ARMs such as omaps.
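[Editor's note — illustrative only, not part of the patch; the address and size values below are made up.] Assuming ramoops is configured through its module parameters (mem_address, mem_size, record_size and dump_oops are the existing knobs this document describes; mem_type is the one added here), the new parameter can be passed on the kernel command line like so:

    ramoops.mem_address=0x88000000 ramoops.mem_size=0x100000 ramoops.mem_type=0
    ramoops.record_size=0x4000 ramoops.dump_oops=1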
+ + The memory area is divided into "record_size" chunks (also rounded down to + power of two) and each oops/panic writes a "record_size" chunk of +@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners: + static struct ramoops_platform_data ramoops_data = { + .mem_size = <...>, + .mem_address = <...>, ++ .mem_type = <...>, + .record_size = <...>, + .dump_oops = <...>, + .ecc = <...>, +diff --git a/Makefile b/Makefile +index a2e572b..7aff64e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 28 ++SUBLEVEL = 29 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi +index 767f0e3..f60aeee 100644 +--- a/arch/arm/boot/dts/dra7.dtsi ++++ b/arch/arm/boot/dts/dra7.dtsi +@@ -458,7 +458,7 @@ + }; + + wdt2: wdt@4ae14000 { +- compatible = "ti,omap4-wdt"; ++ compatible = "ti,omap3-wdt"; + reg = <0x4ae14000 0x80>; + interrupts = ; + ti,hwmods = "wd_timer2"; +diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts +index 57e00f9..a25debb 100644 +--- a/arch/arm/boot/dts/s3c6410-mini6410.dts ++++ b/arch/arm/boot/dts/s3c6410-mini6410.dts +@@ -198,10 +198,6 @@ + status = "okay"; + }; + +-&pwm { +- status = "okay"; +-}; +- + &pinctrl0 { + gpio_leds: gpio-leds { + samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7"; +diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi +index 4e3be4d..4f1eff3 100644 +--- a/arch/arm/boot/dts/s3c64xx.dtsi ++++ b/arch/arm/boot/dts/s3c64xx.dtsi +@@ -168,7 +168,6 @@ + clocks = <&clocks PCLK_PWM>; + samsung,pwm-outputs = <0>, <1>; + #pwm-cells = <3>; +- status = "disabled"; + }; + + pinctrl0: pinctrl@7f008000 { +diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig +index ee69829..cf4823b 100644 +--- a/arch/arm/configs/multi_v7_defconfig ++++ b/arch/arm/configs/multi_v7_defconfig +@@ -235,6 +235,7 @@ CONFIG_SND_SOC_TEGRA_MAX98090=y + CONFIG_USB=y + CONFIG_USB_XHCI_HCD=y + CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_EHCI_EXYNOS=y + CONFIG_USB_EHCI_TEGRA=y + CONFIG_USB_EHCI_HCD_PLATFORM=y + CONFIG_USB_ISP1760_HCD=y +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 1e8b030..aab70f6 100644 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -1021,6 +1021,15 @@ static int c_show(struct seq_file *m, void *v) + seq_printf(m, "model name\t: %s rev %d (%s)\n", + cpu_name, cpuid & 15, elf_platform); + ++#if defined(CONFIG_SMP) ++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", ++ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), ++ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); ++#else ++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", ++ loops_per_jiffy / (500000/HZ), ++ (loops_per_jiffy / (5000/HZ)) % 100); ++#endif + /* dump out the processor features */ + seq_puts(m, "Features\t: "); + +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index b7b4c86..8cd3724 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -388,8 +388,17 @@ asmlinkage void secondary_start_kernel(void) + + void __init smp_cpus_done(unsigned int max_cpus) + { +- printk(KERN_INFO "SMP: Total of %d processors activated.\n", +- num_online_cpus()); ++ int cpu; ++ unsigned long bogosum = 0; ++ ++ for_each_online_cpu(cpu) ++ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; ++ ++ printk(KERN_INFO "SMP: Total of %d processors activated " ++ "(%lu.%02lu BogoMIPS).\n", ++ num_online_cpus(), ++ bogosum / (500000/HZ), ++ (bogosum / (5000/HZ)) % 
100); + + hyp_mode_check(); + } +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c +index 4e9d581..c295c10 100644 +--- a/arch/arm/mach-mvebu/coherency.c ++++ b/arch/arm/mach-mvebu/coherency.c +@@ -125,6 +125,29 @@ int __init coherency_init(void) + { + struct device_node *np; + ++ /* ++ * The coherency fabric is needed: ++ * - For coherency between processors on Armada XP, so only ++ * when SMP is enabled. ++ * - For coherency between the processor and I/O devices, but ++ * this coherency requires many pre-requisites (write ++ * allocate cache policy, shareable pages, SMP bit set) that ++ * are only meant in SMP situations. ++ * ++ * Note that this means that on Armada 370, there is currently ++ * no way to use hardware I/O coherency, because even when ++ * CONFIG_SMP is enabled, is_smp() returns false due to the ++ * Armada 370 being a single-core processor. To lift this ++ * limitation, we would have to find a way to make the cache ++ * policy set to write-allocate (on all Armada SoCs), and to ++ * set the shareable attribute in page tables (on all Armada ++ * SoCs except the Armada 370). Unfortunately, such decisions ++ * are taken very early in the kernel boot process, at a point ++ * where we don't know yet on which SoC we are running. ++ */ ++ if (!is_smp()) ++ return 0; ++ + np = of_find_matching_node(NULL, of_coherency_table); + if (np) { + struct resource res; +@@ -151,6 +174,9 @@ static int __init coherency_late_init(void) + { + struct device_node *np; + ++ if (!is_smp()) ++ return 0; ++ + np = of_find_matching_node(NULL, of_coherency_table); + if (np) { + bus_register_notifier(&platform_bus_type, +diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c +index eefb30c..2b9cff9 100644 +--- a/arch/arm/mach-omap2/pm44xx.c ++++ b/arch/arm/mach-omap2/pm44xx.c +@@ -148,26 +148,6 @@ static inline int omap4_init_static_deps(void) + struct clockdomain *ducati_clkdm, *l3_2_clkdm; + int ret = 0; + +- if (omap_rev() == OMAP4430_REV_ES1_0) { +- WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); +- return -ENODEV; +- } +- +- pr_err("Power Management for TI OMAP4.\n"); +- /* +- * OMAP4 chip PM currently works only with certain (newer) +- * versions of bootloaders. This is due to missing code in the +- * kernel to properly reset and initialize some devices. +- * http://www.spinics.net/lists/arm-kernel/msg218641.html +- */ +- pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n"); +- +- ret = pwrdm_for_each(pwrdms_setup, NULL); +- if (ret) { +- pr_err("Failed to setup powerdomains\n"); +- return ret; +- } +- + /* + * The dynamic dependency between MPUSS -> MEMIF and + * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as +@@ -231,6 +211,15 @@ int __init omap4_pm_init(void) + + pr_info("Power Management for TI OMAP4+ devices.\n"); + ++ /* ++ * OMAP4 chip PM currently works only with certain (newer) ++ * versions of bootloaders. This is due to missing code in the ++ * kernel to properly reset and initialize some devices. 
++ * http://www.spinics.net/lists/arm-kernel/msg218641.html ++ */ ++ if (cpu_is_omap44xx()) ++ pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n"); ++ + ret = pwrdm_for_each(pwrdms_setup, NULL); + if (ret) { + pr_err("Failed to setup powerdomains.\n"); +diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h +index e9c149c..456d67c 100644 +--- a/arch/arm64/include/asm/suspend.h ++++ b/arch/arm64/include/asm/suspend.h +@@ -21,6 +21,7 @@ struct sleep_save_sp { + phys_addr_t save_ptr_stash_phys; + }; + ++extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); + extern void cpu_resume(void); + extern int cpu_suspend(unsigned long); + +diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S +index b192572..ede186c 100644 +--- a/arch/arm64/kernel/sleep.S ++++ b/arch/arm64/kernel/sleep.S +@@ -49,28 +49,39 @@ + orr \dst, \dst, \mask // dst|=(aff3>>rs3) + .endm + /* +- * Save CPU state for a suspend. This saves callee registers, and allocates +- * space on the kernel stack to save the CPU specific registers + some +- * other data for resume. ++ * Save CPU state for a suspend and execute the suspend finisher. ++ * On success it will return 0 through cpu_resume - ie through a CPU ++ * soft/hard reboot from the reset vector. ++ * On failure it returns the suspend finisher return value or force ++ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher ++ * is not allowed to return, if it does this must be considered failure). ++ * It saves callee registers, and allocates space on the kernel stack ++ * to save the CPU specific registers + some other data for resume. + * + * x0 = suspend finisher argument ++ * x1 = suspend finisher function pointer + */ +-ENTRY(__cpu_suspend) ++ENTRY(__cpu_suspend_enter) + stp x29, lr, [sp, #-96]! + stp x19, x20, [sp,#16] + stp x21, x22, [sp,#32] + stp x23, x24, [sp,#48] + stp x25, x26, [sp,#64] + stp x27, x28, [sp,#80] ++ /* ++ * Stash suspend finisher and its argument in x20 and x19 ++ */ ++ mov x19, x0 ++ mov x20, x1 + mov x2, sp + sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx +- mov x1, sp ++ mov x0, sp + /* +- * x1 now points to struct cpu_suspend_ctx allocated on the stack ++ * x0 now points to struct cpu_suspend_ctx allocated on the stack + */ +- str x2, [x1, #CPU_CTX_SP] +- ldr x2, =sleep_save_sp +- ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] ++ str x2, [x0, #CPU_CTX_SP] ++ ldr x1, =sleep_save_sp ++ ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] + #ifdef CONFIG_SMP + mrs x7, mpidr_el1 + ldr x9, =mpidr_hash +@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend) + ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] + ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] + compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 +- add x2, x2, x8, lsl #3 ++ add x1, x1, x8, lsl #3 + #endif +- bl __cpu_suspend_finisher ++ bl __cpu_suspend_save ++ /* ++ * Grab suspend finisher in x20 and its argument in x19 ++ */ ++ mov x0, x19 ++ mov x1, x20 ++ /* ++ * We are ready for power down, fire off the suspend finisher ++ * in x1, with argument in x0 ++ */ ++ blr x1 + /* +- * Never gets here, unless suspend fails. ++ * Never gets here, unless suspend finisher fails. 
+ * Successful cpu_suspend should return from cpu_resume, returning + * through this code path is considered an error + * If the return value is set to 0 force x0 = -EOPNOTSUPP +@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend) + ldp x27, x28, [sp, #80] + ldp x29, lr, [sp], #96 + ret +-ENDPROC(__cpu_suspend) ++ENDPROC(__cpu_suspend_enter) + .ltorg + + /* +@@ -126,14 +147,12 @@ cpu_resume_after_mmu: + ret + ENDPROC(cpu_resume_after_mmu) + +- .data + ENTRY(cpu_resume) + bl el2_setup // if in EL2 drop to EL1 cleanly + #ifdef CONFIG_SMP + mrs x1, mpidr_el1 +- adr x4, mpidr_hash_ptr +- ldr x5, [x4] +- add x8, x4, x5 // x8 = struct mpidr_hash phys address ++ adrp x8, mpidr_hash ++ add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address + /* retrieve mpidr_hash members to compute the hash */ + ldr x2, [x8, #MPIDR_HASH_MASK] + ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] +@@ -143,14 +162,15 @@ ENTRY(cpu_resume) + #else + mov x7, xzr + #endif +- adr x0, sleep_save_sp ++ adrp x0, sleep_save_sp ++ add x0, x0, #:lo12:sleep_save_sp + ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] + ldr x0, [x0, x7, lsl #3] + /* load sp from context */ + ldr x2, [x0, #CPU_CTX_SP] +- adr x1, sleep_idmap_phys ++ adrp x1, sleep_idmap_phys + /* load physical address of identity map page table in x1 */ +- ldr x1, [x1] ++ ldr x1, [x1, #:lo12:sleep_idmap_phys] + mov sp, x2 + /* + * cpu_do_resume expects x0 to contain context physical address +@@ -159,26 +179,3 @@ ENTRY(cpu_resume) + bl cpu_do_resume // PC relative jump, MMU off + b cpu_resume_mmu // Resume MMU, never returns + ENDPROC(cpu_resume) +- +- .align 3 +-mpidr_hash_ptr: +- /* +- * offset of mpidr_hash symbol from current location +- * used to obtain run-time mpidr_hash address with MMU off +- */ +- .quad mpidr_hash - . +-/* +- * physical address of identity mapped page tables +- */ +- .type sleep_idmap_phys, #object +-ENTRY(sleep_idmap_phys) +- .quad 0 +-/* +- * struct sleep_save_sp { +- * phys_addr_t *save_ptr_stash; +- * phys_addr_t save_ptr_stash_phys; +- * }; +- */ +- .type sleep_save_sp, #object +-ENTRY(sleep_save_sp) +- .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp +diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c +index 1fa9ce4..2d6b606 100644 +--- a/arch/arm64/kernel/suspend.c ++++ b/arch/arm64/kernel/suspend.c +@@ -5,26 +5,24 @@ + #include + #include + #include ++#include + #include + #include + #include + +-extern int __cpu_suspend(unsigned long); ++extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); + /* +- * This is called by __cpu_suspend() to save the state, and do whatever ++ * This is called by __cpu_suspend_enter() to save the state, and do whatever + * flushing is required to ensure that when the CPU goes to sleep we have + * the necessary data available when the caches are not searched. 
+ * +- * @arg: Argument to pass to suspend operations +- * @ptr: CPU context virtual address +- * @save_ptr: address of the location where the context physical address +- * must be saved ++ * ptr: CPU context virtual address ++ * save_ptr: address of the location where the context physical address ++ * must be saved + */ +-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, +- phys_addr_t *save_ptr) ++void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, ++ phys_addr_t *save_ptr) + { +- int cpu = smp_processor_id(); +- + *save_ptr = virt_to_phys(ptr); + + cpu_do_suspend(ptr); +@@ -35,8 +33,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, + */ + __flush_dcache_area(ptr, sizeof(*ptr)); + __flush_dcache_area(save_ptr, sizeof(*save_ptr)); +- +- return cpu_ops[cpu]->cpu_suspend(arg); + } + + /* +@@ -56,15 +52,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) + } + + /** +- * cpu_suspend ++ * cpu_suspend() - function to enter a low-power state ++ * @arg: argument to pass to CPU suspend operations + * +- * @arg: argument to pass to the finisher function ++ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU ++ * operations back-end error code otherwise. + */ + int cpu_suspend(unsigned long arg) + { +- struct mm_struct *mm = current->active_mm; +- int ret, cpu = smp_processor_id(); +- unsigned long flags; ++ int cpu = smp_processor_id(); + + /* + * If cpu_ops have not been registered or suspend +@@ -72,6 +68,21 @@ int cpu_suspend(unsigned long arg) + */ + if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) + return -EOPNOTSUPP; ++ return cpu_ops[cpu]->cpu_suspend(arg); ++} ++ ++/* ++ * __cpu_suspend ++ * ++ * arg: argument to pass to the finisher function ++ * fn: finisher function pointer ++ * ++ */ ++int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ++{ ++ struct mm_struct *mm = current->active_mm; ++ int ret; ++ unsigned long flags; + + /* + * From this point debug exceptions are disabled to prevent +@@ -86,16 +97,27 @@ int cpu_suspend(unsigned long arg) + * page tables, so that the thread address space is properly + * set-up on function return. + */ +- ret = __cpu_suspend(arg); ++ ret = __cpu_suspend_enter(arg, fn); + if (ret == 0) { +- cpu_switch_mm(mm->pgd, mm); ++ /* ++ * We are resuming from reset with TTBR0_EL1 set to the ++ * idmap to enable the MMU; restore the active_mm mappings in ++ * TTBR0_EL1 unless the active_mm == &init_mm, in which case ++ * the thread entered __cpu_suspend with TTBR0_EL1 set to ++ * reserved TTBR0 page tables and should be restored as such. ++ */ ++ if (mm == &init_mm) ++ cpu_set_reserved_ttbr0(); ++ else ++ cpu_switch_mm(mm->pgd, mm); ++ + flush_tlb_all(); + + /* + * Restore per-cpu offset before any kernel + * subsystem relying on it has a chance to run. 
+ */ +- set_my_cpu_offset(per_cpu_offset(cpu)); ++ set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + + /* + * Restore HW breakpoint registers to sane values +@@ -116,10 +138,10 @@ int cpu_suspend(unsigned long arg) + return ret; + } + +-extern struct sleep_save_sp sleep_save_sp; +-extern phys_addr_t sleep_idmap_phys; ++struct sleep_save_sp sleep_save_sp; ++phys_addr_t sleep_idmap_phys; + +-static int cpu_suspend_init(void) ++static int __init cpu_suspend_init(void) + { + void *ctx_ptr; + +diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c +index 27c93f4..fc0927a 100644 +--- a/arch/powerpc/kernel/mce_power.c ++++ b/arch/powerpc/kernel/mce_power.c +@@ -78,7 +78,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) + } + if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) +- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); ++ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); + /* reset error bits */ + dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; + } +@@ -109,7 +109,7 @@ static long mce_handle_common_ierror(uint64_t srr1) + break; + case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { +- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); ++ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); + handled = 1; + } + break; +diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c +index 75702e2..f7089fc 100644 +--- a/arch/powerpc/kernel/udbg_16550.c ++++ b/arch/powerpc/kernel/udbg_16550.c +@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c) + + static int udbg_uart_getc_poll(void) + { +- if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR)) ++ if (!udbg_uart_in) ++ return -1; ++ ++ if (!(udbg_uart_in(UART_LSR) & LSR_DR)) + return udbg_uart_in(UART_RBR); ++ + return -1; + } + +diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h +index 2a46ca7..2874be9 100644 +--- a/arch/x86/include/asm/vsyscall.h ++++ b/arch/x86/include/asm/vsyscall.h +@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void) + native_read_tscp(&p); + } else { + /* Load per CPU data from GDT */ +- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); ++ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); + } + + return p; +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +index 047f540..2f98588 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +@@ -2886,6 +2886,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) + return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); + } + ++/* ++ * Using uncore_pmu_event_init pmu event_init callback ++ * as a detection point for uncore events. 
++ */ ++static int uncore_pmu_event_init(struct perf_event *event); ++ ++static bool is_uncore_event(struct perf_event *event) ++{ ++ return event->pmu->event_init == uncore_pmu_event_init; ++} ++ + static int + uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) + { +@@ -2900,13 +2911,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b + return -EINVAL; + + n = box->n_events; +- box->event_list[n] = leader; +- n++; ++ ++ if (is_uncore_event(leader)) { ++ box->event_list[n] = leader; ++ n++; ++ } ++ + if (!dogrp) + return n; + + list_for_each_entry(event, &leader->sibling_list, group_entry) { +- if (event->state <= PERF_EVENT_STATE_OFF) ++ if (!is_uncore_event(event) || ++ event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 49088b8..dcae8fa 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -4384,7 +4384,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm) + * zap all shadow pages. + */ + if (unlikely(kvm_current_mmio_generation(kvm) == 0)) { +- printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); ++ printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n"); + kvm_mmu_invalidate_zap_all_pages(kvm); + } + } +diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c +index 431e875..ab6ba35 100644 +--- a/arch/x86/vdso/vma.c ++++ b/arch/x86/vdso/vma.c +@@ -117,30 +117,45 @@ subsys_initcall(init_vdso); + + struct linux_binprm; + +-/* Put the vdso above the (randomized) stack with another randomized offset. +- This way there is no hole in the middle of address space. +- To save memory make sure it is still in the same PTE as the stack top. +- This doesn't give that many random bits */ ++/* ++ * Put the vdso above the (randomized) stack with another randomized ++ * offset. This way there is no hole in the middle of address space. ++ * To save memory make sure it is still in the same PTE as the stack ++ * top. This doesn't give that many random bits. ++ * ++ * Note that this algorithm is imperfect: the distribution of the vdso ++ * start address within a PMD is biased toward the end. ++ * ++ * Only used for the 64-bit and x32 vdsos. ++ */ + static unsigned long vdso_addr(unsigned long start, unsigned len) + { + unsigned long addr, end; + unsigned offset; +- end = (start + PMD_SIZE - 1) & PMD_MASK; ++ ++ /* ++ * Round up the start address. It can start out unaligned as a result ++ * of stack start randomization. ++ */ ++ start = PAGE_ALIGN(start); ++ ++ /* Round the lowest possible end address up to a PMD boundary. */ ++ end = (start + len + PMD_SIZE - 1) & PMD_MASK; + if (end >= TASK_SIZE_MAX) + end = TASK_SIZE_MAX; + end -= len; +- /* This loses some more bits than a modulo, but is cheaper */ +- offset = get_random_int() & (PTRS_PER_PTE - 1); +- addr = start + (offset << PAGE_SHIFT); +- if (addr >= end) +- addr = end; ++ ++ if (end > start) { ++ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); ++ addr = start + (offset << PAGE_SHIFT); ++ } else { ++ addr = start; ++ } + + /* +- * page-align it here so that get_unmapped_area doesn't +- * align it wrongfully again to the next page. addr can come in 4K +- * unaligned here as a result of stack start randomization. ++ * Forcibly align the final address in case we have a hardware ++ * issue that requires alignment for performance reasons. 
+ */ +- addr = PAGE_ALIGN(addr); + addr = align_vdso_addr(addr); + + return addr; +diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c +index f872127..78d3835 100644 +--- a/block/blk-mq-cpumap.c ++++ b/block/blk-mq-cpumap.c +@@ -95,7 +95,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) + unsigned int *map; + + /* If cpus are offline, map them to first hctx */ +- map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, ++ map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, + reg->numa_node); + if (!map) + return NULL; +diff --git a/block/genhd.c b/block/genhd.c +index e6723bd..a8d586a 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno) + struct disk_part_tbl *old_ptbl = disk->part_tbl; + struct disk_part_tbl *new_ptbl; + int len = old_ptbl ? old_ptbl->len : 0; +- int target = partno + 1; ++ int i, target; + size_t size; +- int i; ++ ++ /* ++ * check for int overflow, since we can get here from blkpg_ioctl() ++ * with a user passed 'partno'. ++ */ ++ target = partno + 1; ++ if (target < 0) ++ return -EINVAL; + + /* disk_max_parts() is zero during initialization, ignore if so */ + if (disk_max_parts(disk) && target > disk_max_parts(disk)) +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index c14a00d..19f6505 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device) + + device->power.state = ACPI_STATE_UNKNOWN; + if (!acpi_device_is_present(device)) +- return 0; ++ return -ENXIO; + + result = acpi_device_get_power(device, &state); + if (result) +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 493a342..666beea 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -865,7 +865,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device) + if (device->wakeup.flags.valid) + acpi_power_resources_list_free(&device->wakeup.resources); + +- if (!device->flags.power_manageable) ++ if (!device->power.flags.power_resources) + return; + + for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { +@@ -1554,10 +1554,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) + device->power.flags.power_resources) + device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; + +- if (acpi_bus_init_power(device)) { +- acpi_free_power_resources_lists(device); ++ if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; +- } + } + + static void acpi_bus_get_flags(struct acpi_device *device) +@@ -2043,13 +2041,18 @@ static void acpi_bus_attach(struct acpi_device *device) + /* Skip devices that are not present. 
*/ + if (!acpi_device_is_present(device)) { + device->flags.visited = false; ++ device->flags.power_manageable = 0; + return; + } + if (device->handler) + goto ok; + + if (!device->flags.initialized) { +- acpi_bus_update_power(device, NULL); ++ device->flags.power_manageable = ++ device->power.states[ACPI_STATE_D0].flags.valid; ++ if (acpi_bus_init_power(device)) ++ device->flags.power_manageable = 0; ++ + device->flags.initialized = true; + } + device->flags.visited = false; +diff --git a/drivers/base/bus.c b/drivers/base/bus.c +index 59dc808..45d0fa7 100644 +--- a/drivers/base/bus.c ++++ b/drivers/base/bus.c +@@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus, + const char *buf, size_t count) + { + struct device *dev; ++ int err = -EINVAL; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (!dev) + return -ENODEV; +- if (bus_rescan_devices_helper(dev, NULL) != 0) +- return -EINVAL; +- return count; ++ if (bus_rescan_devices_helper(dev, NULL) == 0) ++ err = count; ++ put_device(dev); ++ return err; + } + + static struct device *next_device(struct klist_iter *i) +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 7cd42ea..d92c7d9 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1743,6 +1743,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 91bc66b..4850da3 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -511,6 +511,7 @@ + #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 + #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 ++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a + #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 + + #define USB_VENDOR_ID_LABTEC 0x1020 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index a713e62..4b87bb1 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = { + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, ++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), ++ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + {} +diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c +index b92bf01..158fcf5 100644 +--- a/drivers/hid/hid-kye.c ++++ b/drivers/hid/hid-kye.c +@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, + } + break; + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: ++ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: + if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { + rdesc = mousepen_i608x_rdesc_fixed; + *rsize = sizeof(mousepen_i608x_rdesc_fixed); +@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) 
+ switch (id->product) { + case USB_DEVICE_ID_KYE_EASYPEN_I405X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: ++ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: + case USB_DEVICE_ID_KYE_EASYPEN_M610X: + ret = kye_tablet_enable(hdev); + if (ret) { +@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, ++ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_KYE_EASYPEN_M610X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, +diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c +index 1a07e07..47d7e74 100644 +--- a/drivers/hid/hid-roccat-pyra.c ++++ b/drivers/hid/hid-roccat-pyra.c +@@ -35,6 +35,8 @@ static struct class *pyra_class; + static void profile_activated(struct pyra_device *pyra, + unsigned int new_profile) + { ++ if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) ++ return; + pyra->actual_profile = new_profile; + pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; + } +@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp, + if (off != 0 || count != PYRA_SIZE_SETTINGS) + return -EINVAL; + +- mutex_lock(&pyra->pyra_lock); +- + settings = (struct pyra_settings const *)buf; ++ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) ++ return -EINVAL; ++ ++ mutex_lock(&pyra->pyra_lock); + + retval = pyra_set_settings(usb_dev, settings); + if (retval) { +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index 42eebd1..6e5d8fe 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -136,6 +136,7 @@ struct i2c_hid { + * descriptor. */ + unsigned int bufsize; /* i2c buffer size */ + char *inbuf; /* Input buffer */ ++ char *rawbuf; /* Raw Input buffer */ + char *cmdbuf; /* Command buffer */ + char *argsbuf; /* Command arguments buffer */ + +@@ -355,7 +356,7 @@ static int i2c_hid_hwreset(struct i2c_client *client) + static void i2c_hid_get_input(struct i2c_hid *ihid) + { + int ret, ret_size; +- int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); ++ int size = ihid->bufsize; + + ret = i2c_master_recv(ihid->client, ihid->inbuf, size); + if (ret != size) { +@@ -482,9 +483,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type, + static void i2c_hid_free_buffers(struct i2c_hid *ihid) + { + kfree(ihid->inbuf); ++ kfree(ihid->rawbuf); + kfree(ihid->argsbuf); + kfree(ihid->cmdbuf); + ihid->inbuf = NULL; ++ ihid->rawbuf = NULL; + ihid->cmdbuf = NULL; + ihid->argsbuf = NULL; + ihid->bufsize = 0; +@@ -500,10 +503,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size) + report_size; /* report */ + + ihid->inbuf = kzalloc(report_size, GFP_KERNEL); ++ ihid->rawbuf = kzalloc(report_size, GFP_KERNEL); + ihid->argsbuf = kzalloc(args_len, GFP_KERNEL); + ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL); + +- if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) { ++ if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) { + i2c_hid_free_buffers(ihid); + return -ENOMEM; + } +@@ -530,12 +534,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, + + ret = i2c_hid_get_report(client, + report_type == HID_FEATURE_REPORT ? 
0x03 : 0x01, +- report_number, ihid->inbuf, ask_count); ++ report_number, ihid->rawbuf, ask_count); + + if (ret < 0) + return ret; + +- ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8); ++ ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8); + + if (ret_count <= 2) + return 0; +@@ -544,7 +548,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, + + /* The query buffer contains the size, dropping it in the reply */ + count = min(count, ret_count - 2); +- memcpy(buf, ihid->inbuf + 2, count); ++ memcpy(buf, ihid->rawbuf + 2, count); + + return count; + } +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index deb3643..473c0c4 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -116,6 +116,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, ++ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index fa92046..505fe29 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -202,9 +202,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work) + unsigned long flags; + struct vmbus_channel *primary_channel; + struct vmbus_channel_relid_released msg; ++ struct device *dev; ++ ++ if (channel->device_obj) { ++ dev = get_device(&channel->device_obj->device); ++ if (dev) { ++ vmbus_device_unregister(channel->device_obj); ++ put_device(dev); ++ } ++ } + +- if (channel->device_obj) +- vmbus_device_unregister(channel->device_obj); + memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); + msg.child_relid = channel->offermsg.child_relid; + msg.header.msgtype = CHANNELMSG_RELID_RELEASED; +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 40f6b47..8855ecb 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -1768,7 +1768,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + struct dma_pte *first_pte = NULL, *pte = NULL; + phys_addr_t uninitialized_var(pteval); + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; +- unsigned long sg_res; ++ unsigned long sg_res = 0; + unsigned int largepage_lvl = 0; + unsigned long lvl_pages = 0; + +@@ -1779,10 +1779,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + + prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; + +- if (sg) +- sg_res = 0; +- else { +- sg_res = nr_pages + 1; ++ if (!sg) { ++ sg_res = nr_pages; + pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; + } + +diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c +index 6b1a6ef..0c3a647 100644 +--- a/drivers/misc/genwqe/card_utils.c ++++ b/drivers/misc/genwqe/card_utils.c +@@ -490,6 +490,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, + m->nr_pages, + 1, /* write by caller */ + m->page_list); /* ptrs to pages */ ++ if (rc < 0) ++ goto fail_get_user_pages; + + /* assumption: get_user_pages can 
be killed by signals. */ + if (rc < m->nr_pages) { +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 9ddef47..7e01763 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1343,6 +1343,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) + + sdhci_runtime_pm_get(host); + ++ present = mmc_gpio_get_cd(host->mmc); ++ + spin_lock_irqsave(&host->lock, flags); + + WARN_ON(host->mrq != NULL); +@@ -1371,7 +1373,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) + * zero: cd-gpio is used, and card is removed + * one: cd-gpio is used, and card is present + */ +- present = mmc_gpio_get_cd(host->mmc); + if (present < 0) { + /* If polling, assume that the card is always present. */ + if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) +@@ -2082,15 +2083,18 @@ static void sdhci_card_event(struct mmc_host *mmc) + { + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; ++ int present; + + /* First check if client has provided their own card event */ + if (host->ops->card_event) + host->ops->card_event(host); + ++ present = sdhci_do_get_cd(host); ++ + spin_lock_irqsave(&host->lock, flags); + + /* Check host->mrq first in case we are runtime suspended */ +- if (host->mrq && !sdhci_do_get_cd(host)) { ++ if (host->mrq && !present) { + pr_err("%s: Card removed during transfer!\n", + mmc_hostname(host->mmc)); + pr_err("%s: Resetting controller.\n", +diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c +index eeab969..b55bc52 100644 +--- a/drivers/mtd/tests/torturetest.c ++++ b/drivers/mtd/tests/torturetest.c +@@ -264,7 +264,9 @@ static int __init tort_init(void) + int i; + void *patt; + +- mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); ++ err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); ++ if (err) ++ goto out; + + /* Check if the eraseblocks contain only 0xFF bytes */ + if (check) { +diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c +index ec2c2dc..2a1b6e0 100644 +--- a/drivers/mtd/ubi/upd.c ++++ b/drivers/mtd/ubi/upd.c +@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + ubi_assert(!vol->updating && !vol->changing_leb); + vol->updating = 1; + ++ vol->upd_buf = vmalloc(ubi->leb_size); ++ if (!vol->upd_buf) ++ return -ENOMEM; ++ + err = set_update_marker(ubi, vol); + if (err) + return err; +@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, + err = clear_update_marker(ubi, vol, 0); + if (err) + return err; ++ ++ vfree(vol->upd_buf); + vol->updating = 0; + return 0; + } + +- vol->upd_buf = vmalloc(ubi->leb_size); +- if (!vol->upd_buf) +- return -ENOMEM; +- + vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, + vol->usable_leb_size); + vol->upd_bytes = bytes; +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index 02317c1..68b924e 100644 +--- a/drivers/mtd/ubi/wl.c ++++ b/drivers/mtd/ubi/wl.c +@@ -1205,7 +1205,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + + err = do_sync_erase(ubi, e1, vol_id, lnum, 0); + if (err) { +- kmem_cache_free(ubi_wl_entry_slab, e1); + if (e2) + kmem_cache_free(ubi_wl_entry_slab, e2); + goto out_ro; +@@ -1219,10 +1218,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", + e2->pnum, vol_id, lnum); + err = do_sync_erase(ubi, e2, vol_id, lnum, 0); +- if (err) { +- kmem_cache_free(ubi_wl_entry_slab, e2); ++ if 
(err) + goto out_ro; +- } + } + + dbg_wl("done"); +@@ -1258,10 +1255,9 @@ out_not_moved: + + ubi_free_vid_hdr(ubi, vid_hdr); + err = do_sync_erase(ubi, e2, vol_id, lnum, torture); +- if (err) { +- kmem_cache_free(ubi_wl_entry_slab, e2); ++ if (err) + goto out_ro; +- } ++ + mutex_unlock(&ubi->move_mutex); + return 0; + +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +index 0b7a4c3..03e7f0c 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +@@ -734,7 +734,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, + dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); + if (!dev->cmd_buf) { + err = -ENOMEM; +- goto lbl_set_intf_data; ++ goto lbl_free_candev; + } + + dev->udev = usb_dev; +@@ -773,7 +773,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, + err = register_candev(netdev); + if (err) { + dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); +- goto lbl_free_cmd_buf; ++ goto lbl_restore_intf_data; + } + + if (dev->prev_siblings) +@@ -786,14 +786,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, + if (dev->adapter->dev_init) { + err = dev->adapter->dev_init(dev); + if (err) +- goto lbl_free_cmd_buf; ++ goto lbl_unregister_candev; + } + + /* set bus off */ + if (dev->adapter->dev_set_bus) { + err = dev->adapter->dev_set_bus(dev, 0); + if (err) +- goto lbl_free_cmd_buf; ++ goto lbl_unregister_candev; + } + + /* get device number early */ +@@ -805,11 +805,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, + + return 0; + +-lbl_free_cmd_buf: +- kfree(dev->cmd_buf); ++lbl_unregister_candev: ++ unregister_candev(netdev); + +-lbl_set_intf_data: ++lbl_restore_intf_data: + usb_set_intfdata(intf, dev->prev_siblings); ++ kfree(dev->cmd_buf); ++ ++lbl_free_candev: + free_candev(netdev); + + return err; +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +index 263dd92..f7f796a 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, + if (!(dev->state & PCAN_USB_STATE_CONNECTED)) + return 0; + +- memset(req_addr, '\0', req_size); +- + req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; + + switch (req_id) { +@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, + default: + p = usb_rcvctrlpipe(dev->udev, 0); + req_type |= USB_DIR_IN; ++ memset(req_addr, '\0', req_size); + break; + } + +diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c +index 0583c69..ddaad71 100644 +--- a/drivers/net/wireless/ath/ath5k/qcu.c ++++ b/drivers/net/wireless/ath/ath5k/qcu.c +@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, + } else { + switch (queue_type) { + case AR5K_TX_QUEUE_DATA: +- for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; +- ah->ah_txq[queue].tqi_type != +- AR5K_TX_QUEUE_INACTIVE; queue++) { +- +- if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) +- return -EINVAL; +- } ++ queue = queue_info->tqi_subtype; + break; + case AR5K_TX_QUEUE_UAPSD: + queue = AR5K_TX_QUEUE_ID_UAPSD; +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h +index 0acd4b5..32ae0a4 100644 +--- a/drivers/net/wireless/ath/ath9k/hw.h ++++ 
b/drivers/net/wireless/ath/ath9k/hw.h +@@ -216,8 +216,8 @@ + #define AH_WOW_BEACON_MISS BIT(3) + + enum ath_hw_txq_subtype { +- ATH_TXQ_AC_BE = 0, +- ATH_TXQ_AC_BK = 1, ++ ATH_TXQ_AC_BK = 0, ++ ATH_TXQ_AC_BE = 1, + ATH_TXQ_AC_VI = 2, + ATH_TXQ_AC_VO = 3, + }; +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c +index 5f72758..8f93ed3 100644 +--- a/drivers/net/wireless/ath/ath9k/mac.c ++++ b/drivers/net/wireless/ath/ath9k/mac.c +@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, + q = ATH9K_NUM_TX_QUEUES - 3; + break; + case ATH9K_TX_QUEUE_DATA: +- for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++) +- if (ah->txq[q].tqi_type == +- ATH9K_TX_QUEUE_INACTIVE) +- break; +- if (q == ATH9K_NUM_TX_QUEUES) { +- ath_err(common, "No available TX queue\n"); +- return -1; +- } ++ q = qinfo->tqi_subtype; + break; + default: + ath_err(common, "Invalid TX queue type: %u\n", type); +diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h +index d8948aa..60dc387 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h ++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h +@@ -1394,7 +1394,7 @@ enum iwl_sf_scenario { + #define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ + + /* smart FIFO default values */ +-#define SF_W_MARK_SISO 4096 ++#define SF_W_MARK_SISO 6144 + #define SF_W_MARK_MIMO2 8192 + #define SF_W_MARK_MIMO3 6144 + #define SF_W_MARK_LEGACY 4096 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 34dff3a..5b428db 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -214,14 +214,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + res->flags |= IORESOURCE_SIZEALIGN; + if (res->flags & IORESOURCE_IO) { + l &= PCI_BASE_ADDRESS_IO_MASK; ++ sz &= PCI_BASE_ADDRESS_IO_MASK; + mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; + } else { + l &= PCI_BASE_ADDRESS_MEM_MASK; ++ sz &= PCI_BASE_ADDRESS_MEM_MASK; + mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; + } + } else { + res->flags |= (l & IORESOURCE_ROM_ENABLE); + l &= PCI_ROM_ADDRESS_MASK; ++ sz &= PCI_ROM_ADDRESS_MASK; + mask = (u32)PCI_ROM_ADDRESS_MASK; + } + +diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c +index 7854a65..110eab8 100644 +--- a/drivers/rtc/rtc-isl12057.c ++++ b/drivers/rtc/rtc-isl12057.c +@@ -89,7 +89,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) + tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]); + + if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */ +- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f); ++ tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f); + if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM) + tm->tm_hour += 12; + } else { /* 24 hour mode */ +@@ -98,7 +98,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) + + tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]); + tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */ +- tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */ ++ tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */ + tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100; + } + +diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c +index 3eb3642..d2b1ab3 100644 +--- a/drivers/rtc/rtc-sirfsoc.c ++++ b/drivers/rtc/rtc-sirfsoc.c +@@ -290,14 +290,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) + rtc_div = ((32768 / RTC_HZ) / 2) - 1; + sirfsoc_rtc_iobrg_writel(rtc_div, 
rtcdrv->rtc_base + RTC_DIV); + +- rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, +- &sirfsoc_rtc_ops, THIS_MODULE); +- if (IS_ERR(rtcdrv->rtc)) { +- err = PTR_ERR(rtcdrv->rtc); +- dev_err(&pdev->dev, "can't register RTC device\n"); +- return err; +- } +- + /* 0x3 -> RTC_CLK */ + sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, + rtcdrv->rtc_base + RTC_CLOCK_SWITCH); +@@ -312,6 +304,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) + rtcdrv->overflow_rtc = + sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); + ++ rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, ++ &sirfsoc_rtc_ops, THIS_MODULE); ++ if (IS_ERR(rtcdrv->rtc)) { ++ err = PTR_ERR(rtcdrv->rtc); ++ dev_err(&pdev->dev, "can't register RTC device\n"); ++ return err; ++ } ++ + rtcdrv->irq = platform_get_irq(pdev, 0); + err = devm_request_irq( + &pdev->dev, +diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c +index 119f7af..4dcb292 100644 +--- a/drivers/spi/spi-fsl-spi.c ++++ b/drivers/spi/spi-fsl-spi.c +@@ -362,18 +362,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, + static void fsl_spi_do_one_msg(struct spi_message *m) + { + struct spi_device *spi = m->spi; +- struct spi_transfer *t; ++ struct spi_transfer *t, *first; + unsigned int cs_change; + const int nsecs = 50; + int status; + +- cs_change = 1; +- status = 0; ++ /* Don't allow changes if CS is active */ ++ first = list_first_entry(&m->transfers, struct spi_transfer, ++ transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { +- if (t->bits_per_word || t->speed_hz) { +- /* Don't allow changes if CS is active */ ++ if ((first->bits_per_word != t->bits_per_word) || ++ (first->speed_hz != t->speed_hz)) { + status = -EINVAL; ++ dev_err(&spi->dev, ++ "bits_per_word/speed_hz should be same for the same SPI transfer\n"); ++ return; ++ } ++ } + ++ cs_change = 1; ++ status = -EINVAL; ++ list_for_each_entry(t, &m->transfers, transfer_list) { ++ if (t->bits_per_word || t->speed_hz) { + if (cs_change) + status = fsl_spi_setup_transfer(spi, t); + if (status < 0) +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 28ac3f3..d46b4cc 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -321,7 +321,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty) + + static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) + { +- *read_buf_addr(ldata, ldata->read_head++) = c; ++ *read_buf_addr(ldata, ldata->read_head) = c; ++ ldata->read_head++; + } + + /** +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 9cd706d..7d3a3f5 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -544,11 +544,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, + unsigned int old) + { + struct s3c24xx_uart_port *ourport = to_ourport(port); ++ int timeout = 10000; + + ourport->pm_level = level; + + switch (level) { + case 3: ++ while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) ++ udelay(100); ++ + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 331f06a..d7049c3 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1169,10 +1169,11 @@ next_desc: + } else { + control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); + data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0)); +- if 
(!control_interface || !data_interface) { +- dev_dbg(&intf->dev, "no interfaces\n"); +- return -ENODEV; +- } ++ } ++ ++ if (!control_interface || !data_interface) { ++ dev_dbg(&intf->dev, "no interfaces\n"); ++ return -ENODEV; + } + + if (data_interface_num != call_interface_num) +@@ -1448,6 +1449,7 @@ alloc_fail8: + &dev_attr_wCountryCodes); + device_remove_file(&acm->control->dev, + &dev_attr_iCountryCodeRelDate); ++ kfree(acm->country_codes); + } + device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); + alloc_fail7: +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index ebd8f21..9df5d6e 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) + dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; + phys_addr_t paddr = dma; + +- BUG_ON(paddr != dma); /* truncation has occurred, should never happen */ +- + paddr |= baddr & ~PAGE_MASK; + + return paddr; +@@ -447,11 +445,11 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, + + BUG_ON(dir == DMA_NONE); + +- xen_dma_unmap_page(hwdev, paddr, size, dir, attrs); ++ xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); + + /* NOTE: We use dev_addr here, not paddr! */ + if (is_xen_swiotlb_buffer(dev_addr)) { +- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); ++ swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir); + return; + } + +@@ -495,14 +493,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + BUG_ON(dir == DMA_NONE); + + if (target == SYNC_FOR_CPU) +- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); ++ xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); + + /* NOTE: We use dev_addr here, not paddr! */ + if (is_xen_swiotlb_buffer(dev_addr)) + swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); + + if (target == SYNC_FOR_DEVICE) +- xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); ++ xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); + + if (dir != DMA_FROM_DEVICE) + return; +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 451b00c..12e3556 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1854,6 +1854,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode) + { + struct btrfs_delayed_node *delayed_node; + ++ /* ++ * we don't do delayed inode updates during log recovery because it ++ * leads to enospc problems. 
This means we also can't do ++ * delayed inode refs ++ */ ++ if (BTRFS_I(inode)->root->fs_info->log_root_recovering) ++ return -EAGAIN; ++ + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index b53278c..94a85ee 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -676,7 +676,7 @@ static int ceph_writepages_start(struct address_space *mapping, + int rc = 0; + unsigned wsize = 1 << inode->i_blkbits; + struct ceph_osd_request *req = NULL; +- int do_sync; ++ int do_sync = 0; + u64 truncate_size, snap_size; + u32 truncate_seq; + +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index f4f050a..339c412 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -1461,15 +1461,18 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc, + + dout("discard_cap_releases mds%d\n", session->s_mds); + +- /* zero out the in-progress message */ +- msg = list_first_entry(&session->s_cap_releases, +- struct ceph_msg, list_head); +- head = msg->front.iov_base; +- num = le32_to_cpu(head->num); +- dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num); +- head->num = cpu_to_le32(0); +- msg->front.iov_len = sizeof(*head); +- session->s_num_cap_releases += num; ++ if (!list_empty(&session->s_cap_releases)) { ++ /* zero out the in-progress message */ ++ msg = list_first_entry(&session->s_cap_releases, ++ struct ceph_msg, list_head); ++ head = msg->front.iov_base; ++ num = le32_to_cpu(head->num); ++ dout("discard_cap_releases mds%d %p %u\n", ++ session->s_mds, msg, num); ++ head->num = cpu_to_le32(0); ++ msg->front.iov_len = sizeof(*head); ++ session->s_num_cap_releases += num; ++ } + + /* requeue completed messages */ + while (!list_empty(&session->s_cap_releases_done)) { +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index a163159..23a51f0 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -476,12 +476,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) + * write_inode() + */ + spin_lock(&inode->i_lock); +- /* Clear I_DIRTY_PAGES if we've written out all dirty pages */ +- if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) +- inode->i_state &= ~I_DIRTY_PAGES; ++ + dirty = inode->i_state & I_DIRTY; +- inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); ++ inode->i_state &= ~I_DIRTY; ++ ++ /* ++ * Paired with smp_mb() in __mark_inode_dirty(). This allows ++ * __mark_inode_dirty() to test i_state without grabbing i_lock - ++ * either they see the I_DIRTY bits cleared or we see the dirtied ++ * inode. ++ * ++ * I_DIRTY_PAGES is always cleared together above even if @mapping ++ * still has dirty pages. The flag is reinstated after smp_mb() if ++ * necessary. This guarantees that either __mark_inode_dirty() ++ * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY. ++ */ ++ smp_mb(); ++ ++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) ++ inode->i_state |= I_DIRTY_PAGES; ++ + spin_unlock(&inode->i_lock); ++ + /* Don't write the inode if only I_DIRTY_PAGES was set */ + if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { + int err = write_inode(inode, wbc); +@@ -1145,12 +1161,11 @@ void __mark_inode_dirty(struct inode *inode, int flags) + } + + /* +- * make sure that changes are seen by all cpus before we test i_state +- * -- mikulas ++ * Paired with smp_mb() in __writeback_single_inode() for the ++ * following lockless i_state test. See there for details. 
+ */ + smp_mb(); + +- /* avoid the locking if we can */ + if ((inode->i_state & flags) == flags) + return; + +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 34d2a1f..daa53da 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -1209,15 +1209,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source) + return 0; + } + +-static long long ++static int + compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) + { +- long long res; +- +- res = o1->len - o2->len; +- if (res) +- return res; +- return (long long)memcmp(o1->data, o2->data, o1->len); ++ if (o1->len < o2->len) ++ return -1; ++ if (o1->len > o2->len) ++ return 1; ++ return memcmp(o1->data, o2->data, o1->len); + } + + static int same_name(const char *n1, const char *n2) +@@ -1401,7 +1400,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) + static struct nfs4_client * + find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) + { +- long long cmp; ++ int cmp; + struct rb_node *node = root->rb_node; + struct nfs4_client *clp; + +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 8657335..dd1afa3 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1809,6 +1809,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components, + } + else + end++; ++ if (found_esc) ++ end = next; ++ + str = end; + } + *pp = p; +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index 1e0bbae..09480c53 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -49,6 +49,8 @@ struct nilfs_iget_args { + int for_gc; + }; + ++static int nilfs_iget_test(struct inode *inode, void *opaque); ++ + void nilfs_inode_add_blocks(struct inode *inode, int n) + { + struct nilfs_root *root = NILFS_I(inode)->i_root; +@@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = { + .is_partially_uptodate = block_is_partially_uptodate, + }; + ++static int nilfs_insert_inode_locked(struct inode *inode, ++ struct nilfs_root *root, ++ unsigned long ino) ++{ ++ struct nilfs_iget_args args = { ++ .ino = ino, .root = root, .cno = 0, .for_gc = 0 ++ }; ++ ++ return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); ++} ++ + struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) + { + struct super_block *sb = dir->i_sb; +@@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { + err = nilfs_bmap_read(ii->i_bmap, NULL); + if (err < 0) +- goto failed_bmap; ++ goto failed_after_creation; + + set_bit(NILFS_I_BMAP, &ii->i_state); + /* No lock is needed; iget() ensures it. */ +@@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) + spin_lock(&nilfs->ns_next_gen_lock); + inode->i_generation = nilfs->ns_next_generation++; + spin_unlock(&nilfs->ns_next_gen_lock); +- insert_inode_hash(inode); ++ if (nilfs_insert_inode_locked(inode, root, ino) < 0) { ++ err = -EIO; ++ goto failed_after_creation; ++ } + + err = nilfs_init_acl(inode, dir); + if (unlikely(err)) +- goto failed_acl; /* never occur. When supporting ++ goto failed_after_creation; /* never occur. 
When supporting + nilfs_init_acl(), proper cancellation of + above jobs should be considered */ + + return inode; + +- failed_acl: +- failed_bmap: ++ failed_after_creation: + clear_nlink(inode); ++ unlock_new_inode(inode); + iput(inode); /* raw_inode will be deleted through +- generic_delete_inode() */ ++ nilfs_evict_inode() */ + goto failed; + + failed_ifile_create_inode: +@@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode, + inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); + inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); +- if (inode->i_nlink == 0 && inode->i_mode == 0) +- return -EINVAL; /* this inode is deleted */ ++ if (inode->i_nlink == 0) ++ return -ESTALE; /* this inode is deleted */ + + inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); + ii->i_flags = le32_to_cpu(raw_inode->i_flags); +diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c +index 9de78f0..0f84b25 100644 +--- a/fs/nilfs2/namei.c ++++ b/fs/nilfs2/namei.c +@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) + int err = nilfs_add_link(dentry, inode); + if (!err) { + d_instantiate(dentry, inode); ++ unlock_new_inode(inode); + return 0; + } + inode_dec_link_count(inode); ++ unlock_new_inode(inode); + iput(inode); + return err; + } +@@ -182,6 +184,7 @@ out: + out_fail: + drop_nlink(inode); + nilfs_mark_inode_dirty(inode); ++ unlock_new_inode(inode); + iput(inode); + goto out; + } +@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir, + inode_inc_link_count(inode); + ihold(inode); + +- err = nilfs_add_nondir(dentry, inode); +- if (!err) ++ err = nilfs_add_link(dentry, inode); ++ if (!err) { ++ d_instantiate(dentry, inode); + err = nilfs_transaction_commit(dir->i_sb); +- else ++ } else { ++ inode_dec_link_count(inode); ++ iput(inode); + nilfs_transaction_abort(dir->i_sb); ++ } + + return err; + } +@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + + nilfs_mark_inode_dirty(inode); + d_instantiate(dentry, inode); ++ unlock_new_inode(inode); + out: + if (!err) + err = nilfs_transaction_commit(dir->i_sb); +@@ -255,6 +263,7 @@ out_fail: + drop_nlink(inode); + drop_nlink(inode); + nilfs_mark_inode_dirty(inode); ++ unlock_new_inode(inode); + iput(inode); + out_dir: + drop_nlink(dir); +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index aeb44e8..bb6ee06 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -899,7 +899,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) + } + } + +-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) ++static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) + { + int i; + +@@ -920,7 +920,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) + page_cache_release(wc->w_target_page); + } + ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); ++} + ++static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) ++{ ++ ocfs2_unlock_pages(wc); + brelse(wc->w_di_bh); + kfree(wc); + } +@@ -2045,11 +2049,19 @@ out_write_size: + di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + ocfs2_journal_dirty(handle, wc->w_di_bh); + ++ /* unlock pages before dealloc since it needs acquiring j_trans_barrier ++ * lock, or it will cause a deadlock since journal commit threads holds ++ * this lock and will ask for the page lock when flushing the data. ++ * put it here to preserve the unlock order. 
++ */ ++ ocfs2_unlock_pages(wc); ++ + ocfs2_commit_trans(osb, handle); + + ocfs2_run_deallocs(osb, &wc->w_dealloc); + +- ocfs2_free_write_ctxt(wc); ++ brelse(wc->w_di_bh); ++ kfree(wc); + + return copied; + } +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c +index feed025f..b242762 100644 +--- a/fs/ocfs2/namei.c ++++ b/fs/ocfs2/namei.c +@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, + struct inode *inode, + const char *symname); + ++static int ocfs2_double_lock(struct ocfs2_super *osb, ++ struct buffer_head **bh1, ++ struct inode *inode1, ++ struct buffer_head **bh2, ++ struct inode *inode2, ++ int rename); ++ ++static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); + /* An orphan dir name is an 8 byte value, printed as a hex string */ + #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) + +@@ -656,8 +664,10 @@ static int ocfs2_link(struct dentry *old_dentry, + { + handle_t *handle; + struct inode *inode = old_dentry->d_inode; ++ struct inode *old_dir = old_dentry->d_parent->d_inode; + int err; + struct buffer_head *fe_bh = NULL; ++ struct buffer_head *old_dir_bh = NULL; + struct buffer_head *parent_fe_bh = NULL; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); +@@ -674,19 +684,33 @@ static int ocfs2_link(struct dentry *old_dentry, + + dquot_initialize(dir); + +- err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); ++ err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, ++ &parent_fe_bh, dir, 0); + if (err < 0) { + if (err != -ENOENT) + mlog_errno(err); + return err; + } + ++ /* make sure both dirs have bhs ++ * get an extra ref on old_dir_bh if old==new */ ++ if (!parent_fe_bh) { ++ if (old_dir_bh) { ++ parent_fe_bh = old_dir_bh; ++ get_bh(parent_fe_bh); ++ } else { ++ mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); ++ err = -EIO; ++ goto out; ++ } ++ } ++ + if (!dir->i_nlink) { + err = -ENOENT; + goto out; + } + +- err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, ++ err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, + old_dentry->d_name.len, &old_de_ino); + if (err) { + err = -ENOENT; +@@ -779,10 +803,11 @@ out_unlock_inode: + ocfs2_inode_unlock(inode, 1); + + out: +- ocfs2_inode_unlock(dir, 1); ++ ocfs2_double_unlock(old_dir, dir); + + brelse(fe_bh); + brelse(parent_fe_bh); ++ brelse(old_dir_bh); + + ocfs2_free_dir_lookup_result(&lookup); + +@@ -991,14 +1016,15 @@ leave: + } + + /* +- * The only place this should be used is rename! ++ * The only place this should be used is rename and link! + * if they have the same id, then the 1st one is the only one locked. + */ + static int ocfs2_double_lock(struct ocfs2_super *osb, + struct buffer_head **bh1, + struct inode *inode1, + struct buffer_head **bh2, +- struct inode *inode2) ++ struct inode *inode2, ++ int rename) + { + int status; + struct ocfs2_inode_info *oi1 = OCFS2_I(inode1); +@@ -1028,7 +1054,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, + } + /* lock id2 */ + status = ocfs2_inode_lock_nested(inode2, bh2, 1, +- OI_LS_RENAME1); ++ rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); +@@ -1037,7 +1063,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, + } + + /* lock id1 */ +- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); ++ status = ocfs2_inode_lock_nested(inode1, bh1, 1, ++ rename == 1 ? 
OI_LS_RENAME2 : OI_LS_PARENT); + if (status < 0) { + /* + * An error return must mean that no cluster locks +@@ -1137,7 +1164,7 @@ static int ocfs2_rename(struct inode *old_dir, + + /* if old and new are the same, this'll just do one lock. */ + status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, +- &new_dir_bh, new_dir); ++ &new_dir_bh, new_dir, 1); + if (status < 0) { + mlog_errno(status); + goto bail; +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index fa8cef2..e7d95f9 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400); + MODULE_PARM_DESC(mem_size, + "size of reserved RAM used to store oops/panic logs"); + ++static unsigned int mem_type; ++module_param(mem_type, uint, 0600); ++MODULE_PARM_DESC(mem_type, ++ "set to 1 to try to use unbuffered memory (default 0)"); ++ + static int dump_oops = 1; + module_param(dump_oops, int, 0600); + MODULE_PARM_DESC(dump_oops, +@@ -79,6 +84,7 @@ struct ramoops_context { + struct persistent_ram_zone *fprz; + phys_addr_t phys_addr; + unsigned long size; ++ unsigned int memtype; + size_t record_size; + size_t console_size; + size_t ftrace_size; +@@ -353,7 +359,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, + size_t sz = cxt->record_size; + + cxt->przs[i] = persistent_ram_new(*paddr, sz, 0, +- &cxt->ecc_info); ++ &cxt->ecc_info, ++ cxt->memtype); + if (IS_ERR(cxt->przs[i])) { + err = PTR_ERR(cxt->przs[i]); + dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", +@@ -383,7 +390,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt, + return -ENOMEM; + } + +- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info); ++ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype); + if (IS_ERR(*prz)) { + int err = PTR_ERR(*prz); + +@@ -431,6 +438,7 @@ static int ramoops_probe(struct platform_device *pdev) + cxt->dump_read_cnt = 0; + cxt->size = pdata->mem_size; + cxt->phys_addr = pdata->mem_address; ++ cxt->memtype = pdata->mem_type; + cxt->record_size = pdata->record_size; + cxt->console_size = pdata->console_size; + cxt->ftrace_size = pdata->ftrace_size; +@@ -561,6 +569,7 @@ static void ramoops_register_dummy(void) + + dummy_data->mem_size = mem_size; + dummy_data->mem_address = mem_address; ++ dummy_data->mem_type = 0; + dummy_data->record_size = record_size; + dummy_data->console_size = ramoops_console_size; + dummy_data->ftrace_size = ramoops_ftrace_size; +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c +index de272d4..bda61a7 100644 +--- a/fs/pstore/ram_core.c ++++ b/fs/pstore/ram_core.c +@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz) + persistent_ram_update_header_ecc(prz); + } + +-static void *persistent_ram_vmap(phys_addr_t start, size_t size) ++static void *persistent_ram_vmap(phys_addr_t start, size_t size, ++ unsigned int memtype) + { + struct page **pages; + phys_addr_t page_start; +@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size) + page_start = start - offset_in_page(start); + page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); + +- prot = pgprot_noncached(PAGE_KERNEL); ++ if (memtype) ++ prot = pgprot_noncached(PAGE_KERNEL); ++ else ++ prot = pgprot_writecombine(PAGE_KERNEL); + + pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL); + if (!pages) { +@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size) + return vaddr; + } + +-static void 
*persistent_ram_iomap(phys_addr_t start, size_t size) ++static void *persistent_ram_iomap(phys_addr_t start, size_t size, ++ unsigned int memtype) + { ++ void *va; ++ + if (!request_mem_region(start, size, "persistent_ram")) { + pr_err("request mem region (0x%llx@0x%llx) failed\n", + (unsigned long long)size, (unsigned long long)start); +@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size) + buffer_start_add = buffer_start_add_locked; + buffer_size_add = buffer_size_add_locked; + +- return ioremap(start, size); ++ if (memtype) ++ va = ioremap(start, size); ++ else ++ va = ioremap_wc(start, size); ++ ++ return va; + } + + static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, +- struct persistent_ram_zone *prz) ++ struct persistent_ram_zone *prz, int memtype) + { + prz->paddr = start; + prz->size = size; + + if (pfn_valid(start >> PAGE_SHIFT)) +- prz->vaddr = persistent_ram_vmap(start, size); ++ prz->vaddr = persistent_ram_vmap(start, size, memtype); + else +- prz->vaddr = persistent_ram_iomap(start, size); ++ prz->vaddr = persistent_ram_iomap(start, size, memtype); + + if (!prz->vaddr) { + pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__, +@@ -502,7 +514,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) + } + + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, +- u32 sig, struct persistent_ram_ecc_info *ecc_info) ++ u32 sig, struct persistent_ram_ecc_info *ecc_info, ++ unsigned int memtype) + { + struct persistent_ram_zone *prz; + int ret = -ENOMEM; +@@ -513,7 +526,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + goto err; + } + +- ret = persistent_ram_buffer_map(start, size, prz); ++ ret = persistent_ram_buffer_map(start, size, prz, memtype); + if (ret) + goto err; + +diff --git a/include/linux/mm.h b/include/linux/mm.h +index d5039da..46b8ab5 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1866,7 +1866,7 @@ extern int expand_downwards(struct vm_area_struct *vma, + #if VM_GROWSUP + extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); + #else +- #define expand_upwards(vma, address) do { } while (0) ++ #define expand_upwards(vma, address) (0) + #endif + + /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ +diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h +index 9974975..4af3fdc 100644 +--- a/include/linux/pstore_ram.h ++++ b/include/linux/pstore_ram.h +@@ -53,7 +53,8 @@ struct persistent_ram_zone { + }; + + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, +- u32 sig, struct persistent_ram_ecc_info *ecc_info); ++ u32 sig, struct persistent_ram_ecc_info *ecc_info, ++ unsigned int memtype); + void persistent_ram_free(struct persistent_ram_zone *prz); + void persistent_ram_zap(struct persistent_ram_zone *prz); + +@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, + struct ramoops_platform_data { + unsigned long mem_size; + unsigned long mem_address; ++ unsigned int mem_type; + unsigned long record_size; + unsigned long console_size; + unsigned long ftrace_size; +diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h +index 67e1bbf..dc7bb01 100644 +--- a/include/trace/events/sched.h ++++ b/include/trace/events/sched.h +@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p) + /* + * For all intents and purposes a preempted task is a running task. 
+ */ +- if (task_preempt_count(p) & PREEMPT_ACTIVE) ++ if (preempt_count() & PREEMPT_ACTIVE) + state = TASK_RUNNING | TASK_STATE_MAX; + #endif + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 4bbb27a..69cffb4 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -7240,11 +7240,11 @@ SYSCALL_DEFINE5(perf_event_open, + + if (move_group) { + synchronize_rcu(); +- perf_install_in_context(ctx, group_leader, event->cpu); ++ perf_install_in_context(ctx, group_leader, group_leader->cpu); + get_ctx(ctx); + list_for_each_entry(sibling, &group_leader->sibling_list, + group_entry) { +- perf_install_in_context(ctx, sibling, event->cpu); ++ perf_install_in_context(ctx, sibling, sibling->cpu); + get_ctx(ctx); + } + } +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c +index 37dac98..8d3c5dd 100644 +--- a/kernel/sched/deadline.c ++++ b/kernel/sched/deadline.c +@@ -550,24 +550,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) + static + int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) + { +- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); +- int rorun = dl_se->runtime <= 0; +- +- if (!rorun && !dmiss) +- return 0; +- +- /* +- * If we are beyond our current deadline and we are still +- * executing, then we have already used some of the runtime of +- * the next instance. Thus, if we do not account that, we are +- * stealing bandwidth from the system at each deadline miss! +- */ +- if (dmiss) { +- dl_se->runtime = rorun ? dl_se->runtime : 0; +- dl_se->runtime -= rq_clock(rq) - dl_se->deadline; +- } +- +- return 1; ++ return (dl_se->runtime <= 0); + } + + extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); +@@ -806,10 +789,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, + * parameters of the task might need updating. Otherwise, + * we want a replenishment of its runtime. + */ +- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) +- replenish_dl_entity(dl_se, pi_se); +- else ++ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) + update_dl_entity(dl_se, pi_se); ++ else if (flags & ENQUEUE_REPLENISH) ++ replenish_dl_entity(dl_se, pi_se); + + __enqueue_dl_entity(dl_se); + } +diff --git a/mm/memory.c b/mm/memory.c +index 48d7365..924429e 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3204,7 +3204,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo + if (prev && prev->vm_end == address) + return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; + +- expand_downwards(vma, address - PAGE_SIZE); ++ return expand_downwards(vma, address - PAGE_SIZE); + } + if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { + struct vm_area_struct *next = vma->vm_next; +@@ -3213,7 +3213,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo + if (next && next->vm_start == address + PAGE_SIZE) + return next->vm_flags & VM_GROWSUP ? 
0 : -ENOMEM; + +- expand_upwards(vma, address + PAGE_SIZE); ++ return expand_upwards(vma, address + PAGE_SIZE); + } + return 0; + } +diff --git a/mm/mmap.c b/mm/mmap.c +index b91ac80..085bcd8 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -2058,14 +2058,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + { + struct mm_struct *mm = vma->vm_mm; + struct rlimit *rlim = current->signal->rlim; +- unsigned long new_start; ++ unsigned long new_start, actual_size; + + /* address space limit tests */ + if (!may_expand_vm(mm, grow)) + return -ENOMEM; + + /* Stack limit test */ +- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) ++ actual_size = size; ++ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) ++ actual_size -= PAGE_SIZE; ++ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + + /* mlock limit tests */ +diff --git a/mm/vmscan.c b/mm/vmscan.c +index deb139e..be6a689 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -2860,18 +2860,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, + return false; + + /* +- * There is a potential race between when kswapd checks its watermarks +- * and a process gets throttled. There is also a potential race if +- * processes get throttled, kswapd wakes, a large process exits therby +- * balancing the zones that causes kswapd to miss a wakeup. If kswapd +- * is going to sleep, no process should be sleeping on pfmemalloc_wait +- * so wake them now if necessary. If necessary, processes will wake +- * kswapd and get throttled again ++ * The throttled processes are normally woken up in balance_pgdat() as ++ * soon as pfmemalloc_watermark_ok() is true. But there is a potential ++ * race between when kswapd checks the watermarks and a process gets ++ * throttled. There is also a potential race if processes get ++ * throttled, kswapd wakes, a large process exits thereby balancing the ++ * zones, which causes kswapd to exit balance_pgdat() before reaching ++ * the wake up checks. If kswapd is going to sleep, no process should ++ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If ++ * the wake up is premature, processes will wake kswapd and get ++ * throttled again. The difference from wake ups in balance_pgdat() is ++ * that here we are under prepare_to_wait(). 
+ */ +- if (waitqueue_active(&pgdat->pfmemalloc_wait)) { +- wake_up(&pgdat->pfmemalloc_wait); +- return false; +- } ++ if (waitqueue_active(&pgdat->pfmemalloc_wait)) ++ wake_up_all(&pgdat->pfmemalloc_wait); + + return pgdat_balanced(pgdat, order, classzone_idx); + } +diff --git a/scripts/kernel-doc b/scripts/kernel-doc +index da058da..2438cc3 100755 +--- a/scripts/kernel-doc ++++ b/scripts/kernel-doc +@@ -1753,7 +1753,7 @@ sub dump_struct($$) { + # strip kmemcheck_bitfield_{begin,end}.*; + $members =~ s/kmemcheck_bitfield_.*?;//gos; + # strip attributes +- $members =~ s/__aligned\s*\(.+\)//gos; ++ $members =~ s/__aligned\s*\([^;]*\)//gos; + + create_parameterlist($members, ';', $file); + check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index dafcf82..f6e5c4e 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -338,8 +338,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid, + unsigned int parm; + + parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT); +- if (parm == -1) ++ if (parm == -1) { ++ *start_id = 0; + return 0; ++ } + *start_id = (parm >> 16) & 0x7fff; + return (int)(parm & 0x7fff); + } +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 15270a2..12f28d7 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -593,9 +593,9 @@ static void stac_store_hints(struct hda_codec *codec) + spec->gpio_mask; + } + if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) +- spec->gpio_mask &= spec->gpio_mask; +- if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) + spec->gpio_dir &= spec->gpio_mask; ++ if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) ++ spec->gpio_data &= spec->gpio_mask; + if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) + spec->eapd_mask &= spec->gpio_mask; + if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute)) +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c +index ddfb0fd..9dd260f 100644 +--- a/sound/soc/codecs/max98090.c ++++ b/sound/soc/codecs/max98090.c +@@ -1378,8 +1378,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = { + {"STENL Mux", "Sidetone Left", "DMICL"}, + {"STENR Mux", "Sidetone Right", "ADCR"}, + {"STENR Mux", "Sidetone Right", "DMICR"}, +- {"DACL", "NULL", "STENL Mux"}, +- {"DACR", "NULL", "STENL Mux"}, ++ {"DACL", NULL, "STENL Mux"}, ++ {"DACR", NULL, "STENL Mux"}, + + {"AIFINL", NULL, "SHDN"}, + {"AIFINR", NULL, "SHDN"}, +diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c +index 4068f24..bb3878c 100644 +--- a/sound/soc/codecs/sigmadsp.c ++++ b/sound/soc/codecs/sigmadsp.c +@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev, + goto done; + } + ++ if (ssfw_head->version != 1) { ++ dev_err(dev, ++ "Failed to load firmware: Invalid version %d. 
Supported firmware versions: 1\n", ++ ssfw_head->version); ++ goto done; ++ } ++ + crc = crc32(0, fw->data + sizeof(*ssfw_head), + fw->size - sizeof(*ssfw_head)); + pr_debug("%s: crc=%x\n", __func__, crc); +diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c +index 25c31f1..2f63575 100644 +--- a/sound/soc/dwc/designware_i2s.c ++++ b/sound/soc/dwc/designware_i2s.c +@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream, + snd_soc_dai_set_dma_data(dai, substream, NULL); + } + ++static int dw_i2s_prepare(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ i2s_write_reg(dev->i2s_base, TXFFR, 1); ++ else ++ i2s_write_reg(dev->i2s_base, RXFFR, 1); ++ ++ return 0; ++} ++ + static int dw_i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) + { +@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = { + .startup = dw_i2s_startup, + .shutdown = dw_i2s_shutdown, + .hw_params = dw_i2s_hw_params, ++ .prepare = dw_i2s_prepare, + .trigger = dw_i2s_trigger, + }; + +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c +index d1d72ff..621bc9e 100644 +--- a/sound/usb/mixer_maps.c ++++ b/sound/usb/mixer_maps.c +@@ -328,8 +328,11 @@ static struct usbmix_name_map gamecom780_map[] = { + {} + }; + +-static const struct usbmix_name_map kef_x300a_map[] = { +- { 10, NULL }, /* firmware locks up (?) when we try to access this FU */ ++/* some (all?) SCMS USB3318 devices are affected by a firmware lock up ++ * when anything attempts to access FU 10 (control) ++ */ ++static const struct usbmix_name_map scms_usb3318_map[] = { ++ { 10, NULL }, + { 0 } + }; + +@@ -425,8 +428,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { + .map = ebox44_map, + }, + { ++ /* KEF X300A */ + .id = USB_ID(0x27ac, 0x1000), +- .map = kef_x300a_map, ++ .map = scms_usb3318_map, ++ }, ++ { ++ /* Arcam rPAC */ ++ .id = USB_ID(0x25c4, 0x0003), ++ .map = scms_usb3318_map, + }, + { 0 } /* terminator */ + }; +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h +index a59743f..0001c9a 100644 +--- a/tools/perf/util/hist.h ++++ b/tools/perf/util/hist.h +@@ -36,6 +36,7 @@ struct events_stats { + u32 nr_invalid_chains; + u32 nr_unknown_id; + u32 nr_unprocessable_samples; ++ u32 nr_unordered_events; + }; + + enum hist_column { +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c +index 5da6ce7..c1f20e9 100644 +--- a/tools/perf/util/session.c ++++ b/tools/perf/util/session.c +@@ -638,8 +638,7 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event, + return -ETIME; + + if (timestamp < s->ordered_samples.last_flush) { +- printf("Warning: Timestamp below last timeslice flush\n"); +- return -EINVAL; ++ s->stats.nr_unordered_events++; + } + + if (!list_empty(sc)) { +@@ -1135,6 +1134,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session, + "Do you have a KVM guest running and not using 'perf kvm'?\n", + session->stats.nr_unprocessable_samples); + } ++ if (session->stats.nr_unordered_events != 0) ++ ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events); + } + + volatile int session_done; diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.29-30.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.29-30.patch new file mode 100644 index 0000000000..eda2f694ea --- /dev/null +++ 
b/projects/imx6/patches/linux/linux-000-patch-3.14.29-30.patch @@ -0,0 +1,4387 @@ +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 7116fda..5d91ba1 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -1172,6 +1172,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + i8042.notimeout [HW] Ignore timeout condition signalled by controller + i8042.reset [HW] Reset the controller during init and cleanup + i8042.unlock [HW] Unlock (ignore) the keylock ++ i8042.kbdreset [HW] Reset device connected to KBD port + + i810= [HW,DRM] + +diff --git a/Makefile b/Makefile +index 7aff64e..5b94752 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 29 ++SUBLEVEL = 30 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts +index 398064c..4c169d8 100644 +--- a/arch/arc/boot/dts/nsimosci.dts ++++ b/arch/arc/boot/dts/nsimosci.dts +@@ -20,7 +20,7 @@ + /* this is for console on PGU */ + /* bootargs = "console=tty0 consoleblank=0"; */ + /* this is for console on serial */ +- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; ++ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; + }; + + aliases { +@@ -46,9 +46,9 @@ + #interrupt-cells = <1>; + }; + +- uart0: serial@c0000000 { ++ uart0: serial@f0000000 { + compatible = "ns8250"; +- reg = <0xc0000000 0x2000>; ++ reg = <0xf0000000 0x2000>; + interrupts = <11>; + clock-frequency = <3686400>; + baud = <115200>; +@@ -57,21 +57,21 @@ + no-loopback-test = <1>; + }; + +- pgu0: pgu@c9000000 { ++ pgu0: pgu@f9000000 { + compatible = "snps,arcpgufb"; +- reg = <0xc9000000 0x400>; ++ reg = <0xf9000000 0x400>; + }; + +- ps2: ps2@c9001000 { ++ ps2: ps2@f9001000 { + compatible = "snps,arc_ps2"; +- reg = <0xc9000400 0x14>; ++ reg = <0xf9000400 0x14>; + interrupts = <13>; + interrupt-names = "arc_ps2_irq"; + }; + +- eth0: ethernet@c0003000 { ++ eth0: ethernet@f0003000 { + compatible = "snps,oscilan"; +- reg = <0xc0003000 0x44>; ++ reg = <0xf0003000 0x44>; + interrupts = <7>, <8>; + interrupt-names = "rx", "tx"; + }; +diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h +index 66ee552..5faad17 100644 +--- a/arch/arc/include/asm/linkage.h ++++ b/arch/arc/include/asm/linkage.h +@@ -13,20 +13,6 @@ + + #define ASM_NL ` /* use '`' to mark new line in macro */ + +-/* Can't use the ENTRY macro in linux/linkage.h +- * gas considers ';' as comment vs. 
newline +- */ +-.macro ARC_ENTRY name +- .global \name +- .align 4 +- \name: +-.endm +- +-.macro ARC_EXIT name +-#define ASM_PREV_SYM_ADDR(name) .-##name +- .size \ name, ASM_PREV_SYM_ADDR(\name) +-.endm +- + /* annotation for data we want in DCCM - if enabled in .config */ + .macro ARCFP_DATA nm + #ifdef CONFIG_ARC_HAS_DCCM +diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S +index 65690e7..2ff0347 100644 +--- a/arch/arc/kernel/ctx_sw_asm.S ++++ b/arch/arc/kernel/ctx_sw_asm.S +@@ -62,4 +62,4 @@ __switch_to: + ld.ab blink, [sp, 4] + j [blink] + +-ARC_EXIT __switch_to ++END(__switch_to) +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S +index 6e8f83a..29b82ad 100644 +--- a/arch/arc/kernel/entry.S ++++ b/arch/arc/kernel/entry.S +@@ -141,7 +141,7 @@ VECTOR EV_Extension ; 0x130, Extn Intruction Excp (0x26) + VECTOR reserved ; Reserved Exceptions + .endr + +-#include /* ARC_{EXTRY,EXIT} */ ++#include /* {EXTRY,EXIT} */ + #include /* SAVE_ALL_{INT1,INT2,SYS...} */ + #include + #include +@@ -184,7 +184,7 @@ reserved: ; processor restart + ; --------------------------------------------- + ; Level 2 ISR: Can interrupt a Level 1 ISR + ; --------------------------------------------- +-ARC_ENTRY handle_interrupt_level2 ++ENTRY(handle_interrupt_level2) + + ; TODO-vineetg for SMP this wont work + ; free up r9 as scratchpad +@@ -225,14 +225,14 @@ ARC_ENTRY handle_interrupt_level2 + + b ret_from_exception + +-ARC_EXIT handle_interrupt_level2 ++END(handle_interrupt_level2) + + #endif + + ; --------------------------------------------- + ; Level 1 ISR + ; --------------------------------------------- +-ARC_ENTRY handle_interrupt_level1 ++ENTRY(handle_interrupt_level1) + + /* free up r9 as scratchpad */ + #ifdef CONFIG_SMP +@@ -265,7 +265,7 @@ ARC_ENTRY handle_interrupt_level1 + sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg + + b ret_from_exception +-ARC_EXIT handle_interrupt_level1 ++END(handle_interrupt_level1) + + ;################### Non TLB Exception Handling ############################# + +@@ -273,7 +273,7 @@ ARC_EXIT handle_interrupt_level1 + ; Instruction Error Exception Handler + ; --------------------------------------------- + +-ARC_ENTRY instr_service ++ENTRY(instr_service) + + EXCEPTION_PROLOGUE + +@@ -284,13 +284,13 @@ ARC_ENTRY instr_service + + bl do_insterror_or_kprobe + b ret_from_exception +-ARC_EXIT instr_service ++END(instr_service) + + ; --------------------------------------------- + ; Memory Error Exception Handler + ; --------------------------------------------- + +-ARC_ENTRY mem_service ++ENTRY(mem_service) + + EXCEPTION_PROLOGUE + +@@ -301,13 +301,13 @@ ARC_ENTRY mem_service + + bl do_memory_error + b ret_from_exception +-ARC_EXIT mem_service ++END(mem_service) + + ; --------------------------------------------- + ; Machine Check Exception Handler + ; --------------------------------------------- + +-ARC_ENTRY EV_MachineCheck ++ENTRY(EV_MachineCheck) + + EXCEPTION_PROLOGUE + +@@ -331,13 +331,13 @@ ARC_ENTRY EV_MachineCheck + + j do_machine_check_fault + +-ARC_EXIT EV_MachineCheck ++END(EV_MachineCheck) + + ; --------------------------------------------- + ; Protection Violation Exception Handler + ; --------------------------------------------- + +-ARC_ENTRY EV_TLBProtV ++ENTRY(EV_TLBProtV) + + EXCEPTION_PROLOGUE + +@@ -385,12 +385,12 @@ ARC_ENTRY EV_TLBProtV + + b ret_from_exception + +-ARC_EXIT EV_TLBProtV ++END(EV_TLBProtV) + + ; --------------------------------------------- + ; Privilege Violation Exception Handler + ; 
--------------------------------------------- +-ARC_ENTRY EV_PrivilegeV ++ENTRY(EV_PrivilegeV) + + EXCEPTION_PROLOGUE + +@@ -401,12 +401,12 @@ ARC_ENTRY EV_PrivilegeV + + bl do_privilege_fault + b ret_from_exception +-ARC_EXIT EV_PrivilegeV ++END(EV_PrivilegeV) + + ; --------------------------------------------- + ; Extension Instruction Exception Handler + ; --------------------------------------------- +-ARC_ENTRY EV_Extension ++ENTRY(EV_Extension) + + EXCEPTION_PROLOGUE + +@@ -417,7 +417,7 @@ ARC_ENTRY EV_Extension + + bl do_extension_fault + b ret_from_exception +-ARC_EXIT EV_Extension ++END(EV_Extension) + + ;######################### System Call Tracing ######################### + +@@ -504,7 +504,7 @@ trap_with_param: + ; (2) Break Points + ;------------------------------------------------------------------ + +-ARC_ENTRY EV_Trap ++ENTRY(EV_Trap) + + EXCEPTION_PROLOGUE + +@@ -534,9 +534,9 @@ ARC_ENTRY EV_Trap + jl [r9] ; Entry into Sys Call Handler + + ; fall through to ret_from_system_call +-ARC_EXIT EV_Trap ++END(EV_Trap) + +-ARC_ENTRY ret_from_system_call ++ENTRY(ret_from_system_call) + + st r0, [sp, PT_r0] ; sys call return value in pt_regs + +@@ -546,7 +546,7 @@ ARC_ENTRY ret_from_system_call + ; + ; If ret to user mode do we need to handle signals, schedule() et al. + +-ARC_ENTRY ret_from_exception ++ENTRY(ret_from_exception) + + ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 + ld r8, [sp, PT_status32] ; returning to User/Kernel Mode +@@ -728,9 +728,9 @@ not_level1_interrupt: + debug_marker_syscall: + rtie + +-ARC_EXIT ret_from_exception ++END(ret_from_exception) + +-ARC_ENTRY ret_from_fork ++ENTRY(ret_from_fork) + ; when the forked child comes here from the __switch_to function + ; r0 has the last task pointer. + ; put last task in scheduler queue +@@ -747,11 +747,11 @@ ARC_ENTRY ret_from_fork + ; special case of kernel_thread entry point returning back due to + ; kernel_execve() - pretend return from syscall to ret to userland + b ret_from_exception +-ARC_EXIT ret_from_fork ++END(ret_from_fork) + + ;################### Special Sys Call Wrappers ########################## + +-ARC_ENTRY sys_clone_wrapper ++ENTRY(sys_clone_wrapper) + SAVE_CALLEE_SAVED_USER + bl @sys_clone + DISCARD_CALLEE_SAVED_USER +@@ -761,7 +761,7 @@ ARC_ENTRY sys_clone_wrapper + bnz tracesys_exit + + b ret_from_system_call +-ARC_EXIT sys_clone_wrapper ++END(sys_clone_wrapper) + + #ifdef CONFIG_ARC_DW2_UNWIND + ; Workaround for bug 94179 (STAR ): +diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S +index bc813d5..978bf83 100644 +--- a/arch/arc/lib/memcmp.S ++++ b/arch/arc/lib/memcmp.S +@@ -6,7 +6,7 @@ + * published by the Free Software Foundation. + */ + +-#include ++#include + + #ifdef __LITTLE_ENDIAN__ + #define WORD2 r2 +@@ -16,7 +16,7 @@ + #define SHIFT r2 + #endif + +-ARC_ENTRY memcmp ++ENTRY(memcmp) + or r12,r0,r1 + asl_s r12,r12,30 + sub r3,r2,1 +@@ -121,4 +121,4 @@ ARC_ENTRY memcmp + .Lnil: + j_s.d [blink] + mov r0,0 +-ARC_EXIT memcmp ++END(memcmp) +diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S +index b64cc10..3222573 100644 +--- a/arch/arc/lib/memcpy-700.S ++++ b/arch/arc/lib/memcpy-700.S +@@ -6,9 +6,9 @@ + * published by the Free Software Foundation. 
+ */ + +-#include ++#include + +-ARC_ENTRY memcpy ++ENTRY(memcpy) + or r3,r0,r1 + asl_s r3,r3,30 + mov_s r5,r0 +@@ -63,4 +63,4 @@ ARC_ENTRY memcpy + .Lendbloop: + j_s.d [blink] + stb r12,[r5,0] +-ARC_EXIT memcpy ++END(memcpy) +diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S +index 9b2d88d..d36bd43 100644 +--- a/arch/arc/lib/memset.S ++++ b/arch/arc/lib/memset.S +@@ -6,11 +6,11 @@ + * published by the Free Software Foundation. + */ + +-#include ++#include + + #define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */ + +-ARC_ENTRY memset ++ENTRY(memset) + mov_s r4,r0 + or r12,r0,r2 + bmsk.f r12,r12,1 +@@ -46,14 +46,14 @@ ARC_ENTRY memset + stb.ab r1,[r4,1] + .Ltiny_end: + j_s [blink] +-ARC_EXIT memset ++END(memset) + + ; memzero: @r0 = mem, @r1 = size_t + ; memset: @r0 = mem, @r1 = char, @r2 = size_t + +-ARC_ENTRY memzero ++ENTRY(memzero) + ; adjust bzero args to memset args + mov r2, r1 + mov r1, 0 + b memset ;tail call so need to tinker with blink +-ARC_EXIT memzero ++END(memzero) +diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S +index 9c548c7..b725d58 100644 +--- a/arch/arc/lib/strchr-700.S ++++ b/arch/arc/lib/strchr-700.S +@@ -11,9 +11,9 @@ + presence of the norm instruction makes it easier to operate on whole + words branch-free. */ + +-#include ++#include + +-ARC_ENTRY strchr ++ENTRY(strchr) + extb_s r1,r1 + asl r5,r1,8 + bmsk r2,r0,1 +@@ -130,4 +130,4 @@ ARC_ENTRY strchr + j_s.d [blink] + mov.mi r0,0 + #endif /* ENDIAN */ +-ARC_EXIT strchr ++END(strchr) +diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S +index 5dc802b..3544600 100644 +--- a/arch/arc/lib/strcmp.S ++++ b/arch/arc/lib/strcmp.S +@@ -13,9 +13,9 @@ + source 1; however, that would increase the overhead for loop setup / finish, + and strcmp might often terminate early. */ + +-#include ++#include + +-ARC_ENTRY strcmp ++ENTRY(strcmp) + or r2,r0,r1 + bmsk_s r2,r2,1 + brne r2,0,.Lcharloop +@@ -93,4 +93,4 @@ ARC_ENTRY strcmp + .Lcmpend: + j_s.d [blink] + sub r0,r2,r3 +-ARC_EXIT strcmp ++END(strcmp) +diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S +index b7ca4ae..8422f38 100644 +--- a/arch/arc/lib/strcpy-700.S ++++ b/arch/arc/lib/strcpy-700.S +@@ -16,9 +16,9 @@ + there, but the it is not likely to be taken often, and it + would also be likey to cost an unaligned mispredict at the next call. */ + +-#include ++#include + +-ARC_ENTRY strcpy ++ENTRY(strcpy) + or r2,r0,r1 + bmsk_s r2,r2,1 + brne.d r2,0,charloop +@@ -67,4 +67,4 @@ charloop: + brne.d r3,0,charloop + stb.ab r3,[r10,1] + j [blink] +-ARC_EXIT strcpy ++END(strcpy) +diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S +index 39759e0..53cfd56 100644 +--- a/arch/arc/lib/strlen.S ++++ b/arch/arc/lib/strlen.S +@@ -6,9 +6,9 @@ + * published by the Free Software Foundation. 
+ */ + +-#include ++#include + +-ARC_ENTRY strlen ++ENTRY(strlen) + or r3,r0,7 + ld r2,[r3,-7] + ld.a r6,[r3,-3] +@@ -80,4 +80,4 @@ ARC_ENTRY strlen + .Learly_end: + b.d .Lend + sub_s.ne r1,r1,r1 +-ARC_EXIT strlen ++END(strlen) +diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S +index 3fcfdb3..79bfc81 100644 +--- a/arch/arc/mm/tlbex.S ++++ b/arch/arc/mm/tlbex.S +@@ -260,7 +260,7 @@ ARCFP_CODE ;Fast Path Code, candidate for ICCM + ; I-TLB Miss Exception Handler + ;----------------------------------------------------------------------------- + +-ARC_ENTRY EV_TLBMissI ++ENTRY(EV_TLBMissI) + + TLBMISS_FREEUP_REGS + +@@ -293,13 +293,13 @@ ARC_ENTRY EV_TLBMissI + TLBMISS_RESTORE_REGS + rtie + +-ARC_EXIT EV_TLBMissI ++END(EV_TLBMissI) + + ;----------------------------------------------------------------------------- + ; D-TLB Miss Exception Handler + ;----------------------------------------------------------------------------- + +-ARC_ENTRY EV_TLBMissD ++ENTRY(EV_TLBMissD) + + TLBMISS_FREEUP_REGS + +@@ -381,6 +381,4 @@ do_slow_path_pf: + bl do_page_fault + b ret_from_exception + +-ARC_EXIT EV_TLBMissD +- +-ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr ++END(EV_TLBMissD) +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi +index de16119..6a26e79 100644 +--- a/arch/arm/boot/dts/imx25.dtsi ++++ b/arch/arm/boot/dts/imx25.dtsi +@@ -158,7 +158,7 @@ + #size-cells = <0>; + compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; + reg = <0x43fa4000 0x4000>; +- clocks = <&clks 62>, <&clks 62>; ++ clocks = <&clks 78>, <&clks 78>; + clock-names = "ipg", "per"; + interrupts = <14>; + status = "disabled"; +diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c +index 4d677f4..01a5765 100644 +--- a/arch/arm/mach-imx/clk-imx6q.c ++++ b/arch/arm/mach-imx/clk-imx6q.c +@@ -161,8 +161,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) + post_div_table[1].div = 1; + post_div_table[2].div = 1; + video_div_table[1].div = 1; +- video_div_table[2].div = 1; +- }; ++ video_div_table[3].div = 1; ++ } + + /* type name parent_name base div_mask */ + clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f); +diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c +index 74044aa..73d80b8 100644 +--- a/arch/arm/mach-omap2/timer.c ++++ b/arch/arm/mach-omap2/timer.c +@@ -513,11 +513,11 @@ static void __init realtime_counter_init(void) + rate = clk_get_rate(sys_clk); + /* Numerator/denumerator values refer TRM Realtime Counter section */ + switch (rate) { +- case 1200000: ++ case 12000000: + num = 64; + den = 125; + break; +- case 1300000: ++ case 13000000: + num = 768; + den = 1625; + break; +@@ -529,11 +529,11 @@ static void __init realtime_counter_init(void) + num = 192; + den = 625; + break; +- case 2600000: ++ case 26000000: + num = 384; + den = 1625; + break; +- case 2700000: ++ case 27000000: + num = 256; + den = 1125; + break; +diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c +index f74ab53..2b73c8a 100644 +--- a/arch/arm/mach-shmobile/setup-sh73a0.c ++++ b/arch/arm/mach-shmobile/setup-sh73a0.c +@@ -617,6 +617,7 @@ static struct platform_device ipmmu_device = { + + static struct renesas_intc_irqpin_config irqpin0_platform_data = { + .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */ ++ .control_parent = true, + }; + + static struct resource irqpin0_resources[] = { +@@ -678,6 +679,7 @@ static struct platform_device irqpin1_device = { + + static struct 
renesas_intc_irqpin_config irqpin2_platform_data = { + .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */ ++ .control_parent = true, + }; + + static struct resource irqpin2_resources[] = { +@@ -708,6 +710,7 @@ static struct platform_device irqpin2_device = { + + static struct renesas_intc_irqpin_config irqpin3_platform_data = { + .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */ ++ .control_parent = true, + }; + + static struct resource irqpin3_resources[] = { +diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h +index d2d11b7..8121aa6 100644 +--- a/arch/parisc/include/asm/ldcw.h ++++ b/arch/parisc/include/asm/ldcw.h +@@ -33,11 +33,18 @@ + + #endif /*!CONFIG_PA20*/ + +-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ ++/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. ++ We don't explicitly expose that "*a" may be written as reload ++ fails to find a register in class R1_REGS when "a" needs to be ++ reloaded when generating 64-bit PIC code. Instead, we clobber ++ memory to indicate to the compiler that the assembly code reads ++ or writes to items other than those listed in the input and output ++ operands. This may pessimize the code somewhat but __ldcw is ++ usually used within code blocks surrounded by memory barriors. */ + #define __ldcw(a) ({ \ + unsigned __ret; \ +- __asm__ __volatile__(__LDCW " 0(%2),%0" \ +- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ ++ __asm__ __volatile__(__LDCW " 0(%1),%0" \ ++ : "=r" (__ret) : "r" (a) : "memory"); \ + __ret; \ + }) + +diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common +index 21ca44c..1f0ea55 100644 +--- a/arch/um/Kconfig.common ++++ b/arch/um/Kconfig.common +@@ -2,6 +2,7 @@ config UML + bool + default y + select HAVE_UID16 ++ select HAVE_FUTEX_CMPXCHG if FUTEX + select GENERIC_IRQ_SHOW + select GENERIC_CPU_DEVICES + select GENERIC_IO +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 79a3f96..a1f5b18 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -1017,6 +1017,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) + regs->flags &= ~X86_EFLAGS_IF; + trace_hardirqs_off(); + regs->ip = (unsigned long)(jp->entry); ++ ++ /* ++ * jprobes use jprobe_return() which skips the normal return ++ * path of the function, and this messes up the accounting of the ++ * function graph tracer to get messed up. ++ * ++ * Pause function graph tracing while performing the jprobe function. 
++ */ ++ pause_graph_tracing(); + return 1; + } + +@@ -1042,24 +1051,25 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + u8 *addr = (u8 *) (regs->ip - 1); + struct jprobe *jp = container_of(p, struct jprobe, kp); ++ void *saved_sp = kcb->jprobe_saved_sp; + + if ((addr > (u8 *) jprobe_return) && + (addr < (u8 *) jprobe_return_end)) { +- if (stack_addr(regs) != kcb->jprobe_saved_sp) { ++ if (stack_addr(regs) != saved_sp) { + struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; + printk(KERN_ERR + "current sp %p does not match saved sp %p\n", +- stack_addr(regs), kcb->jprobe_saved_sp); ++ stack_addr(regs), saved_sp); + printk(KERN_ERR "Saved registers for jprobe %p\n", jp); + show_regs(saved_regs); + printk(KERN_ERR "Current registers\n"); + show_regs(regs); + BUG(); + } ++ /* It's OK to start function graph tracing again */ ++ unpause_graph_tracing(); + *regs = kcb->jprobe_saved_regs; +- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), +- kcb->jprobes_stack, +- MIN_STACK_SIZE(kcb->jprobe_saved_sp)); ++ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); + preempt_enable_no_resched(); + return 1; + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 0c90f4b..de42688 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -2320,12 +2320,12 @@ static __init void nested_vmx_setup_ctls_msrs(void) + nested_vmx_secondary_ctls_low = 0; + nested_vmx_secondary_ctls_high &= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | +- SECONDARY_EXEC_UNRESTRICTED_GUEST | + SECONDARY_EXEC_WBINVD_EXITING; + + if (enable_ept) { + /* nested EPT: emulate EPT also to L1 */ +- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; ++ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT | ++ SECONDARY_EXEC_UNRESTRICTED_GUEST; + nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | + VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | + VMX_EPT_INVEPT_BIT; +diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c +index 531d426..bd16d6c 100644 +--- a/arch/x86/um/sys_call_table_32.c ++++ b/arch/x86/um/sys_call_table_32.c +@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void); + + extern asmlinkage void sys_ni_syscall(void); + +-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { ++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { + /* + * Smells like a compiler bug -- it doesn't work + * when the & below is removed. +diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c +index f2f0723..9578308 100644 +--- a/arch/x86/um/sys_call_table_64.c ++++ b/arch/x86/um/sys_call_table_64.c +@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void); + + extern void sys_ni_syscall(void); + +-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { ++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { + /* + * Smells like a compiler bug -- it doesn't work + * when the & below is removed. 
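The two sys_call_table hunks above drop __cacheline_aligned in favour of ____cacheline_aligned. The four-underscore variant only aligns the object, while the two-underscore variant additionally forces it into the .data..cacheline_aligned section; for a const table in a UML build the section placement is the problem, not the alignment, since a const object forced into a data section can clash with non-const data already there. A minimal sketch of the distinction, assuming SMP_CACHE_BYTES is 64 and paraphrasing the definitions in include/linux/cache.h (example_table is illustrative, not the kernel's table):

    /* Alignment only: the object stays in whatever section it would
     * normally land in (.rodata for a const table). */
    #define ____cacheline_aligned __attribute__((__aligned__(64)))

    /* Alignment plus a forced section: placing a const object here can
     * be rejected by newer compilers as a section type conflict. */
    #define __cacheline_aligned \
            __attribute__((__aligned__(64), \
                           __section__(".data..cacheline_aligned")))

    typedef void (*sys_call_ptr_t)(void);

    static void sys_ni_syscall(void) { }

    /* Mirrors the fixed declaration: const, cacheline-aligned, and
     * left in .rodata where a const table belongs. */
    static const sys_call_ptr_t example_table[] ____cacheline_aligned = {
            sys_ni_syscall,
    };

    int main(void)
    {
            return example_table[0] == sys_ni_syscall ? 0 : 1;
    }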
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index f9c4632..7145f6d 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -2232,14 +2232,17 @@ int __clk_get(struct clk *clk) + + void __clk_put(struct clk *clk) + { ++ struct module *owner; ++ + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) + return; + + clk_prepare_lock(); ++ owner = clk->owner; + kref_put(&clk->ref, __clk_release); + clk_prepare_unlock(); + +- module_put(clk->owner); ++ module_put(owner); + } + + /*** clk rate change notifiers ***/ +diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c +index 884187f..7f30b94 100644 +--- a/drivers/clk/samsung/clk-exynos-audss.c ++++ b/drivers/clk/samsung/clk-exynos-audss.c +@@ -210,6 +210,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev) + { + int i; + ++#ifdef CONFIG_PM_SLEEP ++ unregister_syscore_ops(&exynos_audss_clk_syscore_ops); ++#endif ++ + of_clk_del_provider(pdev->dev.of_node); + + for (i = 0; i < clk_data.clk_num; i++) { +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index e0a98f5..74ed17d 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -44,8 +44,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) + return false; + + ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); +- if (ret < 0) +- return false; ++ if (ret < 0) { ++ /* We've found the gpio chip, but the translation failed. ++ * Return true to stop looking and return the translation ++ * error via out_gpio ++ */ ++ gg_data->out_gpio = ERR_PTR(ret); ++ return true; ++ } + + gg_data->out_gpio = gpio_to_desc(ret + gc->base); + return true; +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 50c4922..5b88c83 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1222,6 +1222,9 @@ int gpiochip_add(struct gpio_chip *chip) + + spin_unlock_irqrestore(&gpio_lock, flags); + ++ if (status) ++ goto fail; ++ + #ifdef CONFIG_PINCTRL + INIT_LIST_HEAD(&chip->pin_ranges); + #endif +@@ -1229,12 +1232,12 @@ int gpiochip_add(struct gpio_chip *chip) + of_gpiochip_add(chip); + acpi_gpiochip_add(chip); + +- if (status) +- goto fail; +- + status = gpiochip_export(chip); +- if (status) ++ if (status) { ++ acpi_gpiochip_remove(chip); ++ of_gpiochip_remove(chip); + goto fail; ++ } + + pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, + chip->base, chip->base + chip->ngpio - 1, +diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c +index 3c78b22..800e06c 100644 +--- a/drivers/gpu/drm/i915/i915_gem_stolen.c ++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c +@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) + r = devm_request_mem_region(dev->dev, base + 1, + dev_priv->gtt.stolen_size - 1, + "Graphics Stolen Memory"); +- if (r == NULL) { ++ /* ++ * GEN3 firmware likes to smash pci bridges into the stolen ++ * range. Apparently this works. 
++ */ ++ if (r == NULL && !IS_GEN3(dev)) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 0a3b938..0c83b3d 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -320,6 +320,7 @@ + #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ + #define PIPE_CONTROL_CS_STALL (1<<20) + #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) ++#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) + #define PIPE_CONTROL_QW_WRITE (1<<14) + #define PIPE_CONTROL_DEPTH_STALL (1<<13) + #define PIPE_CONTROL_WRITE_FLUSH (1<<12) +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c +index d488fc7..d2af1e1 100644 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c +@@ -334,12 +334,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; ++ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + ++ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; ++ + /* Workaround: we must issue a pipe_control with CS-stall bit + * set before a pipe_control command that has the state cache + * invalidate bit set. */ +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c +index c879631..b6c063c 100644 +--- a/drivers/gpu/drm/i915/intel_uncore.c ++++ b/drivers/gpu/drm/i915/intel_uncore.c +@@ -451,8 +451,8 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) + static void + assert_device_not_suspended(struct drm_i915_private *dev_priv) + { +- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, +- "Device suspended\n"); ++ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, ++ "Device suspended\n"); + } + + #define REG_READ_HEADER(x) \ +diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c +index a75c35c..165401c 100644 +--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c ++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c +@@ -24,13 +24,6 @@ + + #include "nv04.h" + +-static void +-nv4c_mc_msi_rearm(struct nouveau_mc *pmc) +-{ +- struct nv04_mc_priv *priv = (void *)pmc; +- nv_wr08(priv, 0x088050, 0xff); +-} +- + struct nouveau_oclass * + nv4c_mc_oclass = &(struct nouveau_mc_oclass) { + .base.handle = NV_SUBDEV(MC, 0x4c), +@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) { + .fini = _nouveau_mc_fini, + }, + .intr = nv04_mc_intr, +- .msi_rearm = nv4c_mc_msi_rearm, + }.base; +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c +index 5727dbd..b4dbaded 100644 +--- a/drivers/gpu/drm/radeon/atombios_dp.c ++++ b/drivers/gpu/drm/radeon/atombios_dp.c +@@ -576,6 +576,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, + struct radeon_connector_atom_dig *dig_connector; + int dp_clock; + ++ if ((mode->clock > 340000) && ++ (!radeon_connector_is_dp12_capable(connector))) ++ return MODE_CLOCK_HIGH; ++ + if (!radeon_connector->con_priv) + return MODE_CLOCK_HIGH; + dig_connector = radeon_connector->con_priv; +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 543ba2d..c7c2856 100644 +--- 
a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -4733,7 +4733,7 @@ void ci_dpm_disable(struct radeon_device *rdev) + ci_enable_spread_spectrum(rdev, false); + ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + ci_stop_dpm(rdev); +- ci_enable_ds_master_switch(rdev, true); ++ ci_enable_ds_master_switch(rdev, false); + ci_enable_ulv(rdev, false); + ci_clear_vc(rdev); + ci_reset_to_default(rdev); +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index ddf70d6..8ef67cb 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -5879,6 +5879,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) + } + + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); ++ data |= 0x00000001; + data &= 0xfffffffd; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); +@@ -5910,7 +5911,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable) + } + } else { + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); +- data |= 0x00000002; ++ data |= 0x00000003; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 040a2a1..45a9a03 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -191,7 +191,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, + rbo = container_of(bo, struct radeon_bo, tbo); + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: +- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) ++ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); + else + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index cf4bad2..76329d2 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, + * + * @pool: to free the pages from + * @free_all: If set to true will free all pages in pool +- * @gfp: GFP flags. ++ * @use_static: Safe to use static buffer + **/ + static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, +- gfp_t gfp) ++ bool use_static) + { ++ static struct page *static_buf[NUM_PAGES_TO_ALLOC]; + unsigned long irq_flags; + struct page *p; + struct page **pages_to_free; +@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, + if (NUM_PAGES_TO_ALLOC < nr_free) + npages_to_free = NUM_PAGES_TO_ALLOC; + +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); ++ if (use_static) ++ pages_to_free = static_buf; ++ else ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), ++ GFP_KERNEL); + if (!pages_to_free) { + pr_err("Failed to allocate memory for pool free operation\n"); + return 0; +@@ -374,7 +379,8 @@ restart: + if (freed_pages) + ttm_pages_put(pages_to_free, freed_pages); + out: +- kfree(pages_to_free); ++ if (pages_to_free != static_buf) ++ kfree(pages_to_free); + return nr_free; + } + +@@ -383,8 +389,6 @@ out: + * + * XXX: (dchinner) Deadlock warning! + * +- * We need to pass sc->gfp_mask to ttm_page_pool_free(). +- * + * This code is crying out for a shrinker per pool.... 
+ */ + static unsigned long +@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + if (shrink_pages == 0) + break; + pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; +- shrink_pages = ttm_page_pool_free(pool, nr_free, +- sc->gfp_mask); ++ /* OK to use static buffer since global mutex is held. */ ++ shrink_pages = ttm_page_pool_free(pool, nr_free, true); + freed += nr_free - shrink_pages; + } + mutex_unlock(&lock); +@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, + } + spin_unlock_irqrestore(&pool->lock, irq_flags); + if (npages) +- ttm_page_pool_free(pool, npages, GFP_KERNEL); ++ ttm_page_pool_free(pool, npages, false); + } + + /* +@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void) + pr_info("Finalizing pool allocator\n"); + ttm_pool_mm_shrink_fini(_manager); + ++ /* OK to use static buffer since global mutex is no longer used. */ + for (i = 0; i < NUM_POOLS; ++i) +- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, +- GFP_KERNEL); ++ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); + + kobject_put(&_manager->kobj); + _manager = NULL; +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +index ca65df1..3dfa97d 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) + * + * @pool: to free the pages from + * @nr_free: If set to true will free all pages in pool +- * @gfp: GFP flags. ++ * @use_static: Safe to use static buffer + **/ + static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, +- gfp_t gfp) ++ bool use_static) + { ++ static struct page *static_buf[NUM_PAGES_TO_ALLOC]; + unsigned long irq_flags; + struct dma_page *dma_p, *tmp; + struct page **pages_to_free; +@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, + npages_to_free, nr_free); + } + #endif +- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); ++ if (use_static) ++ pages_to_free = static_buf; ++ else ++ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), ++ GFP_KERNEL); + + if (!pages_to_free) { + pr_err("%s: Failed to allocate memory for pool free operation\n", +@@ -502,7 +507,8 @@ restart: + if (freed_pages) + ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); + out: +- kfree(pages_to_free); ++ if (pages_to_free != static_buf) ++ kfree(pages_to_free); + return nr_free; + } + +@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type) + if (pool->type != type) + continue; + /* Takes a spinlock.. */ +- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL); ++ /* OK to use static buffer since global mutex is held. 
*/ ++ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); + WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); + /* This code path is called after _all_ references to the + * struct device has been dropped - so nobody should be +@@ -984,7 +991,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) + + /* shrink pool if necessary (only on !is_cached pools)*/ + if (npages) +- ttm_dma_page_pool_free(pool, npages, GFP_KERNEL); ++ ttm_dma_page_pool_free(pool, npages, false); + ttm->state = tt_unpopulated; + } + EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); +@@ -994,8 +1001,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); + * + * XXX: (dchinner) Deadlock warning! + * +- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free(). +- * + * I'm getting sadder as I hear more pathetical whimpers about needing per-pool + * shrinkers + */ +@@ -1028,8 +1033,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + if (++idx < pool_offset) + continue; + nr_free = shrink_pages; +- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, +- sc->gfp_mask); ++ /* OK to use static buffer since global mutex is held. */ ++ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); + freed += nr_free - shrink_pages; + + pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +index 436b013..b65272d 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +@@ -1049,6 +1049,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, + if (ret != 0) + goto out_no_queue; + ++ return 0; ++ + out_no_queue: + event->base.destroy(&event->base); + out_no_event: +@@ -1124,17 +1126,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, + + BUG_ON(fence == NULL); + +- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) +- ret = vmw_event_fence_action_create(file_priv, fence, +- arg->flags, +- arg->user_data, +- true); +- else +- ret = vmw_event_fence_action_create(file_priv, fence, +- arg->flags, +- arg->user_data, +- true); +- ++ ret = vmw_event_fence_action_create(file_priv, fence, ++ arg->flags, ++ arg->user_data, ++ true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to attach event to fence.\n"); +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index a96cfc3..6014227 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex); + static LIST_HEAD(device_list); + static struct workqueue_struct *isert_rx_wq; + static struct workqueue_struct *isert_comp_wq; ++static struct workqueue_struct *isert_release_wq; + + static void + isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); +@@ -52,6 +53,11 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); + static int + isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct isert_rdma_wr *wr); ++static int ++isert_rdma_post_recvl(struct isert_conn *isert_conn); ++static int ++isert_rdma_accept(struct isert_conn *isert_conn); ++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); + + static void + isert_qp_event_callback(struct ib_event *e, void *context) +@@ -132,12 +138,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) + ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); + if (ret) { + 
pr_err("rdma_create_qp failed for cma_id %d\n", ret); +- return ret; ++ goto err; + } + isert_conn->conn_qp = cma_id->qp; + pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); + + return 0; ++err: ++ mutex_lock(&device_list_mutex); ++ device->cq_active_qps[min_index]--; ++ mutex_unlock(&device_list_mutex); ++ ++ return ret; + } + + static void +@@ -489,8 +501,8 @@ err: + static int + isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + { +- struct iscsi_np *np = cma_id->context; +- struct isert_np *isert_np = np->np_context; ++ struct isert_np *isert_np = cma_id->context; ++ struct iscsi_np *np = isert_np->np; + struct isert_conn *isert_conn; + struct isert_device *device; + struct ib_device *ib_dev = cma_id->device; +@@ -515,6 +527,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + isert_conn->state = ISER_CONN_INIT; + INIT_LIST_HEAD(&isert_conn->conn_accept_node); + init_completion(&isert_conn->conn_login_comp); ++ init_completion(&isert_conn->login_req_comp); + init_completion(&isert_conn->conn_wait); + init_completion(&isert_conn->conn_wait_comp_err); + kref_init(&isert_conn->conn_kref); +@@ -522,7 +535,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + spin_lock_init(&isert_conn->conn_lock); + INIT_LIST_HEAD(&isert_conn->conn_fr_pool); + +- cma_id->context = isert_conn; + isert_conn->conn_cm_id = cma_id; + isert_conn->responder_resources = event->param.conn.responder_resources; + isert_conn->initiator_depth = event->param.conn.initiator_depth; +@@ -596,6 +608,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + if (ret) + goto out_conn_dev; + ++ ret = isert_rdma_post_recvl(isert_conn); ++ if (ret) ++ goto out_conn_dev; ++ ++ ret = isert_rdma_accept(isert_conn); ++ if (ret) ++ goto out_conn_dev; ++ + mutex_lock(&isert_np->np_accept_mutex); + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); + mutex_unlock(&isert_np->np_accept_mutex); +@@ -620,6 +640,7 @@ out_login_buf: + kfree(isert_conn->login_buf); + out: + kfree(isert_conn); ++ rdma_reject(cma_id, NULL, 0); + return ret; + } + +@@ -635,18 +656,20 @@ isert_connect_release(struct isert_conn *isert_conn) + if (device && device->use_fastreg) + isert_conn_free_fastreg_pool(isert_conn); + ++ isert_free_rx_descriptors(isert_conn); ++ rdma_destroy_id(isert_conn->conn_cm_id); ++ + if (isert_conn->conn_qp) { + cq_index = ((struct isert_cq_desc *) + isert_conn->conn_qp->recv_cq->cq_context)->cq_index; + pr_debug("isert_connect_release: cq_index: %d\n", cq_index); ++ mutex_lock(&device_list_mutex); + isert_conn->conn_device->cq_active_qps[cq_index]--; ++ mutex_unlock(&device_list_mutex); + +- rdma_destroy_qp(isert_conn->conn_cm_id); ++ ib_destroy_qp(isert_conn->conn_qp); + } + +- isert_free_rx_descriptors(isert_conn); +- rdma_destroy_id(isert_conn->conn_cm_id); +- + ib_dereg_mr(isert_conn->conn_mr); + ib_dealloc_pd(isert_conn->conn_pd); + +@@ -669,9 +692,19 @@ isert_connect_release(struct isert_conn *isert_conn) + static void + isert_connected_handler(struct rdma_cm_id *cma_id) + { +- struct isert_conn *isert_conn = cma_id->context; ++ struct isert_conn *isert_conn = cma_id->qp->qp_context; ++ ++ pr_info("conn %p\n", isert_conn); + +- kref_get(&isert_conn->conn_kref); ++ if (!kref_get_unless_zero(&isert_conn->conn_kref)) { ++ pr_warn("conn %p connect_release is running\n", isert_conn); ++ return; ++ } ++ ++ mutex_lock(&isert_conn->conn_mutex); ++ if (isert_conn->state != 
ISER_CONN_FULL_FEATURE)
++ isert_conn->state = ISER_CONN_UP;
++ mutex_unlock(&isert_conn->conn_mutex);
+ }
+ 
+ static void
+@@ -692,65 +725,108 @@ isert_put_conn(struct isert_conn *isert_conn)
+ kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
+ 
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is FULL_FEATURE, move state
++ * to TERMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
+ static void
+-isert_disconnect_work(struct work_struct *work)
++isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+- struct isert_conn *isert_conn = container_of(work,
+- struct isert_conn, conn_logout_work);
++ int err;
+ 
+- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+- mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->state == ISER_CONN_UP)
++ switch (isert_conn->state) {
++ case ISER_CONN_TERMINATING:
++ break;
++ case ISER_CONN_UP:
++ /*
++ * No flush completions will occur as we didn't
++ * get to ISER_CONN_FULL_FEATURE yet, complete
++ * to allow teardown progress.
++ */
++ complete(&isert_conn->conn_wait_comp_err);
++ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++ pr_info("Terminating conn %p state %d\n",
++ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+-
+- if (isert_conn->post_recv_buf_count == 0 &&
+- atomic_read(&isert_conn->post_send_buf_count) == 0) {
+- mutex_unlock(&isert_conn->conn_mutex);
+- goto wake_up;
+- }
+- if (!isert_conn->conn_cm_id) {
+- mutex_unlock(&isert_conn->conn_mutex);
+- isert_put_conn(isert_conn);
+- return;
++ err = rdma_disconnect(isert_conn->conn_cm_id);
++ if (err)
++ pr_warn("Failed rdma_disconnect isert_conn %p\n",
++ isert_conn);
++ break;
++ default:
++ pr_warn("conn %p terminating in state %d\n",
++ isert_conn, isert_conn->state);
+ }
++}
+ 
+- if (isert_conn->disconnect) {
+- /* Send DREQ/DREP towards our initiator */
+- rdma_disconnect(isert_conn->conn_cm_id);
+- }
++static int
++isert_np_cma_handler(struct isert_np *isert_np,
++ enum rdma_cm_event_type event)
++{
++ pr_debug("isert np %p, handling event %d\n", isert_np, event);
+ 
+- mutex_unlock(&isert_conn->conn_mutex);
++ switch (event) {
++ case RDMA_CM_EVENT_DEVICE_REMOVAL:
++ isert_np->np_cm_id = NULL;
++ break;
++ case RDMA_CM_EVENT_ADDR_CHANGE:
++ isert_np->np_cm_id = isert_setup_id(isert_np);
++ if (IS_ERR(isert_np->np_cm_id)) {
++ pr_err("isert np %p setup id failed: %ld\n",
++ isert_np, PTR_ERR(isert_np->np_cm_id));
++ isert_np->np_cm_id = NULL;
++ }
++ break;
++ default:
++ pr_err("isert np %p Unexpected event %d\n",
++ isert_np, event);
++ }
+ 
+-wake_up:
+- complete(&isert_conn->conn_wait);
++ return -1;
+ }
+ 
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++ enum rdma_cm_event_type event)
+ {
++ struct isert_np *isert_np = cma_id->context;
+ struct isert_conn *isert_conn;
+ 
+- if (!cma_id->qp) {
+- struct isert_np *isert_np = cma_id->context;
++ if (isert_np->np_cm_id == cma_id)
++ return isert_np_cma_handler(cma_id->context, event);
+ 
+- isert_np->np_cm_id = NULL;
+- return -1;
+- }
++ isert_conn = cma_id->qp->qp_context;
+ 
+- isert_conn = (struct isert_conn *)cma_id->context;
++ mutex_lock(&isert_conn->conn_mutex);
++ 
isert_conn_terminate(isert_conn); ++ mutex_unlock(&isert_conn->conn_mutex); + +- isert_conn->disconnect = disconnect; +- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); +- schedule_work(&isert_conn->conn_logout_work); ++ pr_info("conn %p completing conn_wait\n", isert_conn); ++ complete(&isert_conn->conn_wait); + + return 0; + } + ++static void ++isert_connect_error(struct rdma_cm_id *cma_id) ++{ ++ struct isert_conn *isert_conn = cma_id->qp->qp_context; ++ ++ isert_put_conn(isert_conn); ++} ++ + static int + isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + { + int ret = 0; +- bool disconnect = false; + + pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", + event->event, event->status, cma_id->context, cma_id); +@@ -768,11 +844,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ +- disconnect = true; + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ +- ret = isert_disconnected_handler(cma_id, disconnect); ++ ret = isert_disconnected_handler(cma_id, event->event); + break; ++ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ ++ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ + case RDMA_CM_EVENT_CONNECT_ERROR: ++ isert_connect_error(cma_id); ++ break; + default: + pr_err("Unhandled RDMA CMA event: %d\n", event->event); + break; +@@ -906,7 +985,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. + */ + mutex_lock(&isert_conn->conn_mutex); +- if (coalesce && isert_conn->state == ISER_CONN_UP && ++ if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE && + ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + tx_desc->llnode_active = true; + llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); +@@ -1003,7 +1082,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + if (ret) + return ret; + +- isert_conn->state = ISER_CONN_UP; ++ /* Now we are in FULL_FEATURE phase */ ++ mutex_lock(&isert_conn->conn_mutex); ++ isert_conn->state = ISER_CONN_FULL_FEATURE; ++ mutex_unlock(&isert_conn->conn_mutex); + goto post_send; + } + +@@ -1020,18 +1102,17 @@ post_send: + } + + static void +-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, +- struct isert_conn *isert_conn) ++isert_rx_login_req(struct isert_conn *isert_conn) + { ++ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; ++ int rx_buflen = isert_conn->login_req_len; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_login *login = conn->conn_login; + int size; + +- if (!login) { +- pr_err("conn->conn_login is NULL\n"); +- dump_stack(); +- return; +- } ++ pr_info("conn %p\n", isert_conn); ++ ++ WARN_ON_ONCE(!login); + + if (login->first_request) { + struct iscsi_login_req *login_req = +@@ -1394,11 +1475,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, + hdr->opcode, hdr->itt, hdr->flags, + (int)(xfer_len - ISER_HEADERS_LEN)); + +- if ((char *)desc == isert_conn->login_req_buf) +- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN, +- isert_conn); +- else ++ if ((char *)desc == isert_conn->login_req_buf) { ++ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; ++ if (isert_conn->conn) { ++ struct iscsi_login *login = isert_conn->conn->conn_login; ++ ++ if (login && !login->first_request) ++ 
isert_rx_login_req(isert_conn); ++ } ++ mutex_lock(&isert_conn->conn_mutex); ++ complete(&isert_conn->login_req_comp); ++ mutex_unlock(&isert_conn->conn_mutex); ++ } else { + isert_rx_do_work(desc, isert_conn); ++ } + + ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, + DMA_FROM_DEVICE); +@@ -1799,7 +1889,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn) + msleep(3000); + + mutex_lock(&isert_conn->conn_mutex); +- isert_conn->state = ISER_CONN_DOWN; ++ isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); + + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); +@@ -2579,13 +2669,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) + return ret; + } + ++struct rdma_cm_id * ++isert_setup_id(struct isert_np *isert_np) ++{ ++ struct iscsi_np *np = isert_np->np; ++ struct rdma_cm_id *id; ++ struct sockaddr *sa; ++ int ret; ++ ++ sa = (struct sockaddr *)&np->np_sockaddr; ++ pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); ++ ++ id = rdma_create_id(isert_cma_handler, isert_np, ++ RDMA_PS_TCP, IB_QPT_RC); ++ if (IS_ERR(id)) { ++ pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); ++ ret = PTR_ERR(id); ++ goto out; ++ } ++ pr_debug("id %p context %p\n", id, id->context); ++ ++ ret = rdma_bind_addr(id, sa); ++ if (ret) { ++ pr_err("rdma_bind_addr() failed: %d\n", ret); ++ goto out_id; ++ } ++ ++ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); ++ if (ret) { ++ pr_err("rdma_listen() failed: %d\n", ret); ++ goto out_id; ++ } ++ ++ return id; ++out_id: ++ rdma_destroy_id(id); ++out: ++ return ERR_PTR(ret); ++} ++ + static int + isert_setup_np(struct iscsi_np *np, + struct __kernel_sockaddr_storage *ksockaddr) + { + struct isert_np *isert_np; + struct rdma_cm_id *isert_lid; +- struct sockaddr *sa; + int ret; + + isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); +@@ -2597,9 +2725,8 @@ isert_setup_np(struct iscsi_np *np, + mutex_init(&isert_np->np_accept_mutex); + INIT_LIST_HEAD(&isert_np->np_accept_list); + init_completion(&isert_np->np_login_comp); ++ isert_np->np = np; + +- sa = (struct sockaddr *)ksockaddr; +- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); + /* + * Setup the np->np_sockaddr from the passed sockaddr setup + * in iscsi_target_configfs.c code.. 
+@@ -2607,37 +2734,20 @@ isert_setup_np(struct iscsi_np *np, + memcpy(&np->np_sockaddr, ksockaddr, + sizeof(struct __kernel_sockaddr_storage)); + +- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, +- IB_QPT_RC); ++ isert_lid = isert_setup_id(isert_np); + if (IS_ERR(isert_lid)) { +- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", +- PTR_ERR(isert_lid)); + ret = PTR_ERR(isert_lid); + goto out; + } + +- ret = rdma_bind_addr(isert_lid, sa); +- if (ret) { +- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); +- goto out_lid; +- } +- +- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); +- if (ret) { +- pr_err("rdma_listen() for isert_lid failed: %d\n", ret); +- goto out_lid; +- } +- + isert_np->np_cm_id = isert_lid; + np->np_context = isert_np; +- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); + + return 0; + +-out_lid: +- rdma_destroy_id(isert_lid); + out: + kfree(isert_np); ++ + return ret; + } + +@@ -2673,7 +2783,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + int ret; + +- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); ++ pr_info("before login_req comp conn: %p\n", isert_conn); ++ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); ++ if (ret) { ++ pr_err("isert_conn %p interrupted before got login req\n", ++ isert_conn); ++ return ret; ++ } ++ reinit_completion(&isert_conn->login_req_comp); ++ + /* + * For login requests after the first PDU, isert_rx_login_req() will + * kick schedule_delayed_work(&conn->login_work) as the packet is +@@ -2683,11 +2801,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) + if (!login->first_request) + return 0; + ++ isert_rx_login_req(isert_conn); ++ ++ pr_info("before conn_login_comp conn: %p\n", conn); + ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); + if (ret) + return ret; + +- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); ++ pr_info("processing login->req: %p\n", login->req); ++ + return 0; + } + +@@ -2765,17 +2887,10 @@ accept_wait: + isert_conn->conn = conn; + max_accept = 0; + +- ret = isert_rdma_post_recvl(isert_conn); +- if (ret) +- return ret; +- +- ret = isert_rdma_accept(isert_conn); +- if (ret) +- return ret; +- + isert_set_conn_info(np, conn, isert_conn); + +- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); ++ pr_debug("Processing isert_conn: %p\n", isert_conn); ++ + return 0; + } + +@@ -2791,6 +2906,24 @@ isert_free_np(struct iscsi_np *np) + kfree(isert_np); + } + ++static void isert_release_work(struct work_struct *work) ++{ ++ struct isert_conn *isert_conn = container_of(work, ++ struct isert_conn, ++ release_work); ++ ++ pr_info("Starting release conn %p\n", isert_conn); ++ ++ wait_for_completion(&isert_conn->conn_wait); ++ ++ mutex_lock(&isert_conn->conn_mutex); ++ isert_conn->state = ISER_CONN_DOWN; ++ mutex_unlock(&isert_conn->conn_mutex); ++ ++ pr_info("Destroying conn %p\n", isert_conn); ++ isert_put_conn(isert_conn); ++} ++ + static void isert_wait_conn(struct iscsi_conn *conn) + { + struct isert_conn *isert_conn = conn->context; +@@ -2798,10 +2931,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) + pr_debug("isert_wait_conn: Starting \n"); + + mutex_lock(&isert_conn->conn_mutex); +- if (isert_conn->conn_cm_id) { +- pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); +- rdma_disconnect(isert_conn->conn_cm_id); +- } + 
/* + * Only wait for conn_wait_comp_err if the isert_conn made it + * into full feature phase.. +@@ -2810,14 +2939,13 @@ static void isert_wait_conn(struct iscsi_conn *conn) + mutex_unlock(&isert_conn->conn_mutex); + return; + } +- if (isert_conn->state == ISER_CONN_UP) +- isert_conn->state = ISER_CONN_TERMINATING; ++ isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); + + wait_for_completion(&isert_conn->conn_wait_comp_err); + +- wait_for_completion(&isert_conn->conn_wait); +- isert_put_conn(isert_conn); ++ INIT_WORK(&isert_conn->release_work, isert_release_work); ++ queue_work(isert_release_wq, &isert_conn->release_work); + } + + static void isert_free_conn(struct iscsi_conn *conn) +@@ -2863,10 +2991,21 @@ static int __init isert_init(void) + goto destroy_rx_wq; + } + ++ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, ++ WQ_UNBOUND_MAX_ACTIVE); ++ if (!isert_release_wq) { ++ pr_err("Unable to allocate isert_release_wq\n"); ++ ret = -ENOMEM; ++ goto destroy_comp_wq; ++ } ++ + iscsit_register_transport(&iser_target_transport); +- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); ++ pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); ++ + return 0; + ++destroy_comp_wq: ++ destroy_workqueue(isert_comp_wq); + destroy_rx_wq: + destroy_workqueue(isert_rx_wq); + return ret; +@@ -2875,6 +3014,7 @@ destroy_rx_wq: + static void __exit isert_exit(void) + { + flush_scheduled_work(); ++ destroy_workqueue(isert_release_wq); + destroy_workqueue(isert_comp_wq); + destroy_workqueue(isert_rx_wq); + iscsit_unregister_transport(&iser_target_transport); +diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h +index cbecaab..1178c5b 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.h ++++ b/drivers/infiniband/ulp/isert/ib_isert.h +@@ -23,6 +23,7 @@ enum iser_ib_op_code { + enum iser_conn_state { + ISER_CONN_INIT, + ISER_CONN_UP, ++ ISER_CONN_FULL_FEATURE, + ISER_CONN_TERMINATING, + ISER_CONN_DOWN, + }; +@@ -102,6 +103,7 @@ struct isert_conn { + char *login_req_buf; + char *login_rsp_buf; + u64 login_req_dma; ++ int login_req_len; + u64 login_rsp_dma; + unsigned int conn_rx_desc_head; + struct iser_rx_desc *conn_rx_descs; +@@ -109,13 +111,13 @@ struct isert_conn { + struct iscsi_conn *conn; + struct list_head conn_accept_node; + struct completion conn_login_comp; ++ struct completion login_req_comp; + struct iser_tx_desc conn_login_tx_desc; + struct rdma_cm_id *conn_cm_id; + struct ib_pd *conn_pd; + struct ib_mr *conn_mr; + struct ib_qp *conn_qp; + struct isert_device *conn_device; +- struct work_struct conn_logout_work; + struct mutex conn_mutex; + struct completion conn_wait; + struct completion conn_wait_comp_err; +@@ -124,10 +126,10 @@ struct isert_conn { + int conn_fr_pool_size; + /* lock to protect fastreg pool */ + spinlock_t conn_lock; ++ struct work_struct release_work; + #define ISERT_COMP_BATCH_COUNT 8 + int conn_comp_batch; + struct llist_head conn_comp_llist; +- bool disconnect; + }; + + #define ISERT_MAX_CQ 64 +@@ -158,6 +160,7 @@ struct isert_device { + }; + + struct isert_np { ++ struct iscsi_np *np; + struct semaphore np_sem; + struct rdma_cm_id *np_cm_id; + struct mutex np_accept_mutex; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 8fca488f..c43c46f 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -408,6 +408,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + 
},
+ },
+ {
++ /* Acer Aspire 7738 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
++ },
++ },
++ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+@@ -721,6 +728,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
+ { }
+ };
+ 
++/*
++ * Some laptops need a keyboard reset before probing for the trackpad to
++ * get it detected, initialised and finally working.
++ */
++static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
++ {
++ /* Gigabyte P35 v2 - Elantech touchpad */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
++ },
++ },
++ {
++ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
++ },
++ },
++ {
++ /* Gigabyte P34 - Elantech touchpad */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
++ },
++ },
++ { }
++};
++
+ #endif /* CONFIG_X86 */
+ 
+ #ifdef CONFIG_PNP
+@@ -1016,6 +1052,9 @@ static int __init i8042_platform_init(void)
+ if (dmi_check_system(i8042_dmi_dritek_table))
+ i8042_dritek = true;
+ 
++ if (dmi_check_system(i8042_dmi_kbdreset_table))
++ i8042_kbdreset = true;
++
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 3807c3e..eb796ff 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -67,6 +67,10 @@ static bool i8042_notimeout;
+ module_param_named(notimeout, i8042_notimeout, bool, 0);
+ MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+ 
++static bool i8042_kbdreset;
++module_param_named(kbdreset, i8042_kbdreset, bool, 0);
++MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
++
+ #ifdef CONFIG_X86
+ static bool i8042_dritek;
+ module_param_named(dritek, i8042_dritek, bool, 0);
+@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
+ return -1;
+ 
+ /*
++ * Reset keyboard (needed on some laptops to successfully detect
++ * touchpad, e.g., some Gigabyte laptop models with Elantech
++ * touchpads).
++ */
++ if (i8042_kbdreset) {
++ pr_warn("Attempting to reset device connected to KBD port\n");
++ i8042_kbd_write(NULL, (unsigned char) 0xff);
++ }
++
++/*
+ * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
+ * used it for a PCI card or somethig else. 
+ */ +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index 5f9c2a6..fbcb622 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b) + struct bset *i = btree_bset_first(b); + struct btree_iter *iter; + +- iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT); ++ iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); + iter->size = b->c->sb.bucket_size / b->c->sb.block_size; + iter->used = 0; + +diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c +index 2335529..ab5d9a3 100644 +--- a/drivers/media/i2c/smiapp-pll.c ++++ b/drivers/media/i2c/smiapp-pll.c +@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll) + { + dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div); + dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier); +- if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { ++ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) { + dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div); + dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div); + } +@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll) + dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz); + dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz); + dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz); +- if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { ++ if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) { + dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n", + pll->op_sys_clk_freq_hz); + dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n", +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c +index 7026ab0..873d062 100644 +--- a/drivers/media/i2c/smiapp/smiapp-core.c ++++ b/drivers/media/i2c/smiapp/smiapp-core.c +@@ -2624,7 +2624,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev) + pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE; + pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; + ++ mutex_lock(&sensor->mutex); + rval = smiapp_update_mode(sensor); ++ mutex_unlock(&sensor->mutex); + if (rval) { + dev_err(&client->dev, "update mode failed\n"); + goto out_nvm_release; +diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c +index dd32dec..1d4b110 100644 +--- a/drivers/media/usb/au0828/au0828-cards.c ++++ b/drivers/media/usb/au0828/au0828-cards.c +@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable) + au0828_clear(dev, REG_000, 0x10); + } + ++/* ++ * WARNING: There's a quirks table at sound/usb/quirks-table.h ++ * that should also be updated every time a new device with V4L2 support ++ * is added here. ++ */ + struct au0828_board au0828_boards[] = { + [AU0828_BOARD_UNKNOWN] = { + .name = "Unknown board", +diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c +index af176b6..e6d3561 100644 +--- a/drivers/media/usb/dvb-usb/af9005.c ++++ b/drivers/media/usb/dvb-usb/af9005.c +@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void) + err("usb_register failed. 
(%d)", result);
+ return result;
+ }
++#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
++ /* FIXME: convert to today's kernel IR infrastructure */
+ rc_decode = symbol_request(af9005_rc_decode);
+ rc_keys = symbol_request(rc_map_af9005_table);
+ rc_keys_size = symbol_request(rc_map_af9005_table_size);
++#endif
+ if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
+ err("af9005_rc_decode function not found, disabling remote");
+ af9005_properties.rc.legacy.rc_query = NULL;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 753ad4c..4531441 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1603,12 +1603,12 @@ static void uvc_delete(struct uvc_device *dev)
+ {
+ struct list_head *p, *n;
+ 
+- usb_put_intf(dev->intf);
+- usb_put_dev(dev->udev);
+-
+ uvc_status_cleanup(dev);
+ uvc_ctrl_cleanup_device(dev);
+ 
++ usb_put_intf(dev->intf);
++ usb_put_dev(dev->udev);
++
+ if (dev->vdev.dev)
+ v4l2_device_unregister(&dev->vdev);
+ #ifdef CONFIG_MEDIA_CONTROLLER
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index e77d110..4e65b35 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1237,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev)
+ if (err)
+ netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ 
++ /* reset tx contexts */
++ kvaser_usb_unlink_tx_urbs(priv);
++
+ priv->can.state = CAN_STATE_STOPPED;
+ close_candev(priv->netdev);
+ 
+@@ -1285,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ stats->tx_dropped++;
+- goto nourbmem;
++ dev_kfree_skb(skb);
++ return NETDEV_TX_OK;
+ }
+ 
+ buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+ if (!buf) {
+ stats->tx_dropped++;
++ dev_kfree_skb(skb);
+ goto nobufmem;
+ }
+ 
+@@ -1325,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ }
+ }
+ 
++ /* This should never happen; it implies a flow control bug */
+ if (!context) {
+ netdev_warn(netdev, "cannot find free context\n");
+ ret = NETDEV_TX_BUSY;
+@@ -1355,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index);
+ 
+- skb = NULL; /* set to NULL to avoid double free in
+- * dev_kfree_skb(skb) */
+-
+ atomic_dec(&priv->active_tx_urbs);
+ usb_unanchor_urb(urb);
+ 
+@@ -1379,8 +1382,6 @@ releasebuf:
+ kfree(buf);
+ nobufmem:
+ usb_free_urb(urb);
+-nourbmem:
+- dev_kfree_skb(skb);
+ return ret;
+ }
+ 
+@@ -1492,6 +1493,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
+ struct kvaser_usb_net_priv *priv;
+ int i, err;
+ 
++ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
++ if (err)
++ return err;
++
+ netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Cannot alloc candev\n");
+@@ -1595,9 +1600,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+ usb_set_intfdata(intf, dev);
+ 
+- for (i = 0; i < MAX_NET_DEVICES; i++)
+- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+-
+ err = kvaser_usb_get_software_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 380d249..3e1d7d2 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -184,15 +184,16 @@ 
static void alx_schedule_reset(struct alx_priv *alx) + schedule_work(&alx->reset_wk); + } + +-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) ++static int alx_clean_rx_irq(struct alx_priv *alx, int budget) + { + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_rrd *rrd; + struct alx_buffer *rxb; + struct sk_buff *skb; + u16 length, rfd_cleaned = 0; ++ int work = 0; + +- while (budget > 0) { ++ while (work < budget) { + rrd = &rxq->rrd[rxq->rrd_read_idx]; + if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) + break; +@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) + ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_NOR) != 1) { + alx_schedule_reset(alx); +- return 0; ++ return work; + } + + rxb = &rxq->bufs[rxq->read_idx]; +@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) + } + + napi_gro_receive(&alx->napi, skb); +- budget--; ++ work++; + + next_pkt: + if (++rxq->read_idx == alx->rx_ringsz) +@@ -258,21 +259,22 @@ next_pkt: + if (rfd_cleaned) + alx_refill_rx_ring(alx, GFP_ATOMIC); + +- return budget > 0; ++ return work; + } + + static int alx_poll(struct napi_struct *napi, int budget) + { + struct alx_priv *alx = container_of(napi, struct alx_priv, napi); + struct alx_hw *hw = &alx->hw; +- bool complete = true; + unsigned long flags; ++ bool tx_complete; ++ int work; + +- complete = alx_clean_tx_irq(alx) && +- alx_clean_rx_irq(alx, budget); ++ tx_complete = alx_clean_tx_irq(alx); ++ work = alx_clean_rx_irq(alx, budget); + +- if (!complete) +- return 1; ++ if (!tx_complete || work == budget) ++ return budget; + + napi_complete(&alx->napi); + +@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget) + + alx_post_write(hw); + +- return 0; ++ return work; + } + + static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 086eac5..8206113 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -17731,23 +17731,6 @@ static int tg3_init_one(struct pci_dev *pdev, + goto err_out_apeunmap; + } + +- /* +- * Reset chip in case UNDI or EFI driver did not shutdown +- * DMA self test will enable WDMAC and we'll see (spurious) +- * pending DMA on the PCI bus at that point. +- */ +- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || +- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { +- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); +- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); +- } +- +- err = tg3_test_dma(tp); +- if (err) { +- dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); +- goto err_out_apeunmap; +- } +- + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; +@@ -17792,6 +17775,23 @@ static int tg3_init_one(struct pci_dev *pdev, + sndmbx += 0xc; + } + ++ /* ++ * Reset chip in case UNDI or EFI driver did not shutdown ++ * DMA self test will enable WDMAC and we'll see (spurious) ++ * pending DMA on the PCI bus at that point. 
++ */
++ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ }
++
++ err = tg3_test_dma(tp);
++ if (err) {
++ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++ goto err_out_apeunmap;
++ }
++
+ tg3_init_coal(tp);
+ 
+ pci_set_drvdata(pdev, dev);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index b740bfc..ff9b423 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1044,10 +1044,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+ }
+ 
+- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+- skb->csum = htons(checksum);
+- skb->ip_summed = CHECKSUM_COMPLETE;
+- }
++ /* Hardware does not provide the whole packet checksum. It only
++ * provides a pseudo checksum: the hw validates the packet
++ * checksum but does not give us the checksum value itself, so use
++ * CHECKSUM_UNNECESSARY.
++ */
++ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
++ ipv4_csum_ok)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ if (vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 921b9df..316650c 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -596,7 +596,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+ 
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+- priv->host_port);
++ priv->host_port, -1);
+ 
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+@@ -620,6 +620,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ {
+ struct cpsw_priv *priv = netdev_priv(ndev);
++ int vid;
++
++ if (priv->data.dual_emac)
++ vid = priv->slaves[priv->emac_port].port_vlan;
++ else
++ vid = priv->data.default_vlan;
+ 
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+@@ -631,7 +637,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ }
+ 
+ /* Clear all mcast from ALE */
+- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
++ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
++ vid);
+ 
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+@@ -716,6 +723,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
+ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+ {
+ struct cpsw_priv *priv = dev_id;
++ int value = irq - priv->irqs_table[0];
++
++ /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
++ * is to make sure we will always write the correct value to the EOI
++ * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
++ * for TX Interrupt and 3 for MISC Interrupt. 
++ */
++ cpdma_ctlr_eoi(priv->dma, value);
+ 
+ cpsw_intr_disable(priv);
+ if (priv->irq_enabled == true) {
+@@ -745,8 +760,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ int num_tx, num_rx;
+ 
+ num_tx = cpdma_chan_process(priv->txch, 128);
+- if (num_tx)
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+ num_rx = cpdma_chan_process(priv->rxch, budget);
+ if (num_rx < budget) {
+@@ -754,7 +767,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ 
+ napi_complete(napi);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ prim_cpsw = cpsw_get_slave_priv(priv, 0);
+ if (prim_cpsw->irq_enabled == false) {
+ prim_cpsw->irq_enabled = true;
+@@ -1265,8 +1277,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
+ napi_enable(&priv->napi);
+ cpdma_ctlr_start(priv->dma);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = true;
+@@ -1512,9 +1522,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+ cpdma_chan_start(priv->txch);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ 
+ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+@@ -1560,9 +1567,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
+ cpsw_interrupt(ndev->irq, priv);
+ cpdma_ctlr_int_ctrl(priv->dma, true);
+ cpsw_intr_enable(priv);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ #endif
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 7f89306..4eceb7e 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -236,7 +236,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+ 
+-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+ {
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int ret, idx;
+@@ -247,6 +247,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ continue;
+ 
++ /* If the vid passed is -1, remove all multicast entries from
++ * the table irrespective of vlan id. If a valid vlan id is
++ * passed, remove only the multicast entries added to that vlan
++ * id; if the vlan id doesn't match, move on to the next entry. 
++ */ ++ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) ++ continue; ++ + if (cpsw_ale_get_mcast(ale_entry)) { + u8 addr[6]; + +diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h +index de409c3..e701358 100644 +--- a/drivers/net/ethernet/ti/cpsw_ale.h ++++ b/drivers/net/ethernet/ti/cpsw_ale.h +@@ -88,7 +88,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale); + + int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); + int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); +-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); ++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); + int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); + int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 979fe43..32efe83 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind) + static void team_notify_peers_work(struct work_struct *work) + { + struct team *team; ++ int val; + + team = container_of(work, struct team, notify_peers.dw.work); + +@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work) + schedule_delayed_work(&team->notify_peers.dw, 0); + return; + } ++ val = atomic_dec_if_positive(&team->notify_peers.count_pending); ++ if (val < 0) { ++ rtnl_unlock(); ++ return; ++ } + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); + rtnl_unlock(); +- if (!atomic_dec_and_test(&team->notify_peers.count_pending)) ++ if (val) + schedule_delayed_work(&team->notify_peers.dw, + msecs_to_jiffies(team->notify_peers.interval)); + } +@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team) + static void team_mcast_rejoin_work(struct work_struct *work) + { + struct team *team; ++ int val; + + team = container_of(work, struct team, mcast_rejoin.dw.work); + +@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work) + schedule_delayed_work(&team->mcast_rejoin.dw, 0); + return; + } ++ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); ++ if (val < 0) { ++ rtnl_unlock(); ++ return; ++ } + call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); + rtnl_unlock(); +- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) ++ if (val) + schedule_delayed_work(&team->mcast_rejoin.dw, + msecs_to_jiffies(team->mcast_rejoin.interval)); + } +diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c +index 3dc9344..07fbcb0 100644 +--- a/drivers/platform/x86/hp_accel.c ++++ b/drivers/platform/x86/hp_accel.c +@@ -237,6 +237,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { + AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap), + AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap), + AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted), ++ AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted), + { NULL, } + /* Laptop models without axis info (yet): + * "NC6910" "HP Compaq 6910" +diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c +index bb86494..19915c5 100644 +--- a/drivers/s390/char/con3215.c ++++ b/drivers/s390/char/con3215.c +@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data) + unsigned long flags; + + spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); +- if (raw->flags & RAW3215_TIMER_RUNS) { +- del_timer(&raw->timer); +- raw->flags &= ~RAW3215_TIMER_RUNS; +- if 
(!(raw->port.flags & ASYNC_SUSPENDED)) { +- raw3215_mk_write_req(raw); +- raw3215_start_io(raw); ++ raw->flags &= ~RAW3215_TIMER_RUNS; ++ if (!(raw->port.flags & ASYNC_SUSPENDED)) { ++ raw3215_mk_write_req(raw); ++ raw3215_start_io(raw); ++ if ((raw->queued_read || raw->queued_write) && ++ !(raw->flags & RAW3215_WORKING) && ++ !(raw->flags & RAW3215_TIMER_RUNS)) { ++ raw->timer.expires = RAW3215_TIMEOUT + jiffies; ++ add_timer(&raw->timer); ++ raw->flags |= RAW3215_TIMER_RUNS; + } + } + spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); +@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw) + (raw->flags & RAW3215_FLUSHING)) { + /* execute write requests bigger than minimum size */ + raw3215_start_io(raw); +- if (raw->flags & RAW3215_TIMER_RUNS) { +- del_timer(&raw->timer); +- raw->flags &= ~RAW3215_TIMER_RUNS; +- } +- } else if (!(raw->flags & RAW3215_TIMER_RUNS)) { +- /* delay small writes */ +- raw->timer.expires = RAW3215_TIMEOUT + jiffies; +- add_timer(&raw->timer); +- raw->flags |= RAW3215_TIMER_RUNS; + } + } ++ if ((raw->queued_read || raw->queued_write) && ++ !(raw->flags & RAW3215_WORKING) && ++ !(raw->flags & RAW3215_TIMER_RUNS)) { ++ raw->timer.expires = RAW3215_TIMEOUT + jiffies; ++ add_timer(&raw->timer); ++ raw->flags |= RAW3215_TIMER_RUNS; ++ } + } + + /* +@@ -1027,12 +1029,26 @@ static int tty3215_write(struct tty_struct * tty, + const unsigned char *buf, int count) + { + struct raw3215_info *raw; ++ int i, written; + + if (!tty) + return 0; + raw = (struct raw3215_info *) tty->driver_data; +- raw3215_write(raw, buf, count); +- return count; ++ written = count; ++ while (count > 0) { ++ for (i = 0; i < count; i++) ++ if (buf[i] == '\t' || buf[i] == '\n') ++ break; ++ raw3215_write(raw, buf, i); ++ count -= i; ++ buf += i; ++ if (count > 0) { ++ raw3215_putchar(raw, *buf); ++ count--; ++ buf++; ++ } ++ } ++ return written; + } + + /* +@@ -1180,7 +1196,7 @@ static int __init tty3215_init(void) + driver->subtype = SYSTEM_TYPE_TTY; + driver->init_termios = tty_std_termios; + driver->init_termios.c_iflag = IGNBRK | IGNPAR; +- driver->init_termios.c_oflag = ONLCR | XTABS; ++ driver->init_termios.c_oflag = ONLCR; + driver->init_termios.c_lflag = ISIG; + driver->flags = TTY_DRIVER_REAL_RAW; + tty_set_operations(driver, &tty3215_ops); +diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c +index 410f4a3..72f9c55 100644 +--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c ++++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c +@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, + &mpt2sas_phy->remote_identify); + _transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); +- } else { ++ } else + memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct + sas_identify)); +- _transport_del_phy_from_an_existing_port(ioc, sas_node, +- mpt2sas_phy); +- } + + if (mpt2sas_phy->phy) + mpt2sas_phy->phy->negotiated_linkrate = +diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c +index 65170cb..55aa597 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c +@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + &mpt3sas_phy->remote_identify); + _transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); +- } else { ++ } else + memset(&mpt3sas_phy->remote_identify, 0 , 
sizeof(struct + sas_identify)); +- _transport_del_phy_from_an_existing_port(ioc, sas_node, +- mpt3sas_phy); +- } + + if (mpt3sas_phy->phy) + mpt3sas_phy->phy->negotiated_linkrate = +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index c1d04d4..262ab83 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -211,6 +211,7 @@ static struct { + {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, + {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, + {"MICROP", "4110", NULL, BLIST_NOTQ}, ++ {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC}, + {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, + {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, + {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index ed0f899..86b0515 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -1690,13 +1690,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) + if (ret == -EAGAIN) { + /* no more space */ + +- if (cmd_request->bounce_sgl_count) { ++ if (cmd_request->bounce_sgl_count) + destroy_bounce_buffer(cmd_request->bounce_sgl, + cmd_request->bounce_sgl_count); + +- ret = SCSI_MLQUEUE_DEVICE_BUSY; +- goto queue_error; +- } ++ ret = SCSI_MLQUEUE_DEVICE_BUSY; ++ goto queue_error; + } + + return 0; +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index d509aa7..c5d3811 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1186,6 +1186,9 @@ old_sess_out: + conn->sock = NULL; + } + ++ if (conn->conn_transport->iscsit_wait_conn) ++ conn->conn_transport->iscsit_wait_conn(conn); ++ + if (conn->conn_transport->iscsit_free_conn) + conn->conn_transport->iscsit_free_conn(conn); + +diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c +index ab77f80..1e406af 100644 +--- a/drivers/target/iscsi/iscsi_target_util.c ++++ b/drivers/target/iscsi/iscsi_target_util.c +@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data( + struct iscsi_conn *conn, + struct iscsi_data_count *count) + { +- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; ++ int ret, iov_len; + struct kvec *iov_p; + struct msghdr msg; + + if (!conn || !conn->sock || !conn->conn_ops) + return -1; + +- if (data <= 0) { +- pr_err("Data length is: %d\n", data); ++ if (count->data_length <= 0) { ++ pr_err("Data length is: %d\n", count->data_length); + return -1; + } + +@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data( + iov_p = count->iov; + iov_len = count->iov_count; + +- while (total_tx < data) { +- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, +- (data - total_tx)); +- if (tx_loop <= 0) { +- pr_debug("tx_loop: %d total_tx %d\n", +- tx_loop, total_tx); +- return tx_loop; +- } +- total_tx += tx_loop; +- pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", +- tx_loop, total_tx, data); ++ ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, ++ count->data_length); ++ if (ret != count->data_length) { ++ pr_err("Unexpected ret: %d send data %d\n", ++ ret, count->data_length); ++ return -EPIPE; + } ++ pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); + +- return total_tx; ++ return ret; + } + + int rx_data( +diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c +index fadad7c..67c802c 100644 +--- a/drivers/target/loopback/tcm_loop.c ++++ b/drivers/target/loopback/tcm_loop.c 
+@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) + /* + * Locate the SAM Task Attr from struct scsi_cmnd * + */ +-static int tcm_loop_sam_attr(struct scsi_cmnd *sc) +-{ +- if (sc->device->tagged_supported) { +- switch (sc->tag) { +- case HEAD_OF_QUEUE_TAG: +- return MSG_HEAD_TAG; +- case ORDERED_QUEUE_TAG: +- return MSG_ORDERED_TAG; +- default: +- break; +- } +- } ++static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag) ++{ ++ if (sc->device->tagged_supported && ++ sc->device->ordered_tags && tag >= 0) ++ return MSG_ORDERED_TAG; + + return MSG_SIMPLE_TAG; + } +@@ -197,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work) + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + goto out_done; + } +- tl_nexus = tl_hba->tl_nexus; ++ tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) { + scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" + " does not exist\n"); +@@ -214,7 +207,7 @@ static void tcm_loop_submission_work(struct work_struct *work) + } + rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, + &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, +- scsi_bufflen(sc), tcm_loop_sam_attr(sc), ++ scsi_bufflen(sc), tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag), + sc->sc_data_direction, 0, + scsi_sglist(sc), scsi_sg_count(sc), + sgl_bidi, sgl_bidi_count, +@@ -252,7 +245,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) + } + + tl_cmd->sc = sc; +- tl_cmd->sc_cmd_tag = sc->tag; ++ tl_cmd->sc_cmd_tag = sc->request->tag; + INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); + queue_work(tcm_loop_workqueue, &tl_cmd->work); + return 0; +@@ -263,16 +256,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) + * to struct scsi_device + */ + static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, +- struct tcm_loop_nexus *tl_nexus, + int lun, int task, enum tcm_tmreq_table tmr) + { + struct se_cmd *se_cmd = NULL; + struct se_session *se_sess; + struct se_portal_group *se_tpg; ++ struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_cmd *tl_cmd = NULL; + struct tcm_loop_tmr *tl_tmr = NULL; + int ret = TMR_FUNCTION_FAILED, rc; + ++ /* ++ * Locate the tl_nexus and se_sess pointers ++ */ ++ tl_nexus = tl_tpg->tl_nexus; ++ if (!tl_nexus) { ++ pr_err("Unable to perform device reset without" ++ " active I_T Nexus\n"); ++ return ret; ++ } ++ + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); + if (!tl_cmd) { + pr_err("Unable to allocate memory for tl_cmd\n"); +@@ -288,7 +291,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, + + se_cmd = &tl_cmd->tl_se_cmd; + se_tpg = &tl_tpg->tl_se_tpg; +- se_sess = tl_nexus->se_sess; ++ se_sess = tl_tpg->tl_nexus->se_sess; + /* + * Initialize struct se_cmd descriptor from target_core_mod infrastructure + */ +@@ -333,7 +336,6 @@ release: + static int tcm_loop_abort_task(struct scsi_cmnd *sc) + { + struct tcm_loop_hba *tl_hba; +- struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + +@@ -341,22 +343,9 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); +- /* +- * Locate the tl_nexus and se_sess pointers +- */ +- tl_nexus = tl_hba->tl_nexus; +- if (!tl_nexus) { +- pr_err("Unable to perform device reset without" +- " active I_T Nexus\n"); +- return FAILED; +- } +- +- /* +- * Locate the tl_tpg pointer from TargetID in sc->device->id +- */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 
+- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, +- sc->tag, TMR_ABORT_TASK); ++ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, ++ sc->request->tag, TMR_ABORT_TASK); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; + } + +@@ -367,7 +356,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) + static int tcm_loop_device_reset(struct scsi_cmnd *sc) + { + struct tcm_loop_hba *tl_hba; +- struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + +@@ -375,20 +363,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); +- /* +- * Locate the tl_nexus and se_sess pointers +- */ +- tl_nexus = tl_hba->tl_nexus; +- if (!tl_nexus) { +- pr_err("Unable to perform device reset without" +- " active I_T Nexus\n"); +- return FAILED; +- } +- /* +- * Locate the tl_tpg pointer from TargetID in sc->device->id +- */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, ++ ++ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, + 0, TMR_LUN_RESET); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; + } +@@ -995,8 +972,8 @@ static int tcm_loop_make_nexus( + struct tcm_loop_nexus *tl_nexus; + int ret = -ENOMEM; + +- if (tl_tpg->tl_hba->tl_nexus) { +- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); ++ if (tl_tpg->tl_nexus) { ++ pr_debug("tl_tpg->tl_nexus already exists\n"); + return -EEXIST; + } + se_tpg = &tl_tpg->tl_se_tpg; +@@ -1031,7 +1008,7 @@ static int tcm_loop_make_nexus( + */ + __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, + tl_nexus->se_sess, tl_nexus); +- tl_tpg->tl_hba->tl_nexus = tl_nexus; ++ tl_tpg->tl_nexus = tl_nexus; + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), + name); +@@ -1047,12 +1024,8 @@ static int tcm_loop_drop_nexus( + { + struct se_session *se_sess; + struct tcm_loop_nexus *tl_nexus; +- struct tcm_loop_hba *tl_hba = tpg->tl_hba; + +- if (!tl_hba) +- return -ENODEV; +- +- tl_nexus = tl_hba->tl_nexus; ++ tl_nexus = tpg->tl_nexus; + if (!tl_nexus) + return -ENODEV; + +@@ -1068,13 +1041,13 @@ static int tcm_loop_drop_nexus( + } + + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" +- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), ++ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), + tl_nexus->se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated SAS Target Port + */ + transport_deregister_session(tl_nexus->se_sess); +- tpg->tl_hba->tl_nexus = NULL; ++ tpg->tl_nexus = NULL; + kfree(tl_nexus); + return 0; + } +@@ -1090,7 +1063,7 @@ static ssize_t tcm_loop_tpg_show_nexus( + struct tcm_loop_nexus *tl_nexus; + ssize_t ret; + +- tl_nexus = tl_tpg->tl_hba->tl_nexus; ++ tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) + return -ENODEV; + +diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h +index 54c59d0..6ae49f2 100644 +--- a/drivers/target/loopback/tcm_loop.h ++++ b/drivers/target/loopback/tcm_loop.h +@@ -27,11 +27,6 @@ struct tcm_loop_tmr { + }; + + struct tcm_loop_nexus { +- int it_nexus_active; +- /* +- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h +- */ +- struct scsi_host *sh; + /* + * Pointer to TCM session for I_T Nexus + */ +@@ -51,6 +46,7 @@ struct tcm_loop_tpg { + atomic_t tl_tpg_port_count; + struct se_portal_group tl_se_tpg; + 
struct tcm_loop_hba *tl_hba; ++ struct tcm_loop_nexus *tl_nexus; + }; + + struct tcm_loop_hba { +@@ -59,7 +55,6 @@ struct tcm_loop_hba { + struct se_hba_s *se_hba; + struct se_lun *tl_hba_lun; + struct se_port *tl_hba_lun_sep; +- struct tcm_loop_nexus *tl_nexus; + struct device dev; + struct Scsi_Host *sh; + struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; +diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c +index a084325..6e75177 100644 +--- a/drivers/thermal/intel_powerclamp.c ++++ b/drivers/thermal/intel_powerclamp.c +@@ -435,7 +435,6 @@ static int clamp_thread(void *arg) + * allowed. thus jiffies are updated properly. + */ + preempt_disable(); +- tick_nohz_idle_enter(); + /* mwait until target jiffies is reached */ + while (time_before(jiffies, target_jiffies)) { + unsigned long ecx = 1; +@@ -451,7 +450,6 @@ static int clamp_thread(void *arg) + start_critical_timings(); + atomic_inc(&idle_wakeup_counter); + } +- tick_nohz_idle_exit(); + preempt_enable(); + } + del_timer_sync(&wakeup_timer); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index d90c70c..8f6738d 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -887,8 +887,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) + + if (i == (request->num_mapped_sgs - 1) || + sg_is_last(s)) { +- if (list_is_last(&req->list, +- &dep->request_list)) ++ if (list_empty(&dep->request_list)) + last_one = true; + chain = false; + } +@@ -906,6 +905,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) + if (last_one) + break; + } ++ ++ if (last_one) ++ break; + } else { + dma = req->request.dma; + length = req->request.length; +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c +index e113fd7..c399606 100644 +--- a/drivers/usb/host/ehci-sched.c ++++ b/drivers/usb/host/ehci-sched.c +@@ -1581,6 +1581,10 @@ iso_stream_schedule ( + else + next = (now + 2 + 7) & ~0x07; /* full frame cache */ + ++ /* If needed, initialize last_iso_frame so that this URB will be seen */ ++ if (ehci->isoc_count == 0) ++ ehci->last_iso_frame = now >> 3; ++ + /* + * Use ehci->last_iso_frame as the base. There can't be any + * TDs scheduled for earlier than that. +@@ -1671,10 +1675,6 @@ iso_stream_schedule ( + urb->start_frame = start & (mod - 1); + if (!stream->highspeed) + urb->start_frame >>= 3; +- +- /* Make sure scan_isoc() sees these */ +- if (ehci->isoc_count == 0) +- ehci->last_iso_frame = now >> 3; + return status; + + fail: +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c +index 2f3aceb..f4e6b94 100644 +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -571,7 +571,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) + { + void __iomem *base; + u32 control; +- u32 fminterval; ++ u32 fminterval = 0; ++ bool no_fminterval = false; + int cnt; + + if (!mmio_resource_enabled(pdev, 0)) +@@ -581,6 +582,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) + if (base == NULL) + return; + ++ /* ++ * ULi M5237 OHCI controller locks the whole system when accessing ++ * the OHCI_FMINTERVAL offset. ++ */ ++ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) ++ no_fminterval = true; ++ + control = readl(base + OHCI_CONTROL); + + /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. 
*/ +@@ -619,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) + } + + /* software reset of the controller, preserving HcFmInterval */ +- fminterval = readl(base + OHCI_FMINTERVAL); ++ if (!no_fminterval) ++ fminterval = readl(base + OHCI_FMINTERVAL); ++ + writel(OHCI_HCR, base + OHCI_CMDSTATUS); + + /* reset requires max 10 us delay */ +@@ -628,7 +638,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) + break; + udelay(1); + } +- writel(fminterval, base + OHCI_FMINTERVAL); ++ ++ if (!no_fminterval) ++ writel(fminterval, base + OHCI_FMINTERVAL); + + /* Now the controller is safely in SUSPEND and nothing can wake it up */ + iounmap(base); +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c +index abb38c3..6b0fb6a 100644 +--- a/drivers/usb/musb/musb_host.c ++++ b/drivers/usb/musb/musb_host.c +@@ -2640,7 +2640,6 @@ void musb_host_cleanup(struct musb *musb) + if (musb->port_mode == MUSB_PORT_MODE_GADGET) + return; + usb_remove_hcd(musb->hcd); +- musb->hcd = NULL; + } + + void musb_host_free(struct musb *musb) +diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c +index 8d7fc48..29fa1c3 100644 +--- a/drivers/usb/serial/console.c ++++ b/drivers/usb/serial/console.c +@@ -46,6 +46,8 @@ static struct console usbcons; + * ------------------------------------------------------------ + */ + ++static const struct tty_operations usb_console_fake_tty_ops = { ++}; + + /* + * The parsing of the command line works exactly like the +@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options) + goto reset_open_count; + } + kref_init(&tty->kref); +- tty_port_tty_set(&port->port, tty); + tty->driver = usb_serial_tty_driver; + tty->index = co->index; ++ init_ldsem(&tty->ldisc_sem); ++ INIT_LIST_HEAD(&tty->tty_files); ++ kref_get(&tty->driver->kref); ++ tty->ops = &usb_console_fake_tty_ops; + if (tty_init_termios(tty)) { + retval = -ENOMEM; +- goto free_tty; ++ goto put_tty; + } ++ tty_port_tty_set(&port->port, tty); + } + + /* only call the device specific open if this +@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options) + serial->type->set_termios(tty, port, &dummy); + + tty_port_tty_set(&port->port, NULL); +- kfree(tty); ++ tty_kref_put(tty); + } + set_bit(ASYNCB_INITIALIZED, &port->port.flags); + } +@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options) + + fail: + tty_port_tty_set(&port->port, NULL); +- free_tty: +- kfree(tty); ++ put_tty: ++ tty_kref_put(tty); + reset_open_count: + port->port.count = 0; + usb_autopm_put_interface(serial->interface); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 5741e94..9e8708c 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ +- { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ ++ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ ++ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ ++ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + 
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ +diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c +index 49101fe..35297a8 100644 +--- a/drivers/usb/serial/keyspan.c ++++ b/drivers/usb/serial/keyspan.c +@@ -421,6 +421,8 @@ static void usa26_instat_callback(struct urb *urb) + } + port = serial->port[msg->port]; + p_priv = usb_get_serial_port_data(port); ++ if (!p_priv) ++ goto resubmit; + + /* Update handshaking pin state information */ + old_dcd_state = p_priv->dcd_state; +@@ -431,7 +433,7 @@ static void usa26_instat_callback(struct urb *urb) + + if (old_dcd_state != p_priv->dcd_state) + tty_port_tty_hangup(&port->port, true); +- ++resubmit: + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) +@@ -541,6 +543,8 @@ static void usa28_instat_callback(struct urb *urb) + } + port = serial->port[msg->port]; + p_priv = usb_get_serial_port_data(port); ++ if (!p_priv) ++ goto resubmit; + + /* Update handshaking pin state information */ + old_dcd_state = p_priv->dcd_state; +@@ -551,7 +555,7 @@ static void usa28_instat_callback(struct urb *urb) + + if (old_dcd_state != p_priv->dcd_state && old_dcd_state) + tty_port_tty_hangup(&port->port, true); +- ++resubmit: + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) +@@ -624,6 +628,8 @@ static void usa49_instat_callback(struct urb *urb) + } + port = serial->port[msg->portNumber]; + p_priv = usb_get_serial_port_data(port); ++ if (!p_priv) ++ goto resubmit; + + /* Update handshaking pin state information */ + old_dcd_state = p_priv->dcd_state; +@@ -634,7 +640,7 @@ static void usa49_instat_callback(struct urb *urb) + + if (old_dcd_state != p_priv->dcd_state && old_dcd_state) + tty_port_tty_hangup(&port->port, true); +- ++resubmit: + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) +@@ -872,6 +878,8 @@ static void usa90_instat_callback(struct urb *urb) + + port = serial->port[0]; + p_priv = usb_get_serial_port_data(port); ++ if (!p_priv) ++ goto resubmit; + + /* Update handshaking pin state information */ + old_dcd_state = p_priv->dcd_state; +@@ -882,7 +890,7 @@ static void usa90_instat_callback(struct urb *urb) + + if (old_dcd_state != p_priv->dcd_state && old_dcd_state) + tty_port_tty_hangup(&port->port, true); +- ++resubmit: + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) +@@ -943,6 +951,8 @@ static void usa67_instat_callback(struct urb *urb) + + port = serial->port[msg->port]; + p_priv = usb_get_serial_port_data(port); ++ if (!p_priv) ++ goto resubmit; + + /* Update handshaking pin state information */ + old_dcd_state = p_priv->dcd_state; +@@ -951,7 +961,7 @@ static void usa67_instat_callback(struct urb *urb) + + if (old_dcd_state != p_priv->dcd_state && old_dcd_state) + tty_port_tty_hangup(&port->port, true); +- ++resubmit: + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 7ba0424..75e1d03 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -810,13 +810,11 @@ static const struct vfio_device_ops vfio_pci_ops = { + + static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + { +- u8 type; + 
struct vfio_pci_device *vdev; + struct iommu_group *group; + int ret; + +- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); +- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) ++ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) + return -EINVAL; + + group = iommu_group_get(&pdev->dev); +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c +index 5d0b7b8..486d710 100644 +--- a/drivers/vhost/scsi.c ++++ b/drivers/vhost/scsi.c +@@ -861,6 +861,23 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, + return 0; + } + ++static int vhost_scsi_to_tcm_attr(int attr) ++{ ++ switch (attr) { ++ case VIRTIO_SCSI_S_SIMPLE: ++ return MSG_SIMPLE_TAG; ++ case VIRTIO_SCSI_S_ORDERED: ++ return MSG_ORDERED_TAG; ++ case VIRTIO_SCSI_S_HEAD: ++ return MSG_HEAD_TAG; ++ case VIRTIO_SCSI_S_ACA: ++ return MSG_ACA_TAG; ++ default: ++ break; ++ } ++ return MSG_SIMPLE_TAG; ++} ++ + static void tcm_vhost_submission_work(struct work_struct *work) + { + struct tcm_vhost_cmd *cmd = +@@ -887,9 +904,10 @@ static void tcm_vhost_submission_work(struct work_struct *work) + rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, + cmd->tvc_cdb, &cmd->tvc_sense_buf[0], + cmd->tvc_lun, cmd->tvc_exp_data_len, +- cmd->tvc_task_attr, cmd->tvc_data_direction, +- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, +- sg_bidi_ptr, sg_no_bidi, NULL, 0); ++ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), ++ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, ++ sg_ptr, cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi, ++ NULL, 0); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); +diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c +index b670cbd..ffe024b 100644 +--- a/drivers/video/logo/logo.c ++++ b/drivers/video/logo/logo.c +@@ -21,6 +21,21 @@ static bool nologo; + module_param(nologo, bool, 0); + MODULE_PARM_DESC(nologo, "Disables startup logo"); + ++/* ++ * Logos are located in the initdata, and will be freed in kernel_init. ++ * Use late_init to mark the logos as freed to prevent any further use. ++ */ ++ ++static bool logos_freed; ++ ++static int __init fb_logo_late_init(void) ++{ ++ logos_freed = true; ++ return 0; ++} ++ ++late_initcall(fb_logo_late_init); ++ + /* logo's are marked __initdata. Use __init_refok to tell + * modpost that it is intended that this function uses data + * marked __initdata. +@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth) + { + const struct linux_logo *logo = NULL; + +- if (nologo) ++ if (nologo || logos_freed) + return NULL; + + if (depth >= 1) { +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 223e1cb..59a53f6 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -137,10 +137,6 @@ lockd(void *vrqstp) + + dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); + +- if (!nlm_timeout) +- nlm_timeout = LOCKD_DFLT_TIMEO; +- nlmsvc_timeout = nlm_timeout * HZ; +- + /* + * The main request loop. We don't terminate until the last + * NFS mount or NFS daemon has gone away. 
+@@ -346,6 +342,10 @@ static struct svc_serv *lockd_create_svc(void) + printk(KERN_WARNING + "lockd_up: no pid, %d users??\n", nlmsvc_users); + ++ if (!nlm_timeout) ++ nlm_timeout = LOCKD_DFLT_TIMEO; ++ nlmsvc_timeout = nlm_timeout * HZ; ++ + serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL); + if (!serv) { + printk(KERN_WARNING "lockd_up: create service failed\n"); +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index 1abe4f5..037f957 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -565,20 +565,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b) + } + + /* +- * Returns true if the server owners match ++ * Returns true if the server major ids match + */ + static bool +-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b) ++nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b) + { + struct nfs41_server_owner *o1 = a->cl_serverowner; + struct nfs41_server_owner *o2 = b->cl_serverowner; + +- if (o1->minor_id != o2->minor_id) { +- dprintk("NFS: --> %s server owner minor IDs do not match\n", +- __func__); +- return false; +- } +- + if (o1->major_id_sz != o2->major_id_sz) + goto out_major_mismatch; + if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) +@@ -654,7 +648,12 @@ int nfs41_walk_client_list(struct nfs_client *new, + if (!nfs4_match_clientids(pos, new)) + continue; + +- if (!nfs4_match_serverowners(pos, new)) ++ /* ++ * Note that session trunking is just a special subcase of ++ * client id trunking. In either case, we want to fall back ++ * to using the existing nfs_client. ++ */ ++ if (!nfs4_check_clientid_trunking(pos, new)) + continue; + + atomic_inc(&pos->cl_count); +diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c +index 74825be..fbb9dfb 100644 +--- a/fs/notify/inode_mark.c ++++ b/fs/notify/inode_mark.c +@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list) + spin_unlock(&inode->i_lock); + + /* In case the dropping of a reference would nuke next_i. */ +- if ((&next_i->i_sb_list != list) && +- atomic_read(&next_i->i_count)) { ++ while (&next_i->i_sb_list != list) { + spin_lock(&next_i->i_lock); +- if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) { ++ if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) && ++ atomic_read(&next_i->i_count)) { + __iget(next_i); + need_iput = next_i; ++ spin_unlock(&next_i->i_lock); ++ break; + } + spin_unlock(&next_i->i_lock); ++ next_i = list_entry(next_i->i_sb_list.next, ++ struct inode, i_sb_list); + } + + /* +- * We can safely drop inode_sb_list_lock here because we hold +- * references on both inode and next_i. Also no new inodes +- * will be added since the umount has begun. ++ * We can safely drop inode_sb_list_lock here because either ++ * we actually hold references on both inode and next_i or ++ * end of list. Also no new inodes will be added since the ++ * umount has begun. + */ + spin_unlock(&inode_sb_list_lock); + +diff --git a/fs/proc/stat.c b/fs/proc/stat.c +index 6f599c6..dbd0272 100644 +--- a/fs/proc/stat.c ++++ b/fs/proc/stat.c +@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v) + + /* sum again ? it could be updated? 
*/ + for_each_irq_nr(j) +- seq_put_decimal_ull(p, ' ', kstat_irqs(j)); ++ seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j)); + + seq_printf(p, + "\nctxt %llu\n" +diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h +index 51c72be..4b2053a 100644 +--- a/include/linux/kernel_stat.h ++++ b/include/linux/kernel_stat.h +@@ -74,6 +74,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) + * Number of interrupts per specific IRQ source, since bootup + */ + extern unsigned int kstat_irqs(unsigned int irq); ++extern unsigned int kstat_irqs_usr(unsigned int irq); + + /* + * Number of interrupts per cpu, since bootup +diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h +index e9a1d2d..4c399ae 100644 +--- a/include/uapi/linux/in6.h ++++ b/include/uapi/linux/in6.h +@@ -149,7 +149,7 @@ struct in6_flowlabel_req { + /* + * IPV6 socket options + */ +- ++#if __UAPI_DEF_IPV6_OPTIONS + #define IPV6_ADDRFORM 1 + #define IPV6_2292PKTINFO 2 + #define IPV6_2292HOPOPTS 3 +@@ -192,6 +192,7 @@ struct in6_flowlabel_req { + + #define IPV6_IPSEC_POLICY 34 + #define IPV6_XFRM_POLICY 35 ++#endif + + /* + * Multicast: +diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h +index c140620..e28807a 100644 +--- a/include/uapi/linux/libc-compat.h ++++ b/include/uapi/linux/libc-compat.h +@@ -69,6 +69,7 @@ + #define __UAPI_DEF_SOCKADDR_IN6 0 + #define __UAPI_DEF_IPV6_MREQ 0 + #define __UAPI_DEF_IPPROTO_V6 0 ++#define __UAPI_DEF_IPV6_OPTIONS 0 + + #else + +@@ -82,6 +83,7 @@ + #define __UAPI_DEF_SOCKADDR_IN6 1 + #define __UAPI_DEF_IPV6_MREQ 1 + #define __UAPI_DEF_IPPROTO_V6 1 ++#define __UAPI_DEF_IPV6_OPTIONS 1 + + #endif /* _NETINET_IN_H */ + +@@ -103,6 +105,7 @@ + #define __UAPI_DEF_SOCKADDR_IN6 1 + #define __UAPI_DEF_IPV6_MREQ 1 + #define __UAPI_DEF_IPPROTO_V6 1 ++#define __UAPI_DEF_IPV6_OPTIONS 1 + + /* Definitions for xattr.h */ + #define __UAPI_DEF_XATTR 1 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h +index 001fa5b..8a160e8 100644 +--- a/kernel/irq/internals.h ++++ b/kernel/irq/internals.h +@@ -74,6 +74,14 @@ extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); + extern void mask_irq(struct irq_desc *desc); + extern void unmask_irq(struct irq_desc *desc); + ++#ifdef CONFIG_SPARSE_IRQ ++extern void irq_lock_sparse(void); ++extern void irq_unlock_sparse(void); ++#else ++static inline void irq_lock_sparse(void) { } ++static inline void irq_unlock_sparse(void) { } ++#endif ++ + extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); + + irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c +index 8ab8e93..07d4551 100644 +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c +@@ -131,6 +131,16 @@ static void free_masks(struct irq_desc *desc) + static inline void free_masks(struct irq_desc *desc) { } + #endif + ++void irq_lock_sparse(void) ++{ ++ mutex_lock(&sparse_irq_lock); ++} ++ ++void irq_unlock_sparse(void) ++{ ++ mutex_unlock(&sparse_irq_lock); ++} ++ + static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) + { + struct irq_desc *desc; +@@ -167,6 +177,12 @@ static void free_desc(unsigned int irq) + + unregister_irq_proc(irq, desc); + ++ /* ++ * sparse_irq_lock protects also show_interrupts() and ++ * kstat_irq_usr(). Once we deleted the descriptor from the ++ * sparse tree we can free it. Access in proc will fail to ++ * lookup the descriptor. 
++ */ + mutex_lock(&sparse_irq_lock); + delete_irq_desc(irq); + mutex_unlock(&sparse_irq_lock); +@@ -489,6 +505,15 @@ void dynamic_irq_cleanup(unsigned int irq) + raw_spin_unlock_irqrestore(&desc->lock, flags); + } + ++/** ++ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu ++ * @irq: The interrupt number ++ * @cpu: The cpu number ++ * ++ * Returns the sum of interrupt counts on @cpu since boot for ++ * @irq. The caller must ensure that the interrupt is not removed ++ * concurrently. ++ */ + unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) + { + struct irq_desc *desc = irq_to_desc(irq); +@@ -497,6 +522,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) + *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; + } + ++/** ++ * kstat_irqs - Get the statistics for an interrupt ++ * @irq: The interrupt number ++ * ++ * Returns the sum of interrupt counts on all cpus since boot for ++ * @irq. The caller must ensure that the interrupt is not removed ++ * concurrently. ++ */ + unsigned int kstat_irqs(unsigned int irq) + { + struct irq_desc *desc = irq_to_desc(irq); +@@ -509,3 +542,22 @@ unsigned int kstat_irqs(unsigned int irq) + sum += *per_cpu_ptr(desc->kstat_irqs, cpu); + return sum; + } ++ ++/** ++ * kstat_irqs_usr - Get the statistics for an interrupt ++ * @irq: The interrupt number ++ * ++ * Returns the sum of interrupt counts on all cpus since boot for ++ * @irq. Contrary to kstat_irqs() this can be called from any ++ * preemptible context. It's protected against concurrent removal of ++ * an interrupt descriptor when sparse irqs are enabled. ++ */ ++unsigned int kstat_irqs_usr(unsigned int irq) ++{ ++ int sum; ++ ++ irq_lock_sparse(); ++ sum = kstat_irqs(irq); ++ irq_unlock_sparse(); ++ return sum; ++} +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c +index 36f6ee1..095cd72 100644 +--- a/kernel/irq/proc.c ++++ b/kernel/irq/proc.c +@@ -15,6 +15,23 @@ + + #include "internals.h" + ++/* ++ * Access rules: ++ * ++ * procfs protects read/write of /proc/irq/N/ files against a ++ * concurrent free of the interrupt descriptor. remove_proc_entry() ++ * immediately prevents new read/writes to happen and waits for ++ * already running read/write functions to complete. ++ * ++ * We remove the proc entries first and then delete the interrupt ++ * descriptor from the radix tree and free it. So it is guaranteed ++ * that irq_to_desc(N) is valid as long as the read/writes are ++ * permitted by procfs. ++ * ++ * The read from /proc/interrupts is a different problem because there ++ * is no protection. So the lookup and the access to irqdesc ++ * information must be protected by sparse_irq_lock. 
++ */ + static struct proc_dir_entry *root_irq_dir; + + #ifdef CONFIG_SMP +@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v) + seq_putc(p, '\n'); + } + ++ irq_lock_sparse(); + desc = irq_to_desc(i); + if (!desc) +- return 0; ++ goto outsparse; + + raw_spin_lock_irqsave(&desc->lock, flags); + for_each_online_cpu(j) +@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v) + seq_putc(p, '\n'); + out: + raw_spin_unlock_irqrestore(&desc->lock, flags); ++outsparse: ++ irq_unlock_sparse(); + return 0; + } + #endif +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c +index 6558b7a..8c08a6f 100644 +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -807,7 +807,6 @@ void tick_nohz_idle_enter(void) + + local_irq_enable(); + } +-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter); + + /** + * tick_nohz_irq_exit - update next tick event from interrupt exit +@@ -934,7 +933,6 @@ void tick_nohz_idle_exit(void) + + local_irq_enable(); + } +-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit); + + static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) + { +diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c +index 31c5f76..f504027 100644 +--- a/lib/decompress_bunzip2.c ++++ b/lib/decompress_bunzip2.c +@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd) + if (get_bits(bd, 1)) + return RETVAL_OBSOLETE_INPUT; + origPtr = get_bits(bd, 24); +- if (origPtr > dbufSize) ++ if (origPtr >= dbufSize) + return RETVAL_DATA_ERROR; + /* mapping table: if some byte values are never used (encoding things + like ascii text), the compression code removes the gaps to have fewer +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c +index c46387a..e5c5f57 100644 +--- a/net/batman-adv/fragmentation.c ++++ b/net/batman-adv/fragmentation.c +@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) + kfree(entry); + + /* Make room for the rest of the fragments. 
*/ +- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) { ++ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { + kfree_skb(skb_out); + skb_out = NULL; + goto free; +@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb, + * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE + */ + mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE); +- max_fragment_size = (mtu - header_size - ETH_HLEN); ++ max_fragment_size = mtu - header_size; + max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; + + /* Don't even try to fragment, if we need more than 16 fragments */ +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c +index 36b9ae6..2393ea7 100644 +--- a/net/batman-adv/gateway_client.c ++++ b/net/batman-adv/gateway_client.c +@@ -812,7 +812,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, + goto out; + + gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); +- if (!gw_node->bandwidth_down == 0) ++ if (!gw_node) + goto out; + + switch (atomic_read(&bat_priv->gw_mode)) { +diff --git a/net/core/dev.c b/net/core/dev.c +index 3ed11a5..86bb9cc 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1709,6 +1709,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + + skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); ++ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + + return netif_rx_internal(skb); + } +@@ -2529,11 +2530,14 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb, + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) + features &= ~NETIF_F_GSO_MASK; + +- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { +- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; +- protocol = veh->h_vlan_encapsulated_proto; +- } else if (!vlan_tx_tag_present(skb)) { +- return harmonize_features(skb, dev, features); ++ if (!vlan_tx_tag_present(skb)) { ++ if (unlikely(protocol == htons(ETH_P_8021Q) || ++ protocol == htons(ETH_P_8021AD))) { ++ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; ++ protocol = veh->h_vlan_encapsulated_proto; ++ } else { ++ return harmonize_features(skb, dev, features); ++ } + } + + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | +@@ -4701,9 +4705,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev, + sysfs_remove_link(&(dev->dev.kobj), linkname); + } + +-#define netdev_adjacent_is_neigh_list(dev, dev_list) \ +- (dev_list == &dev->adj_list.upper || \ +- dev_list == &dev->adj_list.lower) ++static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev, ++ struct net_device *adj_dev, ++ struct list_head *dev_list) ++{ ++ return (dev_list == &dev->adj_list.upper || ++ dev_list == &dev->adj_list.lower) && ++ net_eq(dev_net(dev), dev_net(adj_dev)); ++} + + static int __netdev_adjacent_dev_insert(struct net_device *dev, + struct net_device *adj_dev, +@@ -4733,7 +4742,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, + pr_debug("dev_hold for %s, because of link added from %s to %s\n", + adj_dev->name, dev->name, adj_dev->name); + +- if (netdev_adjacent_is_neigh_list(dev, dev_list)) { ++ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) { + ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); + if (ret) + goto free_adj; +@@ -4754,7 +4763,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, + return 0; + + remove_symlinks: +- if (netdev_adjacent_is_neigh_list(dev, dev_list)) ++ if (netdev_adjacent_is_neigh_list(dev, 
adj_dev, dev_list)) + netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); + free_adj: + kfree(adj); +@@ -4787,7 +4796,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, + if (adj->master) + sysfs_remove_link(&(dev->dev.kobj), "master"); + +- if (netdev_adjacent_is_neigh_list(dev, dev_list)) ++ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) + netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); + + list_del_rcu(&adj->list); +@@ -5057,11 +5066,65 @@ void netdev_upper_dev_unlink(struct net_device *dev, + } + EXPORT_SYMBOL(netdev_upper_dev_unlink); + ++void netdev_adjacent_add_links(struct net_device *dev) ++{ ++ struct netdev_adjacent *iter; ++ ++ struct net *net = dev_net(dev); ++ ++ list_for_each_entry(iter, &dev->adj_list.upper, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; ++ netdev_adjacent_sysfs_add(iter->dev, dev, ++ &iter->dev->adj_list.lower); ++ netdev_adjacent_sysfs_add(dev, iter->dev, ++ &dev->adj_list.upper); ++ } ++ ++ list_for_each_entry(iter, &dev->adj_list.lower, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; ++ netdev_adjacent_sysfs_add(iter->dev, dev, ++ &iter->dev->adj_list.upper); ++ netdev_adjacent_sysfs_add(dev, iter->dev, ++ &dev->adj_list.lower); ++ } ++} ++ ++void netdev_adjacent_del_links(struct net_device *dev) ++{ ++ struct netdev_adjacent *iter; ++ ++ struct net *net = dev_net(dev); ++ ++ list_for_each_entry(iter, &dev->adj_list.upper, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; ++ netdev_adjacent_sysfs_del(iter->dev, dev->name, ++ &iter->dev->adj_list.lower); ++ netdev_adjacent_sysfs_del(dev, iter->dev->name, ++ &dev->adj_list.upper); ++ } ++ ++ list_for_each_entry(iter, &dev->adj_list.lower, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; ++ netdev_adjacent_sysfs_del(iter->dev, dev->name, ++ &iter->dev->adj_list.upper); ++ netdev_adjacent_sysfs_del(dev, iter->dev->name, ++ &dev->adj_list.lower); ++ } ++} ++ + void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) + { + struct netdev_adjacent *iter; + ++ struct net *net = dev_net(dev); ++ + list_for_each_entry(iter, &dev->adj_list.upper, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; + netdev_adjacent_sysfs_del(iter->dev, oldname, + &iter->dev->adj_list.lower); + netdev_adjacent_sysfs_add(iter->dev, dev, +@@ -5069,6 +5132,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) + } + + list_for_each_entry(iter, &dev->adj_list.lower, list) { ++ if (!net_eq(net,dev_net(iter->dev))) ++ continue; + netdev_adjacent_sysfs_del(iter->dev, oldname, + &iter->dev->adj_list.upper); + netdev_adjacent_sysfs_add(iter->dev, dev, +@@ -6675,6 +6740,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char + + /* Send a netdev-removed uevent to the old namespace */ + kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); ++ netdev_adjacent_del_links(dev); + + /* Actually switch the network namespace */ + dev_net_set(dev, net); +@@ -6689,6 +6755,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char + + /* Send a netdev-add uevent to the new namespace */ + kobject_uevent(&dev->dev.kobj, KOBJ_ADD); ++ netdev_adjacent_add_links(dev); + + /* Fixup kobjects */ + err = device_rename(&dev->dev, dev->name); +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index baf6fc4..e2b1bba 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -3937,6 +3937,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) + skb->local_df = 0; + 
skb_dst_drop(skb); + skb->mark = 0; ++ skb_init_secmark(skb); + secpath_reset(skb); + nf_reset(skb); + nf_reset_trace(skb); +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 94213c8..b40b90d 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -250,10 +250,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, + struct ip_tunnel *tunnel = netdev_priv(dev); + const struct iphdr *tnl_params; + +- skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); +- if (IS_ERR(skb)) +- goto out; +- + if (dev->header_ops) { + /* Need space for new headers */ + if (skb_cow_head(skb, dev->needed_headroom - +@@ -266,6 +262,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, + * to gre header. + */ + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); ++ skb_reset_mac_header(skb); + } else { + if (skb_cow_head(skb, dev->needed_headroom)) + goto free_skb; +@@ -273,6 +270,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, + tnl_params = &tunnel->parms.iph; + } + ++ skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); ++ if (IS_ERR(skb)) ++ goto out; ++ + __gre_xmit(skb, dev, tnl_params, skb->protocol); + + return NETDEV_TX_OK; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 91b98e5..7efa26b 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1894,7 +1894,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) + break; + +- if (tso_segs == 1) { ++ if (tso_segs == 1 || !sk->sk_gso_max_segs) { + if (unlikely(!tcp_nagle_test(tp, skb, mss_now, + (tcp_skb_is_last(sk, skb) ? + nonagle : TCP_NAGLE_PUSH)))) +@@ -1931,7 +1931,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + } + + limit = mss_now; +- if (tso_segs > 1 && !tcp_urg_mode(tp)) ++ if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp)) + limit = tcp_mss_split_point(sk, skb, mss_now, + min_t(unsigned int, + cwnd_quota, +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index cf99377..53ea164 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -1839,6 +1839,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) + if (*op < IP_SET_OP_VERSION) { + /* Check the version at the beginning of operations */ + struct ip_set_req_version *req_version = data; ++ ++ if (*len < sizeof(struct ip_set_req_version)) { ++ ret = -EINVAL; ++ goto done; ++ } ++ + if (req_version->version != IPSET_PROTOCOL) { + ret = -EPROTO; + goto done; +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 7c177bc..1d52506 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -510,14 +510,14 @@ out: + return err; + } + +-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) ++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len) + { + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 + struct page *p_start, *p_end; + + /* First page is flushed through netlink_{get,set}_status */ + p_start = pgvec_to_page(hdr + PAGE_SIZE); +- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1); ++ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1); + while (p_start <= p_end) { + flush_dcache_page(p_start); + p_start++; +@@ -535,9 +535,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr) + static void netlink_set_status(struct nl_mmap_hdr *hdr, + enum 
nl_mmap_status status) + { ++ smp_mb(); + hdr->nm_status = status; + flush_dcache_page(pgvec_to_page(hdr)); +- smp_wmb(); + } + + static struct nl_mmap_hdr * +@@ -699,24 +699,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, + struct nl_mmap_hdr *hdr; + struct sk_buff *skb; + unsigned int maxlen; +- bool excl = true; + int err = 0, len = 0; + +- /* Netlink messages are validated by the receiver before processing. +- * In order to avoid userspace changing the contents of the message +- * after validation, the socket and the ring may only be used by a +- * single process, otherwise we fall back to copying. +- */ +- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 || +- atomic_read(&nlk->mapped) > 1) +- excl = false; +- + mutex_lock(&nlk->pg_vec_lock); + + ring = &nlk->tx_ring; + maxlen = ring->frame_size - NL_MMAP_HDRLEN; + + do { ++ unsigned int nm_len; ++ + hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID); + if (hdr == NULL) { + if (!(msg->msg_flags & MSG_DONTWAIT) && +@@ -724,35 +716,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, + schedule(); + continue; + } +- if (hdr->nm_len > maxlen) { ++ ++ nm_len = ACCESS_ONCE(hdr->nm_len); ++ if (nm_len > maxlen) { + err = -EINVAL; + goto out; + } + +- netlink_frame_flush_dcache(hdr); ++ netlink_frame_flush_dcache(hdr, nm_len); + +- if (likely(dst_portid == 0 && dst_group == 0 && excl)) { +- skb = alloc_skb_head(GFP_KERNEL); +- if (skb == NULL) { +- err = -ENOBUFS; +- goto out; +- } +- sock_hold(sk); +- netlink_ring_setup_skb(skb, sk, ring, hdr); +- NETLINK_CB(skb).flags |= NETLINK_SKB_TX; +- __skb_put(skb, hdr->nm_len); +- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED); +- atomic_inc(&ring->pending); +- } else { +- skb = alloc_skb(hdr->nm_len, GFP_KERNEL); +- if (skb == NULL) { +- err = -ENOBUFS; +- goto out; +- } +- __skb_put(skb, hdr->nm_len); +- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len); +- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED); ++ skb = alloc_skb(nm_len, GFP_KERNEL); ++ if (skb == NULL) { ++ err = -ENOBUFS; ++ goto out; + } ++ __skb_put(skb, nm_len); ++ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len); ++ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED); + + netlink_increment_head(ring); + +@@ -798,7 +778,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb) + hdr->nm_pid = NETLINK_CB(skb).creds.pid; + hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid); + hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid); +- netlink_frame_flush_dcache(hdr); ++ netlink_frame_flush_dcache(hdr, hdr->nm_len); + netlink_set_status(hdr, NL_MMAP_STATUS_VALID); + + NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED; +diff --git a/net/wireless/chan.c b/net/wireless/chan.c +index 78559b5..27157a78 100644 +--- a/net/wireless/chan.c ++++ b/net/wireless/chan.c +@@ -516,7 +516,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + { + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; +- u32 width, control_freq; ++ u32 width, control_freq, cap; + + if (WARN_ON(!cfg80211_chandef_valid(chandef))) + return false; +@@ -554,7 +554,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + return false; + break; + case NL80211_CHAN_WIDTH_80P80: +- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) ++ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; ++ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return false; + case NL80211_CHAN_WIDTH_80: + if 
(!vht_cap->vht_supported) +@@ -565,7 +566,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + case NL80211_CHAN_WIDTH_160: + if (!vht_cap->vht_supported) + return false; +- if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)) ++ cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; ++ if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && ++ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return false; + prohibited_flags |= IEEE80211_CHAN_NO_160MHZ; + width = 160; +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index 338794e..04d5305 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -1547,7 +1547,7 @@ static enum reg_request_treatment + reg_process_hint_driver(struct wiphy *wiphy, + struct regulatory_request *driver_request) + { +- const struct ieee80211_regdomain *regd; ++ const struct ieee80211_regdomain *regd, *tmp; + enum reg_request_treatment treatment; + + treatment = __reg_process_hint_driver(driver_request); +@@ -1566,7 +1566,10 @@ reg_process_hint_driver(struct wiphy *wiphy, + kfree(driver_request); + return REG_REQ_IGNORE; + } ++ ++ tmp = get_wiphy_regdom(wiphy); + rcu_assign_pointer(wiphy->regd, regd); ++ rcu_free_regdom(tmp); + } + + +@@ -1625,11 +1628,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy, + return REG_REQ_IGNORE; + return REG_REQ_ALREADY_SET; + } +- /* +- * Two consecutive Country IE hints on the same wiphy. +- * This should be picked up early by the driver/stack +- */ +- if (WARN_ON(regdom_changes(country_ie_request->alpha2))) ++ ++ if (regdom_changes(country_ie_request->alpha2)) + return REG_REQ_OK; + return REG_REQ_ALREADY_SET; + } +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index c657752..83bddbd 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -2804,133 +2804,45 @@ YAMAHA_DEVICE(0x7010, "UB99"), + } + }, + +-/* Hauppauge HVR-950Q and HVR-850 */ +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- 
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-850", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, +-{ +- USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008), +- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | +- USB_DEVICE_ID_MATCH_INT_CLASS | +- USB_DEVICE_ID_MATCH_INT_SUBCLASS, +- .bInterfaceClass = USB_CLASS_AUDIO, +- .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { +- .vendor_name = "Hauppauge", +- .product_name = "HVR-950Q", +- .ifnum = QUIRK_ANY_INTERFACE, +- .type = QUIRK_AUDIO_ALIGN_TRANSFER, +- } +-}, ++/* ++ * Auvitek au0828 devices with audio interface. ++ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c ++ * Please notice that some drivers are DVB only, and don't need to be ++ * here. That's the case, for example, of DVICO_FUSIONHDTV7. 
++ */ ++ ++#define AU0828_DEVICE(vid, pid, vname, pname) { \ ++ USB_DEVICE_VENDOR_SPEC(vid, pid), \ ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ ++ USB_DEVICE_ID_MATCH_INT_CLASS | \ ++ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \ ++ .bInterfaceClass = USB_CLASS_AUDIO, \ ++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \ ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \ ++ .vendor_name = vname, \ ++ .product_name = pname, \ ++ .ifnum = QUIRK_ANY_INTERFACE, \ ++ .type = QUIRK_AUDIO_ALIGN_TRANSFER, \ ++ } \ ++} ++ ++AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"), ++AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"), ++AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"), ++AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"), ++AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"), ++AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"), ++AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"), ++AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), + + /* Digidesign Mbox */ + { diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.30-31.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.30-31.patch new file mode 100644 index 0000000000..880a017542 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.30-31.patch @@ -0,0 +1,5731 @@ +diff --git a/Makefile b/Makefile +index 5b94752..5abf670 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 30 ++SUBLEVEL = 31 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h +deleted file mode 100644 +index c32245c..0000000 +--- a/arch/arc/include/asm/barrier.h ++++ /dev/null +@@ -1,37 +0,0 @@ +-/* +- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License version 2 as +- * published by the Free Software Foundation. 
+- */ +- +-#ifndef __ASM_BARRIER_H +-#define __ASM_BARRIER_H +- +-#ifndef __ASSEMBLY__ +- +-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */ +-#define mb() __asm__ __volatile__ ("" : : : "memory") +-#define rmb() mb() +-#define wmb() mb() +-#define set_mb(var, value) do { var = value; mb(); } while (0) +-#define set_wmb(var, value) do { var = value; wmb(); } while (0) +-#define read_barrier_depends() mb() +- +-/* TODO-vineetg verify the correctness of macros here */ +-#ifdef CONFIG_SMP +-#define smp_mb() mb() +-#define smp_rmb() rmb() +-#define smp_wmb() wmb() +-#else +-#define smp_mb() barrier() +-#define smp_rmb() barrier() +-#define smp_wmb() barrier() +-#endif +- +-#define smp_read_barrier_depends() do { } while (0) +- +-#endif +- +-#endif +diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S +index 2ff0347..e248594 100644 +--- a/arch/arc/kernel/ctx_sw_asm.S ++++ b/arch/arc/kernel/ctx_sw_asm.S +@@ -10,9 +10,9 @@ + * -This is the more "natural" hand written assembler + */ + ++#include + #include /* For the SAVE_* macros */ + #include +-#include + + #define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4) + +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi +index 6a26e79..cf3300a 100644 +--- a/arch/arm/boot/dts/imx25.dtsi ++++ b/arch/arm/boot/dts/imx25.dtsi +@@ -352,7 +352,7 @@ + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; + reg = <0x53fa0000 0x4000>; +- clocks = <&clks 106>, <&clks 36>; ++ clocks = <&clks 106>, <&clks 52>; + clock-names = "ipg", "per"; + interrupts = <36>; + }; +@@ -371,7 +371,7 @@ + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; + reg = <0x53fa8000 0x4000>; +- clocks = <&clks 107>, <&clks 36>; ++ clocks = <&clks 107>, <&clks 52>; + clock-names = "ipg", "per"; + interrupts = <41>; + }; +@@ -412,7 +412,7 @@ + pwm4: pwm@53fc8000 { + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + reg = <0x53fc8000 0x4000>; +- clocks = <&clks 108>, <&clks 36>; ++ clocks = <&clks 108>, <&clks 52>; + clock-names = "ipg", "per"; + interrupts = <42>; + }; +@@ -458,7 +458,7 @@ + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; + reg = <0x53fe0000 0x4000>; +- clocks = <&clks 105>, <&clks 36>; ++ clocks = <&clks 105>, <&clks 52>; + clock-names = "ipg", "per"; + interrupts = <26>; + }; +diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c +index 3003fa1..0409b8f 100644 +--- a/arch/arm/crypto/aes_glue.c ++++ b/arch/arm/crypto/aes_glue.c +@@ -93,6 +93,6 @@ module_exit(aes_fini); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("aes"); +-MODULE_ALIAS("aes-asm"); ++MODULE_ALIAS_CRYPTO("aes"); ++MODULE_ALIAS_CRYPTO("aes-asm"); + MODULE_AUTHOR("David McCullough "); +diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c +index 76cd976..ace4cd6 100644 +--- a/arch/arm/crypto/sha1_glue.c ++++ b/arch/arm/crypto/sha1_glue.c +@@ -175,5 +175,5 @@ module_exit(sha1_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)"); +-MODULE_ALIAS("sha1"); ++MODULE_ALIAS_CRYPTO("sha1"); + MODULE_AUTHOR("David McCullough "); +diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c +index f9e8b94..b51da91 100644 +--- a/arch/powerpc/crypto/sha1.c ++++ b/arch/powerpc/crypto/sha1.c +@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); + +-MODULE_ALIAS("sha1-powerpc"); 
++MODULE_ALIAS_CRYPTO("sha1"); ++MODULE_ALIAS_CRYPTO("sha1-powerpc"); +diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c +index 23223cd..1f272b2 100644 +--- a/arch/s390/crypto/aes_s390.c ++++ b/arch/s390/crypto/aes_s390.c +@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void) + module_init(aes_s390_init); + module_exit(aes_s390_fini); + +-MODULE_ALIAS("aes-all"); ++MODULE_ALIAS_CRYPTO("aes-all"); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); + MODULE_LICENSE("GPL"); +diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c +index 7acb77f..9e05cc4 100644 +--- a/arch/s390/crypto/des_s390.c ++++ b/arch/s390/crypto/des_s390.c +@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void) + module_init(des_s390_init); + module_exit(des_s390_exit); + +-MODULE_ALIAS("des"); +-MODULE_ALIAS("des3_ede"); ++MODULE_ALIAS_CRYPTO("des"); ++MODULE_ALIAS_CRYPTO("des3_ede"); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); +diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c +index d43485d..7940dc9 100644 +--- a/arch/s390/crypto/ghash_s390.c ++++ b/arch/s390/crypto/ghash_s390.c +@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void) + module_init(ghash_mod_init); + module_exit(ghash_mod_exit); + +-MODULE_ALIAS("ghash"); ++MODULE_ALIAS_CRYPTO("ghash"); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation"); +diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c +index a1b3a9d..5b2bee3 100644 +--- a/arch/s390/crypto/sha1_s390.c ++++ b/arch/s390/crypto/sha1_s390.c +@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void) + module_init(sha1_s390_init); + module_exit(sha1_s390_fini); + +-MODULE_ALIAS("sha1"); ++MODULE_ALIAS_CRYPTO("sha1"); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); +diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c +index 9b85380..b74ff15 100644 +--- a/arch/s390/crypto/sha256_s390.c ++++ b/arch/s390/crypto/sha256_s390.c +@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void) + module_init(sha256_s390_init); + module_exit(sha256_s390_fini); + +-MODULE_ALIAS("sha256"); +-MODULE_ALIAS("sha224"); ++MODULE_ALIAS_CRYPTO("sha256"); ++MODULE_ALIAS_CRYPTO("sha224"); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm"); +diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c +index 32a8138..0c36989 100644 +--- a/arch/s390/crypto/sha512_s390.c ++++ b/arch/s390/crypto/sha512_s390.c +@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = { + } + }; + +-MODULE_ALIAS("sha512"); ++MODULE_ALIAS_CRYPTO("sha512"); + + static int sha384_init(struct shash_desc *desc) + { +@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = { + } + }; + +-MODULE_ALIAS("sha384"); ++MODULE_ALIAS_CRYPTO("sha384"); + + static int __init init(void) + { +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c +index 503e6d9..ded4cee3 100644 +--- a/arch/sparc/crypto/aes_glue.c ++++ b/arch/sparc/crypto/aes_glue.c +@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); + +-MODULE_ALIAS("aes"); ++MODULE_ALIAS_CRYPTO("aes"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c +index 888f6260..641f55c 100644 +--- a/arch/sparc/crypto/camellia_glue.c 
++++ b/arch/sparc/crypto/camellia_glue.c +@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); + +-MODULE_ALIAS("aes"); ++MODULE_ALIAS_CRYPTO("aes"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c +index 5162fad..d1064e4 100644 +--- a/arch/sparc/crypto/crc32c_glue.c ++++ b/arch/sparc/crypto/crc32c_glue.c +@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); + +-MODULE_ALIAS("crc32c"); ++MODULE_ALIAS_CRYPTO("crc32c"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c +index 3065bc6..d115009 100644 +--- a/arch/sparc/crypto/des_glue.c ++++ b/arch/sparc/crypto/des_glue.c +@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); + +-MODULE_ALIAS("des"); ++MODULE_ALIAS_CRYPTO("des"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c +index 09a9ea1..64c7ff5 100644 +--- a/arch/sparc/crypto/md5_glue.c ++++ b/arch/sparc/crypto/md5_glue.c +@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); + +-MODULE_ALIAS("md5"); ++MODULE_ALIAS_CRYPTO("md5"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c +index 6cd5f29..1b3e47a 100644 +--- a/arch/sparc/crypto/sha1_glue.c ++++ b/arch/sparc/crypto/sha1_glue.c +@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); + +-MODULE_ALIAS("sha1"); ++MODULE_ALIAS_CRYPTO("sha1"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c +index 04f555a..41f27cc 100644 +--- a/arch/sparc/crypto/sha256_glue.c ++++ b/arch/sparc/crypto/sha256_glue.c +@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); + +-MODULE_ALIAS("sha224"); +-MODULE_ALIAS("sha256"); ++MODULE_ALIAS_CRYPTO("sha224"); ++MODULE_ALIAS_CRYPTO("sha256"); + + #include "crop_devid.c" +diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c +index f04d199..9fff885 100644 +--- a/arch/sparc/crypto/sha512_glue.c ++++ b/arch/sparc/crypto/sha512_glue.c +@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated"); + +-MODULE_ALIAS("sha384"); +-MODULE_ALIAS("sha512"); ++MODULE_ALIAS_CRYPTO("sha384"); ++MODULE_ALIAS_CRYPTO("sha512"); + + #include "crop_devid.c" +diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c +index 004ba56..33294fd 100644 +--- a/arch/tile/mm/homecache.c ++++ b/arch/tile/mm/homecache.c +@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order) + if (put_page_testzero(page)) { + homecache_change_page_home(page, order, PAGE_HOME_HASH); + if (order == 0) { +- free_hot_cold_page(page, 0); ++ free_hot_cold_page(page, false); + } else { + init_page_count(page); + 
__free_pages(page, order); +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 98aa930..2f645c9 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -854,7 +854,7 @@ source "kernel/Kconfig.preempt" + + config X86_UP_APIC + bool "Local APIC support on uniprocessors" +- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI ++ depends on X86_32 && !SMP && !X86_32_NON_STANDARD + ---help--- + A local APIC (Advanced Programmable Interrupt Controller) is an + integrated interrupt controller in the CPU. If you have a single-CPU +@@ -865,6 +865,10 @@ config X86_UP_APIC + performance counters), and the NMI watchdog which detects hard + lockups. + ++config X86_UP_APIC_MSI ++ def_bool y ++ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI ++ + config X86_UP_IOAPIC + bool "IO-APIC support on uniprocessors" + depends on X86_UP_APIC +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index eb25ca1..8f45c85 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -396,6 +396,8 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap, + unsigned long output_len, + unsigned long run_size) + { ++ unsigned char *output_orig = output; ++ + real_mode = rmode; + + sanitize_boot_params(real_mode); +@@ -444,7 +446,12 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap, + debug_putstr("\nDecompressing Linux... "); + decompress(input_data, input_len, NULL, NULL, output, NULL, error); + parse_elf(output); +- handle_relocations(output, output_len); ++ /* ++ * 32-bit always performs relocations. 64-bit relocations are only ++ * needed if kASLR has chosen a different load address. ++ */ ++ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig) ++ handle_relocations(output, output_len); + debug_putstr("done.\nBooting the kernel.\n"); + return output; + } +diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c +index aafe8ce..e26984f 100644 +--- a/arch/x86/crypto/aes_glue.c ++++ b/arch/x86/crypto/aes_glue.c +@@ -66,5 +66,5 @@ module_exit(aes_fini); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("aes"); +-MODULE_ALIAS("aes-asm"); ++MODULE_ALIAS_CRYPTO("aes"); ++MODULE_ALIAS_CRYPTO("aes-asm"); +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index 948ad0e..6dfb7d0 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -1514,4 +1514,4 @@ module_exit(aesni_exit); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("aes"); ++MODULE_ALIAS_CRYPTO("aes"); +diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c +index 50ec333..1477cfc 100644 +--- a/arch/x86/crypto/blowfish_glue.c ++++ b/arch/x86/crypto/blowfish_glue.c +@@ -481,5 +481,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); +-MODULE_ALIAS("blowfish"); +-MODULE_ALIAS("blowfish-asm"); ++MODULE_ALIAS_CRYPTO("blowfish"); ++MODULE_ALIAS_CRYPTO("blowfish-asm"); +diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c +index 4209a76..9a07faf 100644 +--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c ++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c +@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 
optimized"); +-MODULE_ALIAS("camellia"); +-MODULE_ALIAS("camellia-asm"); ++MODULE_ALIAS_CRYPTO("camellia"); ++MODULE_ALIAS_CRYPTO("camellia-asm"); +diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c +index 87a041a..ed38d95 100644 +--- a/arch/x86/crypto/camellia_aesni_avx_glue.c ++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c +@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized"); +-MODULE_ALIAS("camellia"); +-MODULE_ALIAS("camellia-asm"); ++MODULE_ALIAS_CRYPTO("camellia"); ++MODULE_ALIAS_CRYPTO("camellia-asm"); +diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c +index c171dcb..5c8b626 100644 +--- a/arch/x86/crypto/camellia_glue.c ++++ b/arch/x86/crypto/camellia_glue.c +@@ -1725,5 +1725,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized"); +-MODULE_ALIAS("camellia"); +-MODULE_ALIAS("camellia-asm"); ++MODULE_ALIAS_CRYPTO("camellia"); ++MODULE_ALIAS_CRYPTO("camellia-asm"); +diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c +index e6a3700..f62e9db 100644 +--- a/arch/x86/crypto/cast5_avx_glue.c ++++ b/arch/x86/crypto/cast5_avx_glue.c +@@ -494,4 +494,4 @@ module_exit(cast5_exit); + + MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("cast5"); ++MODULE_ALIAS_CRYPTO("cast5"); +diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c +index 09f3677..0160f68 100644 +--- a/arch/x86/crypto/cast6_avx_glue.c ++++ b/arch/x86/crypto/cast6_avx_glue.c +@@ -611,4 +611,4 @@ module_exit(cast6_exit); + + MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("cast6"); ++MODULE_ALIAS_CRYPTO("cast6"); +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c +index 9d014a7..1937fc1 100644 +--- a/arch/x86/crypto/crc32-pclmul_glue.c ++++ b/arch/x86/crypto/crc32-pclmul_glue.c +@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini); + MODULE_AUTHOR("Alexander Boyko "); + MODULE_LICENSE("GPL"); + +-MODULE_ALIAS("crc32"); +-MODULE_ALIAS("crc32-pclmul"); ++MODULE_ALIAS_CRYPTO("crc32"); ++MODULE_ALIAS_CRYPTO("crc32-pclmul"); +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c +index 6812ad9..28640c3 100644 +--- a/arch/x86/crypto/crc32c-intel_glue.c ++++ b/arch/x86/crypto/crc32c-intel_glue.c +@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang , Kent Liu "); + MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); + MODULE_LICENSE("GPL"); + +-MODULE_ALIAS("crct10dif"); +-MODULE_ALIAS("crct10dif-pclmul"); ++MODULE_ALIAS_CRYPTO("crct10dif"); ++MODULE_ALIAS_CRYPTO("crct10dif-pclmul"); +diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c +index 98d7a18..f368ba2 100644 +--- a/arch/x86/crypto/fpu.c ++++ b/arch/x86/crypto/fpu.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + + struct crypto_fpu_ctx { +@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void) + { + crypto_unregister_template(&crypto_fpu_tmpl); + } ++ ++MODULE_ALIAS_CRYPTO("fpu"); +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c +index d785cf2..a8d6f69 100644 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c +@@ -341,4 +341,4 @@ 
module_exit(ghash_pclmulqdqni_mod_exit); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " + "acclerated by PCLMULQDQ-NI"); +-MODULE_ALIAS("ghash"); ++MODULE_ALIAS_CRYPTO("ghash"); +diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c +index 5e8e677..399a29d 100644 +--- a/arch/x86/crypto/salsa20_glue.c ++++ b/arch/x86/crypto/salsa20_glue.c +@@ -119,5 +119,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); +-MODULE_ALIAS("salsa20"); +-MODULE_ALIAS("salsa20-asm"); ++MODULE_ALIAS_CRYPTO("salsa20"); ++MODULE_ALIAS_CRYPTO("salsa20-asm"); +diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c +index 2fae489..437e47a 100644 +--- a/arch/x86/crypto/serpent_avx2_glue.c ++++ b/arch/x86/crypto/serpent_avx2_glue.c +@@ -558,5 +558,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized"); +-MODULE_ALIAS("serpent"); +-MODULE_ALIAS("serpent-asm"); ++MODULE_ALIAS_CRYPTO("serpent"); ++MODULE_ALIAS_CRYPTO("serpent-asm"); +diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c +index ff48708..7e21739 100644 +--- a/arch/x86/crypto/serpent_avx_glue.c ++++ b/arch/x86/crypto/serpent_avx_glue.c +@@ -617,4 +617,4 @@ module_exit(serpent_exit); + + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("serpent"); ++MODULE_ALIAS_CRYPTO("serpent"); +diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c +index 8c95f86..bf025ad 100644 +--- a/arch/x86/crypto/serpent_sse2_glue.c ++++ b/arch/x86/crypto/serpent_sse2_glue.c +@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit); + + MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("serpent"); ++MODULE_ALIAS_CRYPTO("serpent"); +diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c +index 4a11a9d..29e1060 100644 +--- a/arch/x86/crypto/sha1_ssse3_glue.c ++++ b/arch/x86/crypto/sha1_ssse3_glue.c +@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); + +-MODULE_ALIAS("sha1"); ++MODULE_ALIAS_CRYPTO("sha1"); +diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c +index f248546..4dc100d 100644 +--- a/arch/x86/crypto/sha256_ssse3_glue.c ++++ b/arch/x86/crypto/sha256_ssse3_glue.c +@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); + +-MODULE_ALIAS("sha256"); +-MODULE_ALIAS("sha224"); ++MODULE_ALIAS_CRYPTO("sha256"); ++MODULE_ALIAS_CRYPTO("sha224"); +diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c +index 8626b03..26a5898 100644 +--- a/arch/x86/crypto/sha512_ssse3_glue.c ++++ b/arch/x86/crypto/sha512_ssse3_glue.c +@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); + +-MODULE_ALIAS("sha512"); +-MODULE_ALIAS("sha384"); ++MODULE_ALIAS_CRYPTO("sha512"); ++MODULE_ALIAS_CRYPTO("sha384"); +diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c +index 4e3c665..1ac531e 100644 +--- a/arch/x86/crypto/twofish_avx_glue.c ++++ 
b/arch/x86/crypto/twofish_avx_glue.c +@@ -579,4 +579,4 @@ module_exit(twofish_exit); + + MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("twofish"); ++MODULE_ALIAS_CRYPTO("twofish"); +diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c +index 0a52023..77e06c2 100644 +--- a/arch/x86/crypto/twofish_glue.c ++++ b/arch/x86/crypto/twofish_glue.c +@@ -96,5 +96,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized"); +-MODULE_ALIAS("twofish"); +-MODULE_ALIAS("twofish-asm"); ++MODULE_ALIAS_CRYPTO("twofish"); ++MODULE_ALIAS_CRYPTO("twofish-asm"); +diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c +index 13e63b3..56d8a08 100644 +--- a/arch/x86/crypto/twofish_glue_3way.c ++++ b/arch/x86/crypto/twofish_glue_3way.c +@@ -495,5 +495,5 @@ module_exit(fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); +-MODULE_ALIAS("twofish"); +-MODULE_ALIAS("twofish-asm"); ++MODULE_ALIAS_CRYPTO("twofish"); ++MODULE_ALIAS_CRYPTO("twofish-asm"); +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index 50d033a..a94b82e 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; + } + +-#define _LDT_empty(info) \ ++/* This intentionally ignores lm, since 32-bit apps don't have that field. */ ++#define LDT_empty(info) \ + ((info)->base_addr == 0 && \ + (info)->limit == 0 && \ + (info)->contents == 0 && \ +@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) + (info)->seg_not_present == 1 && \ + (info)->useable == 0) + +-#ifdef CONFIG_X86_64 +-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) +-#else +-#define LDT_empty(info) (_LDT_empty(info)) +-#endif ++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */ ++static inline bool LDT_zero(const struct user_desc *info) ++{ ++ return (info->base_addr == 0 && ++ info->limit == 0 && ++ info->contents == 0 && ++ info->read_exec_only == 0 && ++ info->seg_32bit == 0 && ++ info->limit_in_pages == 0 && ++ info->seg_not_present == 0 && ++ info->useable == 0); ++} + + static inline void clear_LDT(void) + { +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index 832d05a..317c811 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -67,6 +67,7 @@ static struct clocksource hyperv_cs = { + .rating = 400, /* use this when running on Hyperv*/ + .read = read_hv_clock, + .mask = CLOCKSOURCE_MASK(64), ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }; + + static void __init ms_hyperv_init_platform(void) +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c +index 4e942f3..7fc5e84 100644 +--- a/arch/x86/kernel/tls.c ++++ b/arch/x86/kernel/tls.c +@@ -29,7 +29,28 @@ static int get_free_idx(void) + + static bool tls_desc_okay(const struct user_desc *info) + { +- if (LDT_empty(info)) ++ /* ++ * For historical reasons (i.e. no one ever documented how any ++ * of the segmentation APIs work), user programs can and do ++ * assume that a struct user_desc that's all zeros except for ++ * entry_number means "no segment at all". This never actually ++ * worked. 
In fact, up to Linux 3.19, a struct user_desc like ++ * this would create a 16-bit read-write segment with base and ++ * limit both equal to zero. ++ * ++ * That was close enough to "no segment at all" until we ++ * hardened this function to disallow 16-bit TLS segments. Fix ++ * it up by interpreting these zeroed segments the way that they ++ * were almost certainly intended to be interpreted. ++ * ++ * The correct way to ask for "no segment at all" is to specify ++ * a user_desc that satisfies LDT_empty. To keep everything ++ * working, we accept both. ++ * ++ * Note that there's a similar kludge in modify_ldt -- look at ++ * the distinction between modes 1 and 0x11. ++ */ ++ if (LDT_empty(info) || LDT_zero(info)) + return true; + + /* +@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx, + cpu = get_cpu(); + + while (n-- > 0) { +- if (LDT_empty(info)) ++ if (LDT_empty(info) || LDT_zero(info)) + desc->a = desc->b = 0; + else + fill_ldt(desc, info); +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index f9d976e..b1d9002 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -365,7 +365,7 @@ exit: + * for scheduling or signal handling. The actual stack switch is done in + * entry.S + */ +-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) ++asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) + { + struct pt_regs *regs = eregs; + /* Did already sync */ +@@ -390,7 +390,7 @@ struct bad_iret_stack { + struct pt_regs regs; + }; + +-asmlinkage __visible ++asmlinkage __visible notrace __kprobes + struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) + { + /* +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index de02906..b20bced 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -618,7 +618,7 @@ static unsigned long quick_pit_calibrate(void) + goto success; + } + } +- pr_err("Fast TSC calibration failed\n"); ++ pr_info("Fast TSC calibration failed\n"); + return 0; + + success: +diff --git a/crypto/842.c b/crypto/842.c +index 65c7a89c..b48f4f1 100644 +--- a/crypto/842.c ++++ b/crypto/842.c +@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("842 Compression Algorithm"); ++MODULE_ALIAS_CRYPTO("842"); +diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c +index fd0d6b4..3dd1011 100644 +--- a/crypto/aes_generic.c ++++ b/crypto/aes_generic.c +@@ -1474,4 +1474,5 @@ module_exit(aes_fini); + + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); + MODULE_LICENSE("Dual BSD/GPL"); +-MODULE_ALIAS("aes"); ++MODULE_ALIAS_CRYPTO("aes"); ++MODULE_ALIAS_CRYPTO("aes-generic"); +diff --git a/crypto/algapi.c b/crypto/algapi.c +index 7a1ae87..00d8d93 100644 +--- a/crypto/algapi.c ++++ b/crypto/algapi.c +@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name) + + struct crypto_template *crypto_lookup_template(const char *name) + { +- return try_then_request_module(__crypto_lookup_template(name), "%s", +- name); ++ return try_then_request_module(__crypto_lookup_template(name), ++ "crypto-%s", name); + } + EXPORT_SYMBOL_GPL(crypto_lookup_template); + +diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c +index 666f196..6f5bebc 100644 +--- a/crypto/ansi_cprng.c ++++ b/crypto/ansi_cprng.c +@@ -476,4 +476,5 @@ module_param(dbg, int, 0); + MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); + module_init(prng_mod_init); + module_exit(prng_mod_fini); +-MODULE_ALIAS("stdrng"); 
++MODULE_ALIAS_CRYPTO("stdrng"); ++MODULE_ALIAS_CRYPTO("ansi_cprng"); +diff --git a/crypto/anubis.c b/crypto/anubis.c +index 008c8a4..4bb187c 100644 +--- a/crypto/anubis.c ++++ b/crypto/anubis.c +@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Anubis Cryptographic Algorithm"); ++MODULE_ALIAS_CRYPTO("anubis"); +diff --git a/crypto/api.c b/crypto/api.c +index a2b39c5..2a81e98 100644 +--- a/crypto/api.c ++++ b/crypto/api.c +@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) + + alg = crypto_alg_lookup(name, type, mask); + if (!alg) { +- request_module("%s", name); ++ request_module("crypto-%s", name); + + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & + CRYPTO_ALG_NEED_FALLBACK)) +- request_module("%s-all", name); ++ request_module("crypto-%s-all", name); + + alg = crypto_alg_lookup(name, type, mask); + } +diff --git a/crypto/arc4.c b/crypto/arc4.c +index 5a772c3..f1a8192 100644 +--- a/crypto/arc4.c ++++ b/crypto/arc4.c +@@ -166,3 +166,4 @@ module_exit(arc4_exit); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); + MODULE_AUTHOR("Jon Oberheide "); ++MODULE_ALIAS_CRYPTO("arc4"); +diff --git a/crypto/authenc.c b/crypto/authenc.c +index e122355..78fb16c 100644 +--- a/crypto/authenc.c ++++ b/crypto/authenc.c +@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); ++MODULE_ALIAS_CRYPTO("authenc"); +diff --git a/crypto/authencesn.c b/crypto/authencesn.c +index 4be0dd4..024bff2 100644 +--- a/crypto/authencesn.c ++++ b/crypto/authencesn.c +@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Steffen Klassert "); + MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); ++MODULE_ALIAS_CRYPTO("authencesn"); +diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c +index 8baf544..87b392a 100644 +--- a/crypto/blowfish_generic.c ++++ b/crypto/blowfish_generic.c +@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); +-MODULE_ALIAS("blowfish"); ++MODULE_ALIAS_CRYPTO("blowfish"); ++MODULE_ALIAS_CRYPTO("blowfish-generic"); +diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c +index 26bcd7a..a02286b 100644 +--- a/crypto/camellia_generic.c ++++ b/crypto/camellia_generic.c +@@ -1098,4 +1098,5 @@ module_exit(camellia_fini); + + MODULE_DESCRIPTION("Camellia Cipher Algorithm"); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("camellia"); ++MODULE_ALIAS_CRYPTO("camellia"); ++MODULE_ALIAS_CRYPTO("camellia-generic"); +diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c +index 5558f63..df5c726 100644 +--- a/crypto/cast5_generic.c ++++ b/crypto/cast5_generic.c +@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); +-MODULE_ALIAS("cast5"); ++MODULE_ALIAS_CRYPTO("cast5"); ++MODULE_ALIAS_CRYPTO("cast5-generic"); +diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c +index de73252..058c8d7 100644 +--- a/crypto/cast6_generic.c ++++ b/crypto/cast6_generic.c +@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); +-MODULE_ALIAS("cast6"); ++MODULE_ALIAS_CRYPTO("cast6"); ++MODULE_ALIAS_CRYPTO("cast6-generic"); +diff --git a/crypto/cbc.c b/crypto/cbc.c +index 61ac42e..780ee27 100644 +--- a/crypto/cbc.c 
++++ b/crypto/cbc.c +@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("CBC block cipher algorithm"); ++MODULE_ALIAS_CRYPTO("cbc"); +diff --git a/crypto/ccm.c b/crypto/ccm.c +index 1df8421..003bbbd 100644 +--- a/crypto/ccm.c ++++ b/crypto/ccm.c +@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Counter with CBC MAC"); +-MODULE_ALIAS("ccm_base"); +-MODULE_ALIAS("rfc4309"); ++MODULE_ALIAS_CRYPTO("ccm_base"); ++MODULE_ALIAS_CRYPTO("rfc4309"); ++MODULE_ALIAS_CRYPTO("ccm"); +diff --git a/crypto/chainiv.c b/crypto/chainiv.c +index 834d8dd..22b7e55 100644 +--- a/crypto/chainiv.c ++++ b/crypto/chainiv.c +@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Chain IV Generator"); ++MODULE_ALIAS_CRYPTO("chainiv"); +diff --git a/crypto/cmac.c b/crypto/cmac.c +index 50880cf..7a8bfbd 100644 +--- a/crypto/cmac.c ++++ b/crypto/cmac.c +@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("CMAC keyed hash algorithm"); ++MODULE_ALIAS_CRYPTO("cmac"); +diff --git a/crypto/crc32.c b/crypto/crc32.c +index 9d1c415..187ded2 100644 +--- a/crypto/crc32.c ++++ b/crypto/crc32.c +@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini); + MODULE_AUTHOR("Alexander Boyko "); + MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS_CRYPTO("crc32"); +diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c +index 877e711..c1229614 100644 +--- a/crypto/crct10dif_generic.c ++++ b/crypto/crct10dif_generic.c +@@ -124,4 +124,5 @@ module_exit(crct10dif_mod_fini); + MODULE_AUTHOR("Tim Chen "); + MODULE_DESCRIPTION("T10 DIF CRC calculation."); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("crct10dif"); ++MODULE_ALIAS_CRYPTO("crct10dif"); ++MODULE_ALIAS_CRYPTO("crct10dif-generic"); +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index 7bdd61b..75c415d 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -955,3 +955,4 @@ module_exit(cryptd_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Software async crypto daemon"); ++MODULE_ALIAS_CRYPTO("cryptd"); +diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c +index fee7265..7b39fa3 100644 +--- a/crypto/crypto_null.c ++++ b/crypto/crypto_null.c +@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { { + .coa_decompress = null_compress } } + } }; + +-MODULE_ALIAS("compress_null"); +-MODULE_ALIAS("digest_null"); +-MODULE_ALIAS("cipher_null"); ++MODULE_ALIAS_CRYPTO("compress_null"); ++MODULE_ALIAS_CRYPTO("digest_null"); ++MODULE_ALIAS_CRYPTO("cipher_null"); + + static int __init crypto_null_mod_init(void) + { +diff --git a/crypto/ctr.c b/crypto/ctr.c +index f2b94f2..2386f73 100644 +--- a/crypto/ctr.c ++++ b/crypto/ctr.c +@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("CTR Counter block mode"); +-MODULE_ALIAS("rfc3686"); ++MODULE_ALIAS_CRYPTO("rfc3686"); ++MODULE_ALIAS_CRYPTO("ctr"); +diff --git a/crypto/cts.c b/crypto/cts.c +index 042223f..60b9da3 100644 +--- a/crypto/cts.c ++++ b/crypto/cts.c +@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit); + + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC"); ++MODULE_ALIAS_CRYPTO("cts"); +diff --git a/crypto/deflate.c b/crypto/deflate.c +index b57d70e..95d8d37 100644 +--- a/crypto/deflate.c ++++ b/crypto/deflate.c +@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini); + 
MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); + MODULE_AUTHOR("James Morris "); +- ++MODULE_ALIAS_CRYPTO("deflate"); +diff --git a/crypto/des_generic.c b/crypto/des_generic.c +index f6cf63f..3ec6071 100644 +--- a/crypto/des_generic.c ++++ b/crypto/des_generic.c +@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { { + .cia_decrypt = des3_ede_decrypt } } + } }; + +-MODULE_ALIAS("des3_ede"); +- + static int __init des_generic_mod_init(void) + { + return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs)); +@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); + MODULE_AUTHOR("Dag Arne Osvik "); +-MODULE_ALIAS("des"); ++MODULE_ALIAS_CRYPTO("des"); ++MODULE_ALIAS_CRYPTO("des-generic"); ++MODULE_ALIAS_CRYPTO("des3_ede"); ++MODULE_ALIAS_CRYPTO("des3_ede-generic"); +diff --git a/crypto/ecb.c b/crypto/ecb.c +index 935cfef..12011af 100644 +--- a/crypto/ecb.c ++++ b/crypto/ecb.c +@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("ECB block cipher algorithm"); ++MODULE_ALIAS_CRYPTO("ecb"); +diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c +index 42ce9f5..388f582 100644 +--- a/crypto/eseqiv.c ++++ b/crypto/eseqiv.c +@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); ++MODULE_ALIAS_CRYPTO("eseqiv"); +diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c +index 021d7fe..77286ea 100644 +--- a/crypto/fcrypt.c ++++ b/crypto/fcrypt.c +@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini); + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_DESCRIPTION("FCrypt Cipher Algorithm"); + MODULE_AUTHOR("David Howells "); ++MODULE_ALIAS_CRYPTO("fcrypt"); +diff --git a/crypto/gcm.c b/crypto/gcm.c +index b4f0179..9cea4d0 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Galois/Counter Mode"); + MODULE_AUTHOR("Mikko Herranen "); +-MODULE_ALIAS("gcm_base"); +-MODULE_ALIAS("rfc4106"); +-MODULE_ALIAS("rfc4543"); ++MODULE_ALIAS_CRYPTO("gcm_base"); ++MODULE_ALIAS_CRYPTO("rfc4106"); ++MODULE_ALIAS_CRYPTO("rfc4543"); ++MODULE_ALIAS_CRYPTO("gcm"); +diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c +index 9d3f0c6..bac7099 100644 +--- a/crypto/ghash-generic.c ++++ b/crypto/ghash-generic.c +@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); +-MODULE_ALIAS("ghash"); ++MODULE_ALIAS_CRYPTO("ghash"); ++MODULE_ALIAS_CRYPTO("ghash-generic"); +diff --git a/crypto/hmac.c b/crypto/hmac.c +index 8d9544c..ade790b 100644 +--- a/crypto/hmac.c ++++ b/crypto/hmac.c +@@ -271,3 +271,4 @@ module_exit(hmac_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("HMAC hash algorithm"); ++MODULE_ALIAS_CRYPTO("hmac"); +diff --git a/crypto/khazad.c b/crypto/khazad.c +index 60e7cd6..873eb5d 100644 +--- a/crypto/khazad.c ++++ b/crypto/khazad.c +@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Khazad Cryptographic Algorithm"); ++MODULE_ALIAS_CRYPTO("khazad"); +diff --git a/crypto/krng.c b/crypto/krng.c +index a2d2b72..0224841 100644 +--- a/crypto/krng.c ++++ b/crypto/krng.c +@@ -62,4 +62,5 @@ module_exit(krng_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Kernel Random Number Generator"); +-MODULE_ALIAS("stdrng"); 
++MODULE_ALIAS_CRYPTO("stdrng"); ++MODULE_ALIAS_CRYPTO("krng"); +diff --git a/crypto/lrw.c b/crypto/lrw.c +index ba42acc..6f9908a 100644 +--- a/crypto/lrw.c ++++ b/crypto/lrw.c +@@ -400,3 +400,4 @@ module_exit(crypto_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("LRW block cipher mode"); ++MODULE_ALIAS_CRYPTO("lrw"); +diff --git a/crypto/lz4.c b/crypto/lz4.c +index 4586dd1..53279ab 100644 +--- a/crypto/lz4.c ++++ b/crypto/lz4.c +@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("LZ4 Compression Algorithm"); ++MODULE_ALIAS_CRYPTO("lz4"); +diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c +index 151ba31..eaec5fa 100644 +--- a/crypto/lz4hc.c ++++ b/crypto/lz4hc.c +@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("LZ4HC Compression Algorithm"); ++MODULE_ALIAS_CRYPTO("lz4hc"); +diff --git a/crypto/lzo.c b/crypto/lzo.c +index 1c2aa69..d1ff694 100644 +--- a/crypto/lzo.c ++++ b/crypto/lzo.c +@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("LZO Compression Algorithm"); ++MODULE_ALIAS_CRYPTO("lzo"); +diff --git a/crypto/md4.c b/crypto/md4.c +index 0477a6a..3515af4 100644 +--- a/crypto/md4.c ++++ b/crypto/md4.c +@@ -255,4 +255,4 @@ module_exit(md4_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("MD4 Message Digest Algorithm"); +- ++MODULE_ALIAS_CRYPTO("md4"); +diff --git a/crypto/md5.c b/crypto/md5.c +index 7febeaa..36f5e5b 100644 +--- a/crypto/md5.c ++++ b/crypto/md5.c +@@ -168,3 +168,4 @@ module_exit(md5_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("MD5 Message Digest Algorithm"); ++MODULE_ALIAS_CRYPTO("md5"); +diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c +index 079b761..46195e0 100644 +--- a/crypto/michael_mic.c ++++ b/crypto/michael_mic.c +@@ -184,3 +184,4 @@ module_exit(michael_mic_exit); + MODULE_LICENSE("GPL v2"); + MODULE_DESCRIPTION("Michael MIC"); + MODULE_AUTHOR("Jouni Malinen "); ++MODULE_ALIAS_CRYPTO("michael_mic"); +diff --git a/crypto/pcbc.c b/crypto/pcbc.c +index d1b8bdf..f654965 100644 +--- a/crypto/pcbc.c ++++ b/crypto/pcbc.c +@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("PCBC block cipher algorithm"); ++MODULE_ALIAS_CRYPTO("pcbc"); +diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c +index 309d345..c305d41 100644 +--- a/crypto/pcrypt.c ++++ b/crypto/pcrypt.c +@@ -565,3 +565,4 @@ module_exit(pcrypt_exit); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Steffen Klassert "); + MODULE_DESCRIPTION("Parallel crypto wrapper"); ++MODULE_ALIAS_CRYPTO("pcrypt"); +diff --git a/crypto/rmd128.c b/crypto/rmd128.c +index 8a0f68b..049486e 100644 +--- a/crypto/rmd128.c ++++ b/crypto/rmd128.c +@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Adrian-Ken Rueegsegger "); + MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); ++MODULE_ALIAS_CRYPTO("rmd128"); +diff --git a/crypto/rmd160.c b/crypto/rmd160.c +index 525d7bb..de585e5 100644 +--- a/crypto/rmd160.c ++++ b/crypto/rmd160.c +@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Adrian-Ken Rueegsegger "); + MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); ++MODULE_ALIAS_CRYPTO("rmd160"); +diff --git a/crypto/rmd256.c b/crypto/rmd256.c +index 69293d9..4ec02a7 100644 +--- a/crypto/rmd256.c ++++ b/crypto/rmd256.c +@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Adrian-Ken Rueegsegger "); + 
MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); ++MODULE_ALIAS_CRYPTO("rmd256"); +diff --git a/crypto/rmd320.c b/crypto/rmd320.c +index 09f97df..770f2cb 100644 +--- a/crypto/rmd320.c ++++ b/crypto/rmd320.c +@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Adrian-Ken Rueegsegger "); + MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); ++MODULE_ALIAS_CRYPTO("rmd320"); +diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c +index 9a4770c..f550b5d 100644 +--- a/crypto/salsa20_generic.c ++++ b/crypto/salsa20_generic.c +@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); +-MODULE_ALIAS("salsa20"); ++MODULE_ALIAS_CRYPTO("salsa20"); ++MODULE_ALIAS_CRYPTO("salsa20-generic"); +diff --git a/crypto/seed.c b/crypto/seed.c +index 9c904d6..c6ba843 100644 +--- a/crypto/seed.c ++++ b/crypto/seed.c +@@ -476,3 +476,4 @@ module_exit(seed_fini); + MODULE_DESCRIPTION("SEED Cipher Algorithm"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Hye-Shik Chang , Kim Hyun "); ++MODULE_ALIAS_CRYPTO("seed"); +diff --git a/crypto/seqiv.c b/crypto/seqiv.c +index f2cba4ed..49a4069 100644 +--- a/crypto/seqiv.c ++++ b/crypto/seqiv.c +@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Sequence Number IV Generator"); ++MODULE_ALIAS_CRYPTO("seqiv"); +diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c +index 7ddbd7e..94970a7 100644 +--- a/crypto/serpent_generic.c ++++ b/crypto/serpent_generic.c +@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm"); + MODULE_AUTHOR("Dag Arne Osvik "); +-MODULE_ALIAS("tnepres"); +-MODULE_ALIAS("serpent"); ++MODULE_ALIAS_CRYPTO("tnepres"); ++MODULE_ALIAS_CRYPTO("serpent"); ++MODULE_ALIAS_CRYPTO("serpent-generic"); +diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c +index 4279480..fdf7c00 100644 +--- a/crypto/sha1_generic.c ++++ b/crypto/sha1_generic.c +@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); + +-MODULE_ALIAS("sha1"); ++MODULE_ALIAS_CRYPTO("sha1"); ++MODULE_ALIAS_CRYPTO("sha1-generic"); +diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c +index 5433667..136381b 100644 +--- a/crypto/sha256_generic.c ++++ b/crypto/sha256_generic.c +@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); + +-MODULE_ALIAS("sha224"); +-MODULE_ALIAS("sha256"); ++MODULE_ALIAS_CRYPTO("sha224"); ++MODULE_ALIAS_CRYPTO("sha224-generic"); ++MODULE_ALIAS_CRYPTO("sha256"); ++MODULE_ALIAS_CRYPTO("sha256-generic"); +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c +index 6ed124f..6c6d901 100644 +--- a/crypto/sha512_generic.c ++++ b/crypto/sha512_generic.c +@@ -287,5 +287,7 @@ module_exit(sha512_generic_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); + +-MODULE_ALIAS("sha384"); +-MODULE_ALIAS("sha512"); ++MODULE_ALIAS_CRYPTO("sha384"); ++MODULE_ALIAS_CRYPTO("sha384-generic"); ++MODULE_ALIAS_CRYPTO("sha512"); ++MODULE_ALIAS_CRYPTO("sha512-generic"); +diff --git a/crypto/tea.c b/crypto/tea.c +index 0a57232..b70b441 100644 +--- a/crypto/tea.c ++++ b/crypto/tea.c +@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void) + 
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs)); + } + +-MODULE_ALIAS("xtea"); +-MODULE_ALIAS("xeta"); ++MODULE_ALIAS_CRYPTO("tea"); ++MODULE_ALIAS_CRYPTO("xtea"); ++MODULE_ALIAS_CRYPTO("xeta"); + + module_init(tea_mod_init); + module_exit(tea_mod_fini); +diff --git a/crypto/tgr192.c b/crypto/tgr192.c +index 8740355..f7ed2fb 100644 +--- a/crypto/tgr192.c ++++ b/crypto/tgr192.c +@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void) + crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs)); + } + +-MODULE_ALIAS("tgr160"); +-MODULE_ALIAS("tgr128"); ++MODULE_ALIAS_CRYPTO("tgr192"); ++MODULE_ALIAS_CRYPTO("tgr160"); ++MODULE_ALIAS_CRYPTO("tgr128"); + + module_init(tgr192_mod_init); + module_exit(tgr192_mod_fini); +diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c +index 2d50005..ebf7a3e 100644 +--- a/crypto/twofish_generic.c ++++ b/crypto/twofish_generic.c +@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); +-MODULE_ALIAS("twofish"); ++MODULE_ALIAS_CRYPTO("twofish"); ++MODULE_ALIAS_CRYPTO("twofish-generic"); +diff --git a/crypto/vmac.c b/crypto/vmac.c +index 2eb11a3..bf2d3a8 100644 +--- a/crypto/vmac.c ++++ b/crypto/vmac.c +@@ -713,3 +713,4 @@ module_exit(vmac_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("VMAC hash algorithm"); ++MODULE_ALIAS_CRYPTO("vmac"); +diff --git a/crypto/wp512.c b/crypto/wp512.c +index 180f1d6..253db94 100644 +--- a/crypto/wp512.c ++++ b/crypto/wp512.c +@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void) + crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs)); + } + +-MODULE_ALIAS("wp384"); +-MODULE_ALIAS("wp256"); ++MODULE_ALIAS_CRYPTO("wp512"); ++MODULE_ALIAS_CRYPTO("wp384"); ++MODULE_ALIAS_CRYPTO("wp256"); + + module_init(wp512_mod_init); + module_exit(wp512_mod_fini); +diff --git a/crypto/xcbc.c b/crypto/xcbc.c +index a5fbdf3..df90b33 100644 +--- a/crypto/xcbc.c ++++ b/crypto/xcbc.c +@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("XCBC keyed hash algorithm"); ++MODULE_ALIAS_CRYPTO("xcbc"); +diff --git a/crypto/xts.c b/crypto/xts.c +index ca1608f..f6fd43f 100644 +--- a/crypto/xts.c ++++ b/crypto/xts.c +@@ -362,3 +362,4 @@ module_exit(crypto_module_exit); + + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("XTS block cipher mode"); ++MODULE_ALIAS_CRYPTO("xts"); +diff --git a/crypto/zlib.c b/crypto/zlib.c +index 06b62e5..d980788 100644 +--- a/crypto/zlib.c ++++ b/crypto/zlib.c +@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini); + MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Zlib Compression Algorithm"); + MODULE_AUTHOR("Sony Corporation"); ++MODULE_ALIAS_CRYPTO("zlib"); +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index 37acda6..136803c 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap) + DPRINTK("ENTER\n"); + + cancel_delayed_work_sync(&ap->sff_pio_task); ++ ++ /* ++ * We wanna reset the HSM state to IDLE. If we do so without ++ * grabbing the port lock, critical sections protected by it which ++ * expect the HSM state to stay stable may get surprised. For ++ * example, we may set IDLE in between the time ++ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls ++ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG(). 
++ */ ++ spin_lock_irq(ap->lock); + ap->hsm_task_state = HSM_ST_IDLE; ++ spin_unlock_irq(ap->lock); ++ + ap->sff_pio_task_link = NULL; + + if (ata_msg_ctl(ap)) +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c +index 523524b..f71e09d 100644 +--- a/drivers/ata/sata_dwc_460ex.c ++++ b/drivers/ata/sata_dwc_460ex.c +@@ -799,7 +799,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) + if (err) { + dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" + " %d\n", __func__, err); +- goto error_out; ++ return err; + } + + /* Enabe DMA */ +@@ -810,11 +810,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) + sata_dma_regs); + + return 0; +- +-error_out: +- dma_dwc_exit(hsdev); +- +- return err; + } + + static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +@@ -1664,7 +1659,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) + char *ver = (char *)&versionr; + u8 *base = NULL; + int err = 0; +- int irq, rc; ++ int irq; + struct ata_host *host; + struct ata_port_info pi = sata_dwc_port_info[0]; + const struct ata_port_info *ppi[] = { &pi, NULL }; +@@ -1727,7 +1722,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "no SATA DMA irq\n"); + err = -ENODEV; +- goto error_out; ++ goto error_iomap; + } + + /* Get physical SATA DMA register base address */ +@@ -1736,14 +1731,16 @@ static int sata_dwc_probe(struct platform_device *ofdev) + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" + " address\n"); + err = -ENODEV; +- goto error_out; ++ goto error_iomap; + } + + /* Save dev for later use in dev_xxx() routines */ + host_pvt.dwc_dev = &ofdev->dev; + + /* Initialize AHB DMAC */ +- dma_dwc_init(hsdev, irq); ++ err = dma_dwc_init(hsdev, irq); ++ if (err) ++ goto error_dma_iomap; + + /* Enable SATA Interrupts */ + sata_dwc_enable_interrupts(hsdev); +@@ -1761,9 +1758,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) + * device discovery process, invoking our port_start() handler & + * error_handler() to execute a dummy Softreset EH session + */ +- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); +- +- if (rc != 0) ++ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); ++ if (err) + dev_err(&ofdev->dev, "failed to activate host"); + + dev_set_drvdata(&ofdev->dev, host); +@@ -1772,7 +1768,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) + error_out: + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); +- ++error_dma_iomap: ++ iounmap((void __iomem *)host_pvt.sata_dma_regs); + error_iomap: + iounmap(base); + error_kmalloc: +@@ -1793,6 +1790,7 @@ static int sata_dwc_remove(struct platform_device *ofdev) + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + ++ iounmap((void __iomem *)host_pvt.sata_dma_regs); + iounmap(hsdev->reg_base); + kfree(hsdev); + kfree(host); +diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c +index 104a040..6efdbea 100644 +--- a/drivers/block/drbd/drbd_req.c ++++ b/drivers/block/drbd/drbd_req.c +@@ -1310,6 +1310,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct + struct request_queue * const b = + mdev->ldev->backing_bdev->bd_disk->queue; + if (b->merge_bvec_fn) { ++ bvm->bi_bdev = mdev->ldev->backing_bdev; + backing_limit = b->merge_bvec_fn(b, bvm, bvec); + limit = min(limit, backing_limit); + } +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c +index 372ae72..e990dee 100644 +--- 
a/drivers/bus/mvebu-mbus.c ++++ b/drivers/bus/mvebu-mbus.c +@@ -181,12 +181,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus, + } + + /* Checks whether the given window number is available */ ++ ++/* On Armada XP, 375 and 38x the MBus window 13 has the remap ++ * capability, like windows 0 to 7. However, the mvebu-mbus driver ++ * isn't currently taking into account this special case, which means ++ * that when window 13 is actually used, the remap registers are left ++ * to 0, making the device using this MBus window unavailable. The ++ * quick fix for stable is to not use window 13. A follow up patch ++ * will correctly handle this window. ++*/ + static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, + const int win) + { + void __iomem *addr = mbus->mbuswins_base + + mbus->soc->win_cfg_offset(win); + u32 ctrl = readl(addr + WIN_CTRL_OFF); ++ ++ if (win == 13) ++ return false; ++ + return !(ctrl & WIN_CTRL_ENABLE); + } + +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c +index e252939..831b482 100644 +--- a/drivers/clocksource/exynos_mct.c ++++ b/drivers/clocksource/exynos_mct.c +@@ -98,8 +98,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) + __raw_writel(value, reg_base + offset); + + if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { +- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; +- switch (offset & EXYNOS4_MCT_L_MASK) { ++ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; ++ switch (offset & ~EXYNOS4_MCT_L_MASK) { + case MCT_L_TCON_OFFSET: + mask = 1 << 3; /* L_TCON write status */ + break; +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c +index 633ba94..c178ed8 100644 +--- a/drivers/crypto/padlock-aes.c ++++ b/drivers/crypto/padlock-aes.c +@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Michal Ludvig"); + +-MODULE_ALIAS("aes"); ++MODULE_ALIAS_CRYPTO("aes"); +diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c +index 9266c0e..93d7753 100644 +--- a/drivers/crypto/padlock-sha.c ++++ b/drivers/crypto/padlock-sha.c +@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Michal Ludvig"); + +-MODULE_ALIAS("sha1-all"); +-MODULE_ALIAS("sha256-all"); +-MODULE_ALIAS("sha1-padlock"); +-MODULE_ALIAS("sha256-padlock"); ++MODULE_ALIAS_CRYPTO("sha1-all"); ++MODULE_ALIAS_CRYPTO("sha256-all"); ++MODULE_ALIAS_CRYPTO("sha1-padlock"); ++MODULE_ALIAS_CRYPTO("sha256-padlock"); +diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c +index 92105f3..e4cea7c 100644 +--- a/drivers/crypto/ux500/cryp/cryp_core.c ++++ b/drivers/crypto/ux500/cryp/cryp_core.c +@@ -1810,7 +1810,7 @@ module_exit(ux500_cryp_mod_fini); + module_param(cryp_mode, int, 0); + + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); +-MODULE_ALIAS("aes-all"); +-MODULE_ALIAS("des-all"); ++MODULE_ALIAS_CRYPTO("aes-all"); ++MODULE_ALIAS_CRYPTO("des-all"); + + MODULE_LICENSE("GPL"); +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c +index 1c73f4f..8e5e018 100644 +--- a/drivers/crypto/ux500/hash/hash_core.c ++++ b/drivers/crypto/ux500/hash/hash_core.c +@@ -1995,7 +1995,7 @@ module_exit(ux500_hash_mod_fini); + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); + MODULE_LICENSE("GPL"); + +-MODULE_ALIAS("sha1-all"); 
+-MODULE_ALIAS("sha256-all"); +-MODULE_ALIAS("hmac-sha1-all"); +-MODULE_ALIAS("hmac-sha256-all"); ++MODULE_ALIAS_CRYPTO("sha1-all"); ++MODULE_ALIAS_CRYPTO("sha256-all"); ++MODULE_ALIAS_CRYPTO("hmac-sha1-all"); ++MODULE_ALIAS_CRYPTO("hmac-sha256-all"); +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 5b88c83..ccbffd0 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -408,7 +408,7 @@ static ssize_t gpio_value_store(struct device *dev, + return status; + } + +-static const DEVICE_ATTR(value, 0644, ++static DEVICE_ATTR(value, 0644, + gpio_value_show, gpio_value_store); + + static irqreturn_t gpio_sysfs_irq(int irq, void *priv) +@@ -633,18 +633,16 @@ static ssize_t gpio_active_low_store(struct device *dev, + return status ? : size; + } + +-static const DEVICE_ATTR(active_low, 0644, ++static DEVICE_ATTR(active_low, 0644, + gpio_active_low_show, gpio_active_low_store); + +-static const struct attribute *gpio_attrs[] = { ++static struct attribute *gpio_attrs[] = { + &dev_attr_value.attr, + &dev_attr_active_low.attr, + NULL, + }; + +-static const struct attribute_group gpio_attr_group = { +- .attrs = (struct attribute **) gpio_attrs, +-}; ++ATTRIBUTE_GROUPS(gpio); + + /* + * /sys/class/gpio/gpiochipN/ +@@ -680,16 +678,13 @@ static ssize_t chip_ngpio_show(struct device *dev, + } + static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); + +-static const struct attribute *gpiochip_attrs[] = { ++static struct attribute *gpiochip_attrs[] = { + &dev_attr_base.attr, + &dev_attr_label.attr, + &dev_attr_ngpio.attr, + NULL, + }; +- +-static const struct attribute_group gpiochip_attr_group = { +- .attrs = (struct attribute **) gpiochip_attrs, +-}; ++ATTRIBUTE_GROUPS(gpiochip); + + /* + * /sys/class/gpio/export ... write-only +@@ -844,18 +839,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) + if (desc->chip->names && desc->chip->names[offset]) + ioname = desc->chip->names[offset]; + +- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), +- desc, ioname ? ioname : "gpio%u", +- desc_to_gpio(desc)); ++ dev = device_create_with_groups(&gpio_class, desc->chip->dev, ++ MKDEV(0, 0), desc, gpio_groups, ++ ioname ? 
ioname : "gpio%u", ++ desc_to_gpio(desc)); + if (IS_ERR(dev)) { + status = PTR_ERR(dev); + goto fail_unlock; + } + +- status = sysfs_create_group(&dev->kobj, &gpio_attr_group); +- if (status) +- goto fail_unregister_device; +- + if (direction_may_change) { + status = device_create_file(dev, &dev_attr_direction); + if (status) +@@ -866,13 +858,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) + !test_bit(FLAG_IS_OUT, &desc->flags))) { + status = device_create_file(dev, &dev_attr_edge); + if (status) +- goto fail_unregister_device; ++ goto fail_remove_attr_direction; + } + + set_bit(FLAG_EXPORT, &desc->flags); + mutex_unlock(&sysfs_lock); + return 0; + ++fail_remove_attr_direction: ++ device_remove_file(dev, &dev_attr_direction); + fail_unregister_device: + device_unregister(dev); + fail_unlock: +@@ -1006,6 +1000,8 @@ void gpiod_unexport(struct gpio_desc *desc) + mutex_unlock(&sysfs_lock); + + if (dev) { ++ device_remove_file(dev, &dev_attr_edge); ++ device_remove_file(dev, &dev_attr_direction); + device_unregister(dev); + put_device(dev); + } +@@ -1030,13 +1026,13 @@ static int gpiochip_export(struct gpio_chip *chip) + + /* use chip->base for the ID; it's already known to be unique */ + mutex_lock(&sysfs_lock); +- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, +- "gpiochip%d", chip->base); +- if (!IS_ERR(dev)) { +- status = sysfs_create_group(&dev->kobj, +- &gpiochip_attr_group); +- } else ++ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), ++ chip, gpiochip_groups, ++ "gpiochip%d", chip->base); ++ if (IS_ERR(dev)) + status = PTR_ERR(dev); ++ else ++ status = 0; + chip->exported = (status == 0); + mutex_unlock(&sysfs_lock); + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 7410a50..3153eab 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -4978,7 +4978,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ +diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c +index 5600d4c..64d6cfb 100644 +--- a/drivers/gpu/drm/radeon/radeon_asic.c ++++ b/drivers/gpu/drm/radeon/radeon_asic.c +@@ -335,6 +335,20 @@ static struct radeon_asic_ring r300_gfx_ring = { + .set_wptr = &r100_gfx_set_wptr, + }; + ++static struct radeon_asic_ring rv515_gfx_ring = { ++ .ib_execute = &r100_ring_ib_execute, ++ .emit_fence = &r300_fence_ring_emit, ++ .emit_semaphore = &r100_semaphore_ring_emit, ++ .cs_parse = &r300_cs_parse, ++ .ring_start = &rv515_ring_start, ++ .ring_test = &r100_ring_test, ++ .ib_test = &r100_ib_test, ++ .is_lockup = &r100_gpu_is_lockup, ++ .get_rptr = &r100_gfx_get_rptr, ++ .get_wptr = &r100_gfx_get_wptr, ++ .set_wptr = &r100_gfx_set_wptr, ++}; ++ + static struct radeon_asic r300_asic = { + .init = &r300_init, + .fini = &r300_fini, +@@ -756,7 +770,7 @@ static struct radeon_asic rv515_asic = { + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring + }, + .irq = { + .set = &rs600_irq_set, +@@ -823,7 +837,7 @@ static struct radeon_asic r520_asic = { + .set_page = &rv370_pcie_gart_set_page, + }, + .ring = { +- 
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring + }, + .irq = { + .set = &rs600_irq_set, +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c +index cfb513f..0095ee7 100644 +--- a/drivers/gpu/drm/radeon/radeon_pm.c ++++ b/drivers/gpu/drm/radeon/radeon_pm.c +@@ -1260,8 +1260,39 @@ dpm_failed: + return ret; + } + ++struct radeon_dpm_quirk { ++ u32 chip_vendor; ++ u32 chip_device; ++ u32 subsys_vendor; ++ u32 subsys_device; ++}; ++ ++/* cards with dpm stability problems */ ++static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { ++ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ ++ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, ++ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ ++ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, ++ { 0, 0, 0, 0 }, ++}; ++ + int radeon_pm_init(struct radeon_device *rdev) + { ++ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; ++ bool disable_dpm = false; ++ ++ /* Apply dpm quirks */ ++ while (p && p->chip_device != 0) { ++ if (rdev->pdev->vendor == p->chip_vendor && ++ rdev->pdev->device == p->chip_device && ++ rdev->pdev->subsystem_vendor == p->subsys_vendor && ++ rdev->pdev->subsystem_device == p->subsys_device) { ++ disable_dpm = true; ++ break; ++ } ++ ++p; ++ } ++ + /* enable dpm on rv6xx+ */ + switch (rdev->family) { + case CHIP_RV610: +@@ -1316,6 +1347,8 @@ int radeon_pm_init(struct radeon_device *rdev) + (!(rdev->flags & RADEON_IS_IGP)) && + (!rdev->smc_fw)) + rdev->pm.pm_method = PM_METHOD_PROFILE; ++ else if (disable_dpm && (radeon_dpm == -1)) ++ rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (radeon_dpm == 0) + rdev->pm.pm_method = PM_METHOD_PROFILE; + else +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index 879e628..35bf2bb 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -2900,6 +2900,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) + return ret; + } + ++struct si_dpm_quirk { ++ u32 chip_vendor; ++ u32 chip_device; ++ u32 subsys_vendor; ++ u32 subsys_device; ++ u32 max_sclk; ++ u32 max_mclk; ++}; ++ ++/* cards with dpm stability problems */ ++static struct si_dpm_quirk si_dpm_quirk_list[] = { ++ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, ++ { 0, 0, 0, 0 }, ++}; ++ + static void si_apply_state_adjust_rules(struct radeon_device *rdev, + struct radeon_ps *rps) + { +@@ -2910,7 +2926,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, + u32 mclk, sclk; + u16 vddc, vddci; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; ++ u32 max_sclk = 0, max_mclk = 0; + int i; ++ struct si_dpm_quirk *p = si_dpm_quirk_list; ++ ++ /* Apply dpm quirks */ ++ while (p && p->chip_device != 0) { ++ if (rdev->pdev->vendor == p->chip_vendor && ++ rdev->pdev->device == p->chip_device && ++ rdev->pdev->subsystem_vendor == p->subsys_vendor && ++ rdev->pdev->subsystem_device == p->subsys_device) { ++ max_sclk = p->max_sclk; ++ max_mclk = p->max_mclk; ++ break; ++ } ++ ++p; ++ } + + if ((rdev->pm.dpm.new_active_crtc_count > 1) || + ni_dpm_vblank_too_short(rdev)) +@@ -2964,6 +2995,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } ++ if (max_mclk) { ++ if (ps->performance_levels[i].mclk > max_mclk) ++ 
ps->performance_levels[i].mclk = max_mclk; ++ } ++ if (max_sclk) { ++ if (ps->performance_levels[i].sclk > max_sclk) ++ ps->performance_levels[i].sclk = max_sclk; ++ } + } + + /* XXX validate the min clocks required for display */ +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index ce953d8..fb787c3 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -757,20 +757,23 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p) + */ + static int evdev_handle_get_val(struct evdev_client *client, + struct input_dev *dev, unsigned int type, +- unsigned long *bits, unsigned int max, +- unsigned int size, void __user *p, int compat) ++ unsigned long *bits, unsigned int maxbit, ++ unsigned int maxlen, void __user *p, ++ int compat) + { + int ret; + unsigned long *mem; ++ size_t len; + +- mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL); ++ len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long); ++ mem = kmalloc(len, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + spin_lock_irq(&dev->event_lock); + spin_lock(&client->buffer_lock); + +- memcpy(mem, bits, sizeof(unsigned long) * max); ++ memcpy(mem, bits, len); + + spin_unlock(&dev->event_lock); + +@@ -778,7 +781,7 @@ static int evdev_handle_get_val(struct evdev_client *client, + + spin_unlock_irq(&client->buffer_lock); + +- ret = bits_to_user(mem, max, size, p, compat); ++ ret = bits_to_user(mem, maxbit, maxlen, p, compat); + if (ret < 0) + evdev_queue_syn_dropped(client); + +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c +index a87d3fa..d290e83 100644 +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -94,6 +94,9 @@ struct cache_disk_superblock { + } __packed; + + struct dm_cache_metadata { ++ atomic_t ref_count; ++ struct list_head list; ++ + struct block_device *bdev; + struct dm_block_manager *bm; + struct dm_space_map *metadata_sm; +@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags) + + /*----------------------------------------------------------------*/ + +-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, +- sector_t data_block_size, +- bool may_format_device, +- size_t policy_hint_size) ++static struct dm_cache_metadata *metadata_open(struct block_device *bdev, ++ sector_t data_block_size, ++ bool may_format_device, ++ size_t policy_hint_size) + { + int r; + struct dm_cache_metadata *cmd; +@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + return NULL; + } + ++ atomic_set(&cmd->ref_count, 1); + init_rwsem(&cmd->root_lock); + cmd->bdev = bdev; + cmd->data_block_size = data_block_size; +@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + return cmd; + } + ++/* ++ * We keep a little list of ref counted metadata objects to prevent two ++ * different target instances creating separate bufio instances. This is ++ * an issue if a table is reloaded before the suspend. 
++ */ ++static DEFINE_MUTEX(table_lock); ++static LIST_HEAD(table); ++ ++static struct dm_cache_metadata *lookup(struct block_device *bdev) ++{ ++ struct dm_cache_metadata *cmd; ++ ++ list_for_each_entry(cmd, &table, list) ++ if (cmd->bdev == bdev) { ++ atomic_inc(&cmd->ref_count); ++ return cmd; ++ } ++ ++ return NULL; ++} ++ ++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, ++ sector_t data_block_size, ++ bool may_format_device, ++ size_t policy_hint_size) ++{ ++ struct dm_cache_metadata *cmd, *cmd2; ++ ++ mutex_lock(&table_lock); ++ cmd = lookup(bdev); ++ mutex_unlock(&table_lock); ++ ++ if (cmd) ++ return cmd; ++ ++ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); ++ if (cmd) { ++ mutex_lock(&table_lock); ++ cmd2 = lookup(bdev); ++ if (cmd2) { ++ mutex_unlock(&table_lock); ++ __destroy_persistent_data_objects(cmd); ++ kfree(cmd); ++ return cmd2; ++ } ++ list_add(&cmd->list, &table); ++ mutex_unlock(&table_lock); ++ } ++ ++ return cmd; ++} ++ ++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) ++{ ++ if (cmd->data_block_size != data_block_size) { ++ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n", ++ (unsigned long long) data_block_size, ++ (unsigned long long) cmd->data_block_size); ++ return false; ++ } ++ ++ return true; ++} ++ ++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, ++ sector_t data_block_size, ++ bool may_format_device, ++ size_t policy_hint_size) ++{ ++ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, ++ may_format_device, policy_hint_size); ++ if (cmd && !same_params(cmd, data_block_size)) { ++ dm_cache_metadata_close(cmd); ++ return NULL; ++ } ++ ++ return cmd; ++} ++ + void dm_cache_metadata_close(struct dm_cache_metadata *cmd) + { +- __destroy_persistent_data_objects(cmd); +- kfree(cmd); ++ if (atomic_dec_and_test(&cmd->ref_count)) { ++ mutex_lock(&table_lock); ++ list_del(&cmd->list); ++ mutex_unlock(&table_lock); ++ ++ __destroy_persistent_data_objects(cmd); ++ kfree(cmd); ++ } + } + + /* +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index ff284b7..c10dec0 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -222,7 +222,13 @@ struct cache { + struct list_head need_commit_migrations; + sector_t migration_threshold; + wait_queue_head_t migration_wait; +- atomic_t nr_migrations; ++ atomic_t nr_allocated_migrations; ++ ++ /* ++ * The number of in flight migrations that are performing ++ * background io. eg, promotion, writeback. 
++ */ ++ atomic_t nr_io_migrations; + + wait_queue_head_t quiescing_wait; + atomic_t quiescing; +@@ -259,7 +265,6 @@ struct cache { + struct dm_deferred_set *all_io_ds; + + mempool_t *migration_pool; +- struct dm_cache_migration *next_migration; + + struct dm_cache_policy *policy; + unsigned policy_nr_args; +@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel + dm_bio_prison_free_cell(cache->prison, cell); + } + ++static struct dm_cache_migration *alloc_migration(struct cache *cache) ++{ ++ struct dm_cache_migration *mg; ++ ++ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); ++ if (mg) { ++ mg->cache = cache; ++ atomic_inc(&mg->cache->nr_allocated_migrations); ++ } ++ ++ return mg; ++} ++ ++static void free_migration(struct dm_cache_migration *mg) ++{ ++ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) ++ wake_up(&mg->cache->migration_wait); ++ ++ mempool_free(mg, mg->cache->migration_pool); ++} ++ + static int prealloc_data_structs(struct cache *cache, struct prealloc *p) + { + if (!p->mg) { +- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); ++ p->mg = alloc_migration(cache); + if (!p->mg) + return -ENOMEM; + } +@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p) + free_prison_cell(cache, p->cell1); + + if (p->mg) +- mempool_free(p->mg, cache->migration_pool); ++ free_migration(p->mg); + } + + static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) +@@ -812,24 +838,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, + * Migration covers moving data from the origin device to the cache, or + * vice versa. + *--------------------------------------------------------------*/ +-static void free_migration(struct dm_cache_migration *mg) +-{ +- mempool_free(mg, mg->cache->migration_pool); +-} +- +-static void inc_nr_migrations(struct cache *cache) ++static void inc_io_migrations(struct cache *cache) + { +- atomic_inc(&cache->nr_migrations); ++ atomic_inc(&cache->nr_io_migrations); + } + +-static void dec_nr_migrations(struct cache *cache) ++static void dec_io_migrations(struct cache *cache) + { +- atomic_dec(&cache->nr_migrations); +- +- /* +- * Wake the worker in case we're suspending the target. 
+- */ +- wake_up(&cache->migration_wait); ++ atomic_dec(&cache->nr_io_migrations); + } + + static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, +@@ -852,11 +868,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, + wake_worker(cache); + } + +-static void cleanup_migration(struct dm_cache_migration *mg) ++static void free_io_migration(struct dm_cache_migration *mg) + { +- struct cache *cache = mg->cache; ++ dec_io_migrations(mg->cache); + free_migration(mg); +- dec_nr_migrations(cache); + } + + static void migration_failure(struct dm_cache_migration *mg) +@@ -881,7 +896,7 @@ static void migration_failure(struct dm_cache_migration *mg) + cell_defer(cache, mg->new_ocell, true); + } + +- cleanup_migration(mg); ++ free_io_migration(mg); + } + + static void migration_success_pre_commit(struct dm_cache_migration *mg) +@@ -892,7 +907,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) + if (mg->writeback) { + clear_dirty(cache, mg->old_oblock, mg->cblock); + cell_defer(cache, mg->old_ocell, false); +- cleanup_migration(mg); ++ free_io_migration(mg); + return; + + } else if (mg->demote) { +@@ -902,14 +917,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) + mg->old_oblock); + if (mg->promote) + cell_defer(cache, mg->new_ocell, true); +- cleanup_migration(mg); ++ free_io_migration(mg); + return; + } + } else { + if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { + DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); + policy_remove_mapping(cache->policy, mg->new_oblock); +- cleanup_migration(mg); ++ free_io_migration(mg); + return; + } + } +@@ -942,7 +957,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) + } else { + if (mg->invalidate) + policy_remove_mapping(cache->policy, mg->old_oblock); +- cleanup_migration(mg); ++ free_io_migration(mg); + } + + } else { +@@ -957,7 +972,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) + bio_endio(mg->new_ocell->holder, 0); + cell_defer(cache, mg->new_ocell, false); + } +- cleanup_migration(mg); ++ free_io_migration(mg); + } + } + +@@ -1169,7 +1184,7 @@ static void promote(struct cache *cache, struct prealloc *structs, + mg->new_ocell = cell; + mg->start_jiffies = jiffies; + +- inc_nr_migrations(cache); ++ inc_io_migrations(cache); + quiesce_migration(mg); + } + +@@ -1192,7 +1207,7 @@ static void writeback(struct cache *cache, struct prealloc *structs, + mg->new_ocell = NULL; + mg->start_jiffies = jiffies; + +- inc_nr_migrations(cache); ++ inc_io_migrations(cache); + quiesce_migration(mg); + } + +@@ -1218,7 +1233,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs, + mg->new_ocell = new_ocell; + mg->start_jiffies = jiffies; + +- inc_nr_migrations(cache); ++ inc_io_migrations(cache); + quiesce_migration(mg); + } + +@@ -1245,7 +1260,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs, + mg->new_ocell = NULL; + mg->start_jiffies = jiffies; + +- inc_nr_migrations(cache); ++ inc_io_migrations(cache); + quiesce_migration(mg); + } + +@@ -1306,7 +1321,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio) + + static bool spare_migration_bandwidth(struct cache *cache) + { +- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * ++ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * + cache->sectors_per_block; + return current_volume < cache->migration_threshold; + } 
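The accounting change in the hunks above is subtle: the old single nr_migrations counter was used both to throttle background I/O and to decide when a suspend may proceed, yet preallocated-but-idle migration structures were never counted at all. After the split, nr_io_migrations throttles only real I/O (promotion, writeback), while nr_allocated_migrations counts every live structure, and the suspend-time waiter in the next hunk (wait_for_migrations) sleeps until the allocated count truly reaches zero. Below is a minimal user-space sketch of that wake-on-last-free idiom, using a pthread mutex and condition variable in place of the kernel's atomic_dec_and_test()/wake_up() pair; the names mg_alloc, mg_free and wait_for_all are illustrative, not from the patch:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_freed = PTHREAD_COND_INITIALIZER;
static int nr_allocated; /* stands in for cache->nr_allocated_migrations */

struct migration { int dummy; };

/* counterpart of alloc_migration(): count every live structure */
static struct migration *mg_alloc(void)
{
	struct migration *mg = malloc(sizeof(*mg));
	if (mg) {
		pthread_mutex_lock(&lock);
		nr_allocated++;
		pthread_mutex_unlock(&lock);
	}
	return mg;
}

/* counterpart of free_migration(): wake the waiter on the last free */
static void mg_free(struct migration *mg)
{
	if (!mg)
		return;
	pthread_mutex_lock(&lock);
	if (--nr_allocated == 0)
		pthread_cond_broadcast(&all_freed);
	pthread_mutex_unlock(&lock);
	free(mg);
}

/* counterpart of wait_for_migrations(): suspend-time barrier */
static void wait_for_all(void)
{
	pthread_mutex_lock(&lock);
	while (nr_allocated)
		pthread_cond_wait(&all_freed, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct migration *mg = mg_alloc();
	mg_free(mg);
	wait_for_all(); /* returns at once: nothing is outstanding */
	return 0;
}

The kernel gets the same effect lock-free, but the invariant is identical: teardown may proceed only once every allocated migration, not merely every in-flight I/O, has been returned.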
+@@ -1661,7 +1676,7 @@ static void stop_quiescing(struct cache *cache) + + static void wait_for_migrations(struct cache *cache) + { +- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); ++ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); + } + + static void stop_worker(struct cache *cache) +@@ -1772,9 +1787,6 @@ static void destroy(struct cache *cache) + { + unsigned i; + +- if (cache->next_migration) +- mempool_free(cache->next_migration, cache->migration_pool); +- + if (cache->migration_pool) + mempool_destroy(cache->migration_pool); + +@@ -2282,7 +2294,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) + INIT_LIST_HEAD(&cache->quiesced_migrations); + INIT_LIST_HEAD(&cache->completed_migrations); + INIT_LIST_HEAD(&cache->need_commit_migrations); +- atomic_set(&cache->nr_migrations, 0); ++ atomic_set(&cache->nr_allocated_migrations, 0); ++ atomic_set(&cache->nr_io_migrations, 0); + init_waitqueue_head(&cache->migration_wait); + + init_waitqueue_head(&cache->quiescing_wait); +@@ -2342,8 +2355,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) + goto bad; + } + +- cache->next_migration = NULL; +- + cache->need_tick_bio = true; + cache->sized = false; + cache->invalidate = false; +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 4913c06..175584a 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2896,7 +2896,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, + (s->failed >= 2 && fdev[1]->toread) || + (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && + !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || +- (sh->raid_conf->level == 6 && s->failed && s->to_write))) { ++ ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp) ++ && s->failed && s->to_write))) { + /* we would like to get this block, possibly by computing it, + * otherwise read it if the backing disk is insync + */ +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index cc11f7f..1468c46 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -664,10 +664,14 @@ static int can_changelink(struct net_device *dev, + if (dev->flags & IFF_UP) + return -EBUSY; + cm = nla_data(data[IFLA_CAN_CTRLMODE]); +- if (cm->flags & ~priv->ctrlmode_supported) ++ ++ /* check whether changed bits are allowed to be modified */ ++ if (cm->mask & ~priv->ctrlmode_supported) + return -EOPNOTSUPP; ++ ++ /* clear bits to be modified and copy the flag values */ + priv->ctrlmode &= ~cm->mask; +- priv->ctrlmode |= cm->flags; ++ priv->ctrlmode |= (cm->flags & cm->mask); + } + + if (data[IFLA_CAN_RESTART_MS]) { +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index dae70d2..78c65d3 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -3187,7 +3187,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) + { + struct pci_dev *pdev; + +- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) ++ if (pci_is_root_bus(dev->bus) || dev->subordinate || ++ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) + return -ENOTTY; + + list_for_each_entry(pdev, &dev->bus->devices, bus_list) +@@ -3221,7 +3222,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) + { + struct pci_dev *pdev; + +- if (dev->subordinate || !dev->slot) ++ if (dev->subordinate || !dev->slot || ++ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) + return -ENOTTY; + + list_for_each_entry(pdev, &dev->bus->devices, bus_list) +@@ 
-3452,6 +3454,20 @@ int pci_try_reset_function(struct pci_dev *dev) + } + EXPORT_SYMBOL_GPL(pci_try_reset_function); + ++/* Do any devices on or below this bus prevent a bus reset? */ ++static bool pci_bus_resetable(struct pci_bus *bus) ++{ ++ struct pci_dev *dev; ++ ++ list_for_each_entry(dev, &bus->devices, bus_list) { ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate))) ++ return false; ++ } ++ ++ return true; ++} ++ + /* Lock devices from the top of the tree down */ + static void pci_bus_lock(struct pci_bus *bus) + { +@@ -3502,6 +3518,22 @@ unlock: + return 0; + } + ++/* Do any devices on or below this slot prevent a bus reset? */ ++static bool pci_slot_resetable(struct pci_slot *slot) ++{ ++ struct pci_dev *dev; ++ ++ list_for_each_entry(dev, &slot->bus->devices, bus_list) { ++ if (!dev->slot || dev->slot != slot) ++ continue; ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate))) ++ return false; ++ } ++ ++ return true; ++} ++ + /* Lock devices from the top of the tree down */ + static void pci_slot_lock(struct pci_slot *slot) + { +@@ -3623,7 +3655,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe) + { + int rc; + +- if (!slot) ++ if (!slot || !pci_slot_resetable(slot)) + return -ENOTTY; + + if (!probe) +@@ -3715,7 +3747,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot); + + static int pci_bus_reset(struct pci_bus *bus, int probe) + { +- if (!bus->self) ++ if (!bus->self || !pci_bus_resetable(bus)) + return -ENOTTY; + + if (probe) +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 6e8776b..27abeb4 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3008,6 +3008,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030, + DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ + quirk_broken_intx_masking); + ++static void quirk_no_bus_reset(struct pci_dev *dev) ++{ ++ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; ++} ++ ++/* ++ * Atheros AR93xx chips do not behave after a bus reset. The device will ++ * throw a Link Down error on AER-capable systems and regardless of AER, ++ * config space of the device is never accessible again and typically ++ * causes the system to hang or reset when access is attempted. ++ * http://www.spinics.net/lists/linux-pci/msg34797.html ++ */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); ++ + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, + struct pci_fixup *end) + { +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c +index c0fe609..988f5e1 100644 +--- a/drivers/pinctrl/core.c ++++ b/drivers/pinctrl/core.c +@@ -1812,14 +1812,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev) + if (pctldev == NULL) + return; + +- mutex_lock(&pinctrldev_list_mutex); + mutex_lock(&pctldev->mutex); +- + pinctrl_remove_device_debugfs(pctldev); ++ mutex_unlock(&pctldev->mutex); + + if (!IS_ERR(pctldev->p)) + pinctrl_put(pctldev->p); + ++ mutex_lock(&pinctrldev_list_mutex); ++ mutex_lock(&pctldev->mutex); + /* TODO: check that no pinmuxes are still active? 
*/
+ list_del(&pctldev->node);
+ /* Destroy descriptor tree */
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index ab3baa7..86ade85 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -44,6 +44,7 @@
+ #include
+ #include
+ #include
++#include <linux/crypto.h>
+
+ #include "ap_bus.h"
+
+@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
+ MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
+ "Copyright IBM Corp. 2006, 2012");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("z90crypt");
++MODULE_ALIAS_CRYPTO("z90crypt");
+
+ /*
+ * Module parameter
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 3f5b56a..b4ddb73 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+ ipr_reinit_ipr_cmnd(ipr_cmd);
+ ipr_cmd->u.scratch = 0;
+ ipr_cmd->sibling = NULL;
++ ipr_cmd->eh_comp = NULL;
+ ipr_cmd->fast_done = fast_done;
+ init_timer(&ipr_cmd->timer);
+ }
+@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ scsi_cmd->scsi_done(scsi_cmd);
++ if (ipr_cmd->eh_comp)
++ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+
+@@ -4805,6 +4808,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
+ return rc;
+ }
+
++/**
++ * ipr_match_lun - Match function for specified LUN
++ * @ipr_cmd: ipr command struct
++ * @device: device to match (sdev)
++ *
++ * Returns:
++ * 1 if command matches sdev / 0 if command does not match sdev
++ **/
++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
++{
++ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
++ return 1;
++ return 0;
++}
++
++/**
++ * ipr_wait_for_ops - Wait for matching commands to complete
++ * @ipr_cmd: ipr command struct
++ * @device: device to match (sdev)
++ * @match: match function to use
++ *
++ * Returns:
++ * SUCCESS / FAILED
++ **/
++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
++ int (*match)(struct ipr_cmnd *, void *))
++{
++ struct ipr_cmnd *ipr_cmd;
++ int wait;
++ unsigned long flags;
++ struct ipr_hrr_queue *hrrq;
++ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
++ DECLARE_COMPLETION_ONSTACK(comp);
++
++ ENTER;
++ do {
++ wait = 0;
++
++ for_each_hrrq(hrrq, ioa_cfg) {
++ spin_lock_irqsave(hrrq->lock, flags);
++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++ if (match(ipr_cmd, device)) {
++ ipr_cmd->eh_comp = &comp;
++ wait++;
++ }
++ }
++ spin_unlock_irqrestore(hrrq->lock, flags);
++ }
++
++ if (wait) {
++ timeout = wait_for_completion_timeout(&comp, timeout);
++
++ if (!timeout) {
++ wait = 0;
++
++ for_each_hrrq(hrrq, ioa_cfg) {
++ spin_lock_irqsave(hrrq->lock, flags);
++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
++ if (match(ipr_cmd, device)) {
++ ipr_cmd->eh_comp = NULL;
++ wait++;
++ }
++ }
++ spin_unlock_irqrestore(hrrq->lock, flags);
++ }
++
++ if (wait)
++ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
++ LEAVE;
++ return wait ?
FAILED : SUCCESS; ++ } ++ } ++ } while (wait); ++ ++ LEAVE; ++ return SUCCESS; ++} ++ + static int ipr_eh_host_reset(struct scsi_cmnd *cmd) + { + struct ipr_ioa_cfg *ioa_cfg; +@@ -5023,11 +5104,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) + static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) + { + int rc; ++ struct ipr_ioa_cfg *ioa_cfg; ++ ++ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; + + spin_lock_irq(cmd->device->host->host_lock); + rc = __ipr_eh_dev_reset(cmd); + spin_unlock_irq(cmd->device->host->host_lock); + ++ if (rc == SUCCESS) ++ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); ++ + return rc; + } + +@@ -5205,13 +5292,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) + { + unsigned long flags; + int rc; ++ struct ipr_ioa_cfg *ioa_cfg; + + ENTER; + ++ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; ++ + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); + rc = ipr_cancel_op(scsi_cmd); + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); + ++ if (rc == SUCCESS) ++ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); + LEAVE; + return rc; + } +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h +index 9ce38a2..0801f3d 100644 +--- a/drivers/scsi/ipr.h ++++ b/drivers/scsi/ipr.h +@@ -1585,6 +1585,7 @@ struct ipr_cmnd { + struct scsi_device *sdev; + } u; + ++ struct completion *eh_comp; + struct ipr_hrr_queue *hrrq; + struct ipr_ioa_cfg *ioa_cfg; + }; +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index d46b4cc..850e232 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -2417,12 +2417,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, + + poll_wait(file, &tty->read_wait, wait); + poll_wait(file, &tty->write_wait, wait); ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) ++ mask |= POLLHUP; + if (input_available_p(tty, 1)) + mask |= POLLIN | POLLRDNORM; ++ else if (mask & POLLHUP) { ++ tty_flush_to_ldisc(tty); ++ if (input_available_p(tty, 1)) ++ mask |= POLLIN | POLLRDNORM; ++ } + if (tty->packet && tty->link->ctrl_status) + mask |= POLLPRI | POLLIN | POLLRDNORM; +- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) +- mask |= POLLHUP; + if (tty_hung_up_p(file)) + mask |= POLLHUP; + if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c +index 9df5d6e..f3a9d83 100644 +--- a/drivers/xen/swiotlb-xen.c ++++ b/drivers/xen/swiotlb-xen.c +@@ -449,7 +449,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, + + /* NOTE: We use dev_addr here, not paddr! 
*/ + if (is_xen_swiotlb_buffer(dev_addr)) { +- swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir); ++ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); + return; + } + +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 1a858947..fa9f900 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -4507,7 +4507,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) + spin_unlock(&eb->refs_lock); + } + +-static void mark_extent_buffer_accessed(struct extent_buffer *eb) ++static void mark_extent_buffer_accessed(struct extent_buffer *eb, ++ struct page *accessed) + { + unsigned long num_pages, i; + +@@ -4516,7 +4517,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb) + num_pages = num_extent_pages(eb->start, eb->len); + for (i = 0; i < num_pages; i++) { + struct page *p = extent_buffer_page(eb, i); +- mark_page_accessed(p); ++ if (p != accessed) ++ mark_page_accessed(p); + } + } + +@@ -4530,7 +4532,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, + start >> PAGE_CACHE_SHIFT); + if (eb && atomic_inc_not_zero(&eb->refs)) { + rcu_read_unlock(); +- mark_extent_buffer_accessed(eb); ++ mark_extent_buffer_accessed(eb, NULL); + return eb; + } + rcu_read_unlock(); +@@ -4578,7 +4580,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, + spin_unlock(&mapping->private_lock); + unlock_page(p); + page_cache_release(p); +- mark_extent_buffer_accessed(exists); ++ mark_extent_buffer_accessed(exists, p); + goto free_eb; + } + +@@ -4593,7 +4595,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, + attach_extent_buffer_page(eb, p); + spin_unlock(&mapping->private_lock); + WARN_ON(PageDirty(p)); +- mark_page_accessed(p); + eb->pages[i] = p; + if (!PageUptodate(p)) + uptodate = 0; +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index f6d00df..279b06e 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -470,11 +470,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) + for (i = 0; i < num_pages; i++) { + /* page checked is some magic around finding pages that + * have been modified without going through btrfs_set_page_dirty +- * clear it here ++ * clear it here. There should be no need to mark the pages ++ * accessed as prepare_pages should have marked them accessed ++ * in prepare_pages via find_or_create_page() + */ + ClearPageChecked(pages[i]); + unlock_page(pages[i]); +- mark_page_accessed(pages[i]); + page_cache_release(pages[i]); + } + } +diff --git a/fs/buffer.c b/fs/buffer.c +index 4d06a57..eef21c6 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) + int all_mapped = 1; + + index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); +- page = find_get_page(bd_mapping, index); ++ page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); + if (!page) + goto out; + +@@ -1368,12 +1368,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size) + struct buffer_head *bh = lookup_bh_lru(bdev, block, size); + + if (bh == NULL) { ++ /* __find_get_block_slow will mark the page accessed */ + bh = __find_get_block_slow(bdev, block); + if (bh) + bh_lru_install(bh); +- } +- if (bh) ++ } else + touch_buffer(bh); ++ + return bh; + } + EXPORT_SYMBOL(__find_get_block); +@@ -1485,16 +1486,27 @@ EXPORT_SYMBOL(set_bh_page); + /* + * Called when truncating a buffer on a page completely. 
+ */ ++ ++/* Bits that are cleared during an invalidate */ ++#define BUFFER_FLAGS_DISCARD \ ++ (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ ++ 1 << BH_Delay | 1 << BH_Unwritten) ++ + static void discard_buffer(struct buffer_head * bh) + { ++ unsigned long b_state, b_state_old; ++ + lock_buffer(bh); + clear_buffer_dirty(bh); + bh->b_bdev = NULL; +- clear_buffer_mapped(bh); +- clear_buffer_req(bh); +- clear_buffer_new(bh); +- clear_buffer_delay(bh); +- clear_buffer_unwritten(bh); ++ b_state = bh->b_state; ++ for (;;) { ++ b_state_old = cmpxchg(&bh->b_state, b_state, ++ (b_state & ~BUFFER_FLAGS_DISCARD)); ++ if (b_state_old == b_state) ++ break; ++ b_state = b_state_old; ++ } + unlock_buffer(bh); + } + +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c +index 7749230..dfc9564 100644 +--- a/fs/cifs/ioctl.c ++++ b/fs/cifs/ioctl.c +@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, + } + + src_inode = src_file.file->f_dentry->d_inode; ++ rc = -EINVAL; ++ if (S_ISDIR(src_inode->i_mode)) ++ goto out_fput; + + /* + * Note: cifs case is easier than btrfs since server responsible for + * checks for proper open modes and file type and if it wants + * server could even support copy of range where source = target + */ +- +- /* so we do not deadlock racing two ioctls on same files */ +- if (target_inode < src_inode) { +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT); +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); +- } else { +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT); +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD); +- } ++ lock_two_nondirectories(target_inode, src_inode); + + /* determine range to clone */ + rc = -EINVAL; +@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, + out_unlock: + /* although unlocking in the reverse order from locking is not + strictly necessary here it is a little cleaner to be consistent */ +- if (target_inode < src_inode) { +- mutex_unlock(&src_inode->i_mutex); +- mutex_unlock(&target_inode->i_mutex); +- } else { +- mutex_unlock(&target_inode->i_mutex); +- mutex_unlock(&src_inode->i_mutex); +- } ++ unlock_two_nondirectories(src_inode, target_inode); + out_fput: + fdput(src_file); + out_drop_write: +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 242226a..7620133 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + * allocating. If we are looking at the buddy cache we would + * have taken a reference using ext4_mb_load_buddy and that + * would have pinned buddy page to page cache. ++ * The call to ext4_mb_get_buddy_page_lock will mark the ++ * page accessed. + */ + ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); + if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { +@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + ret = -EIO; + goto err; + } +- mark_page_accessed(page); + + if (e4b.bd_buddy_page == NULL) { + /* +@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + ret = -EIO; + goto err; + } +- mark_page_accessed(page); + err: + ext4_mb_put_buddy_page_lock(&e4b); + return ret; +@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + + /* we could use find_or_create_page(), but it locks page + * what we'd like to avoid in fast path ... 
*/ +- page = find_get_page(inode->i_mapping, pnum); ++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); + if (page == NULL || !PageUptodate(page)) { + if (page) + /* +@@ -1172,15 +1172,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + ret = -EIO; + goto err; + } ++ ++ /* Pages marked accessed already */ + e4b->bd_bitmap_page = page; + e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); +- mark_page_accessed(page); + + block++; + pnum = block / blocks_per_page; + poff = block % blocks_per_page; + +- page = find_get_page(inode->i_mapping, pnum); ++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); + if (page == NULL || !PageUptodate(page)) { + if (page) + page_cache_release(page); +@@ -1201,9 +1202,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + ret = -EIO; + goto err; + } ++ ++ /* Pages marked accessed already */ + e4b->bd_buddy_page = page; + e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); +- mark_page_accessed(page); + + BUG_ON(e4b->bd_bitmap_page == NULL); + BUG_ON(e4b->bd_buddy_page == NULL); +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c +index 293d048..5c6fe27 100644 +--- a/fs/f2fs/checkpoint.c ++++ b/fs/f2fs/checkpoint.c +@@ -71,7 +71,6 @@ repeat: + goto repeat; + } + out: +- mark_page_accessed(page); + return page; + } + +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index b0649b7..bb6478a 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -969,7 +969,6 @@ repeat: + } + got_it: + f2fs_bug_on(nid != nid_of_node(page)); +- mark_page_accessed(page); + return page; + } + +@@ -1024,7 +1023,6 @@ page_hit: + f2fs_put_page(page, 1); + return ERR_PTR(-EIO); + } +- mark_page_accessed(page); + return page; + } + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 0a648bb..6eb13c6 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1614,7 +1614,7 @@ out_finish: + + static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) + { +- release_pages(req->pages, req->num_pages, 0); ++ release_pages(req->pages, req->num_pages, false); + } + + static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index a91d3b4..d8a6027 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -1006,8 +1006,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, + tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); + flush_dcache_page(page); + +- mark_page_accessed(page); +- + if (!tmp) { + unlock_page(page); + page_cache_release(page); +diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c +index 49436fa..4ccb60d 100644 +--- a/fs/gfs2/aops.c ++++ b/fs/gfs2/aops.c +@@ -517,7 +517,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, + p = kmap_atomic(page); + memcpy(buf + copied, p + offset, amt); + kunmap_atomic(p); +- mark_page_accessed(page); + page_cache_release(page); + copied += amt; + index++; +diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c +index b82a9c9..e7b1496 100644 +--- a/fs/gfs2/meta_io.c ++++ b/fs/gfs2/meta_io.c +@@ -136,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) + yield(); + } + } else { +- page = find_lock_page(mapping, index); ++ page = find_get_page_flags(mapping, index, ++ FGP_LOCK|FGP_ACCESSED); + if (!page) + return NULL; + } +@@ -153,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) + map_bh(bh, sdp->sd_vfs, blkno); + + unlock_page(page); +- mark_page_accessed(page); + 
page_cache_release(page); + + return bh; +diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c +index a27e3fe..250ed5b 100644 +--- a/fs/ntfs/attrib.c ++++ b/fs/ntfs/attrib.c +@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) + if (page) { + set_page_dirty(page); + unlock_page(page); +- mark_page_accessed(page); + page_cache_release(page); + } + ntfs_debug("Done."); +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c +index db9bd8a..86ddab9 100644 +--- a/fs/ntfs/file.c ++++ b/fs/ntfs/file.c +@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, + } + do { + unlock_page(pages[--do_pages]); +- mark_page_accessed(pages[do_pages]); + page_cache_release(pages[do_pages]); + } while (do_pages); + if (unlikely(status)) +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h +index b19d3dc..ade2390 100644 +--- a/include/linux/cpuset.h ++++ b/include/linux/cpuset.h +@@ -12,10 +12,31 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_CPUSETS + +-extern int number_of_cpusets; /* How many cpusets are defined in system? */ ++extern struct static_key cpusets_enabled_key; ++static inline bool cpusets_enabled(void) ++{ ++ return static_key_false(&cpusets_enabled_key); ++} ++ ++static inline int nr_cpusets(void) ++{ ++ /* jump label reference count + the top-level cpuset */ ++ return static_key_count(&cpusets_enabled_key) + 1; ++} ++ ++static inline void cpuset_inc(void) ++{ ++ static_key_slow_inc(&cpusets_enabled_key); ++} ++ ++static inline void cpuset_dec(void) ++{ ++ static_key_slow_dec(&cpusets_enabled_key); ++} + + extern int cpuset_init(void); + extern void cpuset_init_smp(void); +@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); + + static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) + { +- return number_of_cpusets <= 1 || ++ return nr_cpusets() <= 1 || + __cpuset_node_allowed_softwall(node, gfp_mask); + } + + static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) + { +- return number_of_cpusets <= 1 || ++ return nr_cpusets() <= 1 || + __cpuset_node_allowed_hardwall(node, gfp_mask); + } + +@@ -124,6 +145,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) + + #else /* !CONFIG_CPUSETS */ + ++static inline bool cpusets_enabled(void) { return false; } ++ + static inline int cpuset_init(void) { return 0; } + static inline void cpuset_init_smp(void) {} + +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index b92eadf..2b00d92 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -26,6 +26,19 @@ + #include + + /* ++ * Autoloaded crypto modules should only use a prefixed name to avoid allowing ++ * arbitrary modules to be loaded. Loading from userspace may still need the ++ * unprefixed names, so retains those aliases as well. ++ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 ++ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro ++ * expands twice on the same line. Instead, use a separate base name for the ++ * alias. ++ */ ++#define MODULE_ALIAS_CRYPTO(name) \ ++ __MODULE_INFO(alias, alias_userspace, name); \ ++ __MODULE_INFO(alias, alias_crypto, "crypto-" name) ++ ++/* + * Algorithm masks and types. 
+ */ + #define CRYPTO_ALG_TYPE_MASK 0x0000000f +diff --git a/include/linux/gfp.h b/include/linux/gfp.h +index 39b81dc..3824ac6 100644 +--- a/include/linux/gfp.h ++++ b/include/linux/gfp.h +@@ -369,8 +369,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); + + extern void __free_pages(struct page *page, unsigned int order); + extern void free_pages(unsigned long addr, unsigned int order); +-extern void free_hot_cold_page(struct page *page, int cold); +-extern void free_hot_cold_page_list(struct list_head *list, int cold); ++extern void free_hot_cold_page(struct page *page, bool cold); ++extern void free_hot_cold_page_list(struct list_head *list, bool cold); + + extern void __free_memcg_kmem_pages(struct page *page, unsigned int order); + extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order); +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h +index b826239..63579cb8 100644 +--- a/include/linux/huge_mm.h ++++ b/include/linux/huge_mm.h +@@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); + #endif /* CONFIG_DEBUG_VM */ + + extern unsigned long transparent_hugepage_flags; +-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, +- pmd_t *dst_pmd, pmd_t *src_pmd, +- struct vm_area_struct *vma, +- unsigned long addr, unsigned long end); + extern int split_huge_page_to_list(struct page *page, struct list_head *list); + static inline int split_huge_page(struct page *page) + { +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h +index 5c1dfb2..784304b 100644 +--- a/include/linux/jump_label.h ++++ b/include/linux/jump_label.h +@@ -69,6 +69,10 @@ struct static_key { + + # include + # define HAVE_JUMP_LABEL ++#else ++struct static_key { ++ atomic_t enabled; ++}; + #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ + + enum jump_label_type { +@@ -79,6 +83,12 @@ enum jump_label_type { + struct module; + + #include ++ ++static inline int static_key_count(struct static_key *key) ++{ ++ return atomic_read(&key->enabled); ++} ++ + #ifdef HAVE_JUMP_LABEL + + #define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL +@@ -134,10 +144,6 @@ extern void jump_label_apply_nops(struct module *mod); + + #else /* !HAVE_JUMP_LABEL */ + +-struct static_key { +- atomic_t enabled; +-}; +- + static __always_inline void jump_label_init(void) + { + static_key_initialized = true; +@@ -145,14 +151,14 @@ static __always_inline void jump_label_init(void) + + static __always_inline bool static_key_false(struct static_key *key) + { +- if (unlikely(atomic_read(&key->enabled) > 0)) ++ if (unlikely(static_key_count(key) > 0)) + return true; + return false; + } + + static __always_inline bool static_key_true(struct static_key *key) + { +- if (likely(atomic_read(&key->enabled) > 0)) ++ if (likely(static_key_count(key) > 0)) + return true; + return false; + } +@@ -194,7 +200,7 @@ static inline int jump_label_apply_nops(struct module *mod) + + static inline bool static_key_enabled(struct static_key *key) + { +- return (atomic_read(&key->enabled) > 0); ++ return static_key_count(key) > 0; + } + + #endif /* _LINUX_JUMP_LABEL_H */ +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 1884353..ac819bf 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled; + #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) + #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) + +-static inline int get_pageblock_migratetype(struct page *page) 
++#define get_pageblock_migratetype(page) \ ++ get_pfnblock_flags_mask(page, page_to_pfn(page), \ ++ PB_migrate_end, MIGRATETYPE_MASK) ++ ++static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) + { + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); +- return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK); ++ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end, ++ MIGRATETYPE_MASK); + } + + struct free_area { +@@ -138,6 +143,7 @@ enum zone_stat_item { + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ ++ NR_PAGES_SCANNED, /* pages scanned since last reclaim */ + #ifdef CONFIG_NUMA + NUMA_HIT, /* allocated in intended node */ + NUMA_MISS, /* allocated in non intended node */ +@@ -316,19 +322,12 @@ enum zone_type { + #ifndef __GENERATING_BOUNDS_H + + struct zone { +- /* Fields commonly accessed by the page allocator */ ++ /* Read-mostly fields */ + + /* zone watermarks, access with *_wmark_pages(zone) macros */ + unsigned long watermark[NR_WMARK]; + + /* +- * When free pages are below this point, additional steps are taken +- * when reading the number of free pages to avoid per-cpu counter +- * drift allowing watermarks to be breached +- */ +- unsigned long percpu_drift_mark; +- +- /* + * We don't know if the memory that we're going to allocate will be freeable + * or/and it will be released eventually, so to avoid totally wasting several + * GB of ram we must reserve some of the lower zone memory (otherwise we risk +@@ -336,41 +335,26 @@ struct zone { + * on the higher zones). This array is recalculated at runtime if the + * sysctl_lowmem_reserve_ratio sysctl changes. + */ +- unsigned long lowmem_reserve[MAX_NR_ZONES]; +- +- /* +- * This is a per-zone reserve of pages that should not be +- * considered dirtyable memory. +- */ +- unsigned long dirty_balance_reserve; ++ long lowmem_reserve[MAX_NR_ZONES]; + + #ifdef CONFIG_NUMA + int node; ++#endif ++ + /* +- * zone reclaim becomes active if more unmapped pages exist. ++ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on ++ * this zone's LRU. Maintained by the pageout code. + */ +- unsigned long min_unmapped_pages; +- unsigned long min_slab_pages; +-#endif ++ unsigned int inactive_ratio; ++ ++ struct pglist_data *zone_pgdat; + struct per_cpu_pageset __percpu *pageset; ++ + /* +- * free areas of different sizes ++ * This is a per-zone reserve of pages that should not be ++ * considered dirtyable memory. 
+ */ +- spinlock_t lock; +-#if defined CONFIG_COMPACTION || defined CONFIG_CMA +- /* Set to true when the PG_migrate_skip bits should be cleared */ +- bool compact_blockskip_flush; +- +- /* pfn where compaction free scanner should start */ +- unsigned long compact_cached_free_pfn; +- /* pfn where async and sync compaction migration scanner should start */ +- unsigned long compact_cached_migrate_pfn[2]; +-#endif +-#ifdef CONFIG_MEMORY_HOTPLUG +- /* see spanned/present_pages for more description */ +- seqlock_t span_seqlock; +-#endif +- struct free_area free_area[MAX_ORDER]; ++ unsigned long dirty_balance_reserve; + + #ifndef CONFIG_SPARSEMEM + /* +@@ -380,71 +364,14 @@ struct zone { + unsigned long *pageblock_flags; + #endif /* CONFIG_SPARSEMEM */ + +-#ifdef CONFIG_COMPACTION +- /* +- * On compaction failure, 1<> PAGE_SHIFT */ + unsigned long zone_start_pfn; + +@@ -490,9 +417,11 @@ struct zone { + * adjust_managed_page_count() should be used instead of directly + * touching zone->managed_pages and totalram_pages. + */ ++ unsigned long managed_pages; + unsigned long spanned_pages; + unsigned long present_pages; +- unsigned long managed_pages; ++ ++ const char *name; + + /* + * Number of MIGRATE_RESEVE page block. To maintain for just +@@ -500,10 +429,91 @@ struct zone { + */ + int nr_migrate_reserve_block; + ++#ifdef CONFIG_MEMORY_HOTPLUG ++ /* see spanned/present_pages for more description */ ++ seqlock_t span_seqlock; ++#endif ++ + /* +- * rarely used fields: ++ * wait_table -- the array holding the hash table ++ * wait_table_hash_nr_entries -- the size of the hash table array ++ * wait_table_bits -- wait_table_size == (1 << wait_table_bits) ++ * ++ * The purpose of all these is to keep track of the people ++ * waiting for a page to become available and make them ++ * runnable again when possible. The trouble is that this ++ * consumes a lot of space, especially when so few things ++ * wait on pages at a given time. So instead of using ++ * per-page waitqueues, we use a waitqueue hash table. ++ * ++ * The bucket discipline is to sleep on the same queue when ++ * colliding and wake all in that wait queue when removing. ++ * When something wakes, it must check to be sure its page is ++ * truly available, a la thundering herd. The cost of a ++ * collision is great, but given the expected load of the ++ * table, they should be so rare as to be outweighed by the ++ * benefits from the saved space. ++ * ++ * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the ++ * primary users of these fields, and in mm/page_alloc.c ++ * free_area_init_core() performs the initialization of them. 
+ */ +- const char *name; ++ wait_queue_head_t *wait_table; ++ unsigned long wait_table_hash_nr_entries; ++ unsigned long wait_table_bits; ++ ++ ZONE_PADDING(_pad1_) ++ ++ /* Write-intensive fields used from the page allocator */ ++ spinlock_t lock; ++ ++ /* free areas of different sizes */ ++ struct free_area free_area[MAX_ORDER]; ++ ++ /* zone flags, see below */ ++ unsigned long flags; ++ ++ ZONE_PADDING(_pad2_) ++ ++ /* Write-intensive fields used by page reclaim */ ++ ++ /* Fields commonly accessed by the page reclaim scanner */ ++ spinlock_t lru_lock; ++ struct lruvec lruvec; ++ ++ /* ++ * When free pages are below this point, additional steps are taken ++ * when reading the number of free pages to avoid per-cpu counter ++ * drift allowing watermarks to be breached ++ */ ++ unsigned long percpu_drift_mark; ++ ++#if defined CONFIG_COMPACTION || defined CONFIG_CMA ++ /* pfn where compaction free scanner should start */ ++ unsigned long compact_cached_free_pfn; ++ /* pfn where async and sync compaction migration scanner should start */ ++ unsigned long compact_cached_migrate_pfn[2]; ++#endif ++ ++#ifdef CONFIG_COMPACTION ++ /* ++ * On compaction failure, 1<flags); + } + ++static inline int zone_is_fair_depleted(const struct zone *zone) ++{ ++ return test_bit(ZONE_FAIR_DEPLETED, &zone->flags); ++} ++ + static inline int zone_is_oom_locked(const struct zone *zone) + { + return test_bit(ZONE_OOM_LOCKED, &zone->flags); +@@ -807,10 +823,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) + extern struct mutex zonelists_mutex; + void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); + void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, +- int classzone_idx, int alloc_flags); +-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, +- int classzone_idx, int alloc_flags); ++bool zone_watermark_ok(struct zone *z, unsigned int order, ++ unsigned long mark, int classzone_idx, int alloc_flags); ++bool zone_watermark_ok_safe(struct zone *z, unsigned int order, ++ unsigned long mark, int classzone_idx, int alloc_flags); + enum memmap_context { + MEMMAP_EARLY, + MEMMAP_HOTPLUG, +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h +index ca71a1d..3c545b4 100644 +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -198,6 +198,7 @@ struct page; /* forward declaration */ + TESTPAGEFLAG(Locked, locked) + PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) + PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) ++ __SETPAGEFLAG(Referenced, referenced) + PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) + PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) + PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) +@@ -208,6 +209,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ + PAGEFLAG(SavePinned, savepinned); /* Xen */ + PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) + PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) ++ __SETPAGEFLAG(SwapBacked, swapbacked) + + __PAGEFLAG(SlobFree, slob_free) + +diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h +index c08730c..2baeee1 100644 +--- a/include/linux/pageblock-flags.h ++++ b/include/linux/pageblock-flags.h +@@ -65,33 +65,26 @@ extern int pageblock_order; + /* Forward declaration */ + struct page; + +-unsigned long get_pageblock_flags_mask(struct page *page, ++unsigned 
long get_pfnblock_flags_mask(struct page *page, ++ unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); +-void set_pageblock_flags_mask(struct page *page, ++ ++void set_pfnblock_flags_mask(struct page *page, + unsigned long flags, ++ unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); + + /* Declarations for getting and setting flags. See mm/page_alloc.c */ +-static inline unsigned long get_pageblock_flags_group(struct page *page, +- int start_bitidx, int end_bitidx) +-{ +- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; +- unsigned long mask = (1 << nr_flag_bits) - 1; +- +- return get_pageblock_flags_mask(page, end_bitidx, mask); +-} +- +-static inline void set_pageblock_flags_group(struct page *page, +- unsigned long flags, +- int start_bitidx, int end_bitidx) +-{ +- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; +- unsigned long mask = (1 << nr_flag_bits) - 1; +- +- set_pageblock_flags_mask(page, flags, end_bitidx, mask); +-} ++#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ ++ get_pfnblock_flags_mask(page, page_to_pfn(page), \ ++ end_bitidx, \ ++ (1 << (end_bitidx - start_bitidx + 1)) - 1) ++#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ ++ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ ++ end_bitidx, \ ++ (1 << (end_bitidx - start_bitidx + 1)) - 1) + + #ifdef CONFIG_COMPACTION + #define get_pageblock_skip(page) \ +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 09c1b03..fcebdda 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) + + #define page_cache_get(page) get_page(page) + #define page_cache_release(page) put_page(page) +-void release_pages(struct page **pages, int nr, int cold); ++void release_pages(struct page **pages, int nr, bool cold); + + /* + * speculatively take a reference to a page. +@@ -248,12 +248,108 @@ pgoff_t page_cache_next_hole(struct address_space *mapping, + pgoff_t page_cache_prev_hole(struct address_space *mapping, + pgoff_t index, unsigned long max_scan); + ++#define FGP_ACCESSED 0x00000001 ++#define FGP_LOCK 0x00000002 ++#define FGP_CREAT 0x00000004 ++#define FGP_WRITE 0x00000008 ++#define FGP_NOFS 0x00000010 ++#define FGP_NOWAIT 0x00000020 ++ ++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, ++ int fgp_flags, gfp_t cache_gfp_mask); ++ ++/** ++ * find_get_page - find and get a page reference ++ * @mapping: the address_space to search ++ * @offset: the page index ++ * ++ * Looks up the page cache slot at @mapping & @offset. If there is a ++ * page cache page, it is returned with an increased refcount. ++ * ++ * Otherwise, %NULL is returned. ++ */ ++static inline struct page *find_get_page(struct address_space *mapping, ++ pgoff_t offset) ++{ ++ return pagecache_get_page(mapping, offset, 0, 0); ++} ++ ++static inline struct page *find_get_page_flags(struct address_space *mapping, ++ pgoff_t offset, int fgp_flags) ++{ ++ return pagecache_get_page(mapping, offset, fgp_flags, 0); ++} ++ ++/** ++ * find_lock_page - locate, pin and lock a pagecache page ++ * @mapping: the address_space to search ++ * @offset: the page index ++ * ++ * Looks up the page cache slot at @mapping & @offset. If there is a ++ * page cache page, it is returned locked and with an increased ++ * refcount. ++ * ++ * Otherwise, %NULL is returned.
++ * ++ * find_lock_page() may sleep. ++ */ ++static inline struct page *find_lock_page(struct address_space *mapping, ++ pgoff_t offset) ++{ ++ return pagecache_get_page(mapping, offset, FGP_LOCK, 0); ++} ++ ++/** ++ * find_or_create_page - locate or add a pagecache page ++ * @mapping: the page's address_space ++ * @index: the page's index into the mapping ++ * @gfp_mask: page allocation mode ++ * ++ * Looks up the page cache slot at @mapping & @offset. If there is a ++ * page cache page, it is returned locked and with an increased ++ * refcount. ++ * ++ * If the page is not present, a new page is allocated using @gfp_mask ++ * and added to the page cache and the VM's LRU list. The page is ++ * returned locked and with an increased refcount. ++ * ++ * On memory exhaustion, %NULL is returned. ++ * ++ * find_or_create_page() may sleep, even if @gfp_flags specifies an ++ * atomic allocation! ++ */ ++static inline struct page *find_or_create_page(struct address_space *mapping, ++ pgoff_t offset, gfp_t gfp_mask) ++{ ++ return pagecache_get_page(mapping, offset, ++ FGP_LOCK|FGP_ACCESSED|FGP_CREAT, ++ gfp_mask); ++} ++ ++/** ++ * grab_cache_page_nowait - returns locked page at given index in given cache ++ * @mapping: target address_space ++ * @index: the page index ++ * ++ * Same as grab_cache_page(), but do not wait if the page is unavailable. ++ * This is intended for speculative data generators, where the data can ++ * be regenerated if the page couldn't be grabbed. This routine should ++ * be safe to call while holding the lock for another page. ++ * ++ * Clear __GFP_FS when allocating the page to avoid recursion into the fs ++ * and deadlock against the caller's locked page. ++ */ ++static inline struct page *grab_cache_page_nowait(struct address_space *mapping, ++ pgoff_t index) ++{ ++ return pagecache_get_page(mapping, index, ++ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, ++ mapping_gfp_mask(mapping)); ++} ++ + struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); +-struct page *find_get_page(struct address_space *mapping, pgoff_t offset); + struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); +-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset); +-struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, +- gfp_t gfp_mask); + unsigned find_get_entries(struct address_space *mapping, pgoff_t start, + unsigned int nr_entries, struct page **entries, + pgoff_t *indices); +@@ -276,8 +372,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping, + return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); + } + +-extern struct page * grab_cache_page_nowait(struct address_space *mapping, +- pgoff_t index); + extern struct page * read_cache_page(struct address_space *mapping, + pgoff_t index, filler_t *filler, void *data); + extern struct page * read_cache_page_gfp(struct address_space *mapping, +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 0e5e16c..d662546 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -170,6 +170,8 @@ enum pci_dev_flags { + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, + /* Provide indication device is assigned by a Virtual Machine Manager */ + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, ++ /* Do not use bus resets for device */ ++ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), + }; + + enum pci_irq_reroute_variant { +diff --git a/include/linux/swap.h b/include/linux/swap.h +index 
7893249..241bf09 100644 +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -268,12 +268,14 @@ extern unsigned long nr_free_pagecache_pages(void); + + + /* linux/mm/swap.c */ +-extern void __lru_cache_add(struct page *); + extern void lru_cache_add(struct page *); ++extern void lru_cache_add_anon(struct page *page); ++extern void lru_cache_add_file(struct page *page); + extern void lru_add_page_tail(struct page *page, struct page *page_tail, + struct lruvec *lruvec, struct list_head *head); + extern void activate_page(struct page *); + extern void mark_page_accessed(struct page *); ++extern void init_page_accessed(struct page *page); + extern void lru_add_drain(void); + extern void lru_add_drain_cpu(int cpu); + extern void lru_add_drain_all(void); +@@ -283,22 +285,6 @@ extern void swap_setup(void); + + extern void add_page_to_unevictable_list(struct page *page); + +-/** +- * lru_cache_add: add a page to the page lists +- * @page: the page to add +- */ +-static inline void lru_cache_add_anon(struct page *page) +-{ +- ClearPageActive(page); +- __lru_cache_add(page); +-} +- +-static inline void lru_cache_add_file(struct page *page) +-{ +- ClearPageActive(page); +- __lru_cache_add(page); +-} +- + /* linux/mm/vmscan.c */ + extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, + gfp_t gfp_mask, nodemask_t *mask); +@@ -456,7 +442,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) + #define free_page_and_swap_cache(page) \ + page_cache_release(page) + #define free_pages_and_swap_cache(pages, nr) \ +- release_pages((pages), (nr), 0); ++ release_pages((pages), (nr), false); + + static inline void show_swap_cache_info(void) + { +diff --git a/include/linux/time.h b/include/linux/time.h +index d5d229b..7d532a3 100644 +--- a/include/linux/time.h ++++ b/include/linux/time.h +@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts); + extern void monotonic_to_bootbased(struct timespec *ts); + extern void get_monotonic_boottime(struct timespec *ts); + ++static inline bool timeval_valid(const struct timeval *tv) ++{ ++ /* Dates before 1970 are bogus */ ++ if (tv->tv_sec < 0) ++ return false; ++ ++ /* Can't have more microseconds than a second */ ++ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) ++ return false; ++ ++ return true; ++} ++ + extern struct timespec timespec_trunc(struct timespec t, unsigned gran); + extern int timekeeping_valid_for_hres(void); + extern u64 timekeeping_max_deferment(void); +diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h +index 1c9fabd..ce0803b 100644 +--- a/include/trace/events/pagemap.h ++++ b/include/trace/events/pagemap.h +@@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion, + + TP_PROTO( + struct page *page, +- unsigned long pfn, +- int lru, +- unsigned long flags ++ int lru + ), + +- TP_ARGS(page, pfn, lru, flags), ++ TP_ARGS(page, lru), + + TP_STRUCT__entry( + __field(struct page *, page ) +@@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion, + + TP_fast_assign( + __entry->page = page; +- __entry->pfn = pfn; ++ __entry->pfn = page_to_pfn(page); + __entry->lru = lru; +- __entry->flags = flags; ++ __entry->flags = trace_pagemap_flags(page); + ), + + /* Flag format is based on page-types.c formatting for pagemap */ +@@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion, + + TRACE_EVENT(mm_lru_activate, + +- TP_PROTO(struct page *page, unsigned long pfn), ++ TP_PROTO(struct page *page), + +- TP_ARGS(page, pfn), ++ TP_ARGS(page), + + TP_STRUCT__entry( + __field(struct page *, page )
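
The timeval_valid() helper added above is self-contained enough to try in isolation. Below is a rough userspace sketch of the same two checks (the struct, constant and sample values are stand-ins invented for illustration, not taken from the patch); the settimeofday() hunk later in this patch uses the real helper to reject bad values with -EINVAL before they reach the timekeeping core.

    #include <stdbool.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000L

    struct my_timeval { long tv_sec; long tv_usec; };

    /* Same two checks as the kernel helper: no pre-1970 seconds, and the
     * microsecond count must stay within [0, USEC_PER_SEC). */
    static bool my_timeval_valid(const struct my_timeval *tv)
    {
        if (tv->tv_sec < 0)
            return false;
        if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
            return false;
        return true;
    }

    int main(void)
    {
        struct my_timeval ok  = { 1418000000L, 500000L };
        struct my_timeval bad = { 1418000000L, USEC_PER_SEC }; /* one too many */

        printf("%d %d\n", my_timeval_valid(&ok), my_timeval_valid(&bad)); /* 1 0 */
        return 0;
    }
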
+@@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate, + + TP_fast_assign( + __entry->page = page; +- __entry->pfn = pfn; ++ __entry->pfn = page_to_pfn(page); + ), + + /* Flag format is based on page-types.c formatting for pagemap */ +diff --git a/kernel/cpuset.c b/kernel/cpuset.c +index 15b3ea6..2fb2877 100644 +--- a/kernel/cpuset.c ++++ b/kernel/cpuset.c +@@ -61,12 +61,7 @@ + #include + #include + +-/* +- * Tracks how many cpusets are currently defined in system. +- * When there is only one cpuset (the root cpuset) we can +- * short circuit some hooks. +- */ +-int number_of_cpusets __read_mostly; ++struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE; + + /* See "Frequency meter" comments, below. */ + +@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains, + goto done; + } + +- csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); ++ csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL); + if (!csa) + goto done; + csn = 0; +@@ -1961,7 +1956,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) + if (is_spread_slab(parent)) + set_bit(CS_SPREAD_SLAB, &cs->flags); + +- number_of_cpusets++; ++ cpuset_inc(); + + if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) + goto out_unlock; +@@ -2012,7 +2007,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) + if (is_sched_load_balance(cs)) + update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); + +- number_of_cpusets--; ++ cpuset_dec(); + clear_bit(CS_ONLINE, &cs->flags); + + mutex_unlock(&cpuset_mutex); +@@ -2067,7 +2062,6 @@ int __init cpuset_init(void) + if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)) + BUG(); + +- number_of_cpusets = 1; + return 0; + } + +diff --git a/kernel/time.c b/kernel/time.c +index 3c49ab4..3eb322e 100644 +--- a/kernel/time.c ++++ b/kernel/time.c +@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, + if (tv) { + if (copy_from_user(&user_tv, tv, sizeof(*tv))) + return -EFAULT; ++ ++ if (!timeval_valid(&user_tv)) ++ return -EINVAL; ++ + new_ts.tv_sec = user_tv.tv_sec; + new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; + } +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c +index af8d1d4..28db9be 100644 +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -631,6 +631,13 @@ int ntp_validate_timex(struct timex *txc) + if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) + return -EPERM; + ++ if (txc->modes & ADJ_FREQUENCY) { ++ if (LONG_MIN / PPM_SCALE > txc->freq) ++ return -EINVAL; ++ if (LONG_MAX / PPM_SCALE < txc->freq) ++ return -EINVAL; ++ } ++ + return 0; + } + +diff --git a/mm/filemap.c b/mm/filemap.c +index bdaa215..217cfd3 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -644,8 +644,17 @@ EXPORT_SYMBOL(unlock_page); + */ + void end_page_writeback(struct page *page) + { +- if (TestClearPageReclaim(page)) ++ /* ++ * TestClearPageReclaim could be used here but it is an atomic ++ * operation and overkill in this particular case. Failing to ++ * shuffle a page marked for immediate reclaim is too mild to ++ * justify taking an atomic operation penalty at the end of ++ * every page writeback. ++ */ ++ if (PageReclaim(page)) { ++ ClearPageReclaim(page); + rotate_reclaimable_page(page); ++ } + + if (!test_clear_page_writeback(page)) + BUG(); +@@ -848,26 +857,6 @@ out: + EXPORT_SYMBOL(find_get_entry); + + /** +- * find_get_page - find and get a page reference +- * @mapping: the address_space to search +- * @offset: the page index +- * +- * Looks up the page cache slot at @mapping & @offset.
If there is a +- * page cache page, it is returned with an increased refcount. +- * +- * Otherwise, %NULL is returned. +- */ +-struct page *find_get_page(struct address_space *mapping, pgoff_t offset) +-{ +- struct page *page = find_get_entry(mapping, offset); +- +- if (radix_tree_exceptional_entry(page)) +- page = NULL; +- return page; +-} +-EXPORT_SYMBOL(find_get_page); +- +-/** + * find_lock_entry - locate, pin and lock a page cache entry + * @mapping: the address_space to search + * @offset: the page cache index +@@ -904,66 +893,83 @@ repeat: + EXPORT_SYMBOL(find_lock_entry); + + /** +- * find_lock_page - locate, pin and lock a pagecache page ++ * pagecache_get_page - find and get a page reference + * @mapping: the address_space to search + * @offset: the page index ++ * @fgp_flags: FGP flags ++ * @gfp_mask: gfp mask to use for the page cache data page allocation + * +- * Looks up the page cache slot at @mapping & @offset. If there is a +- * page cache page, it is returned locked and with an increased +- * refcount. +- * +- * Otherwise, %NULL is returned. ++ * Looks up the page cache slot at @mapping & @offset. + * +- * find_lock_page() may sleep. +- */ +-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) +-{ +- struct page *page = find_lock_entry(mapping, offset); +- +- if (radix_tree_exceptional_entry(page)) +- page = NULL; +- return page; +-} +-EXPORT_SYMBOL(find_lock_page); +- +-/** +- * find_or_create_page - locate or add a pagecache page +- * @mapping: the page's address_space +- * @index: the page's index into the mapping +- * @gfp_mask: page allocation mode ++ * FGP flags modify how the page is returned + * +- * Looks up the page cache slot at @mapping & @offset. If there is a +- * page cache page, it is returned locked and with an increased +- * refcount. +- * +- * If the page is not present, a new page is allocated using @gfp_mask +- * and added to the page cache and the VM's LRU list. The page is +- * returned locked and with an increased refcount. ++ * FGP_ACCESSED: the page will be marked accessed ++ * FGP_LOCK: Page is returned locked ++ * FGP_CREAT: If page is not present then a new page is allocated using ++ * @gfp_mask and added to the page cache and the VM's LRU ++ * list. The page is returned locked and with an increased ++ * refcount. Otherwise, %NULL is returned. + * +- * On memory exhaustion, %NULL is returned. ++ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even ++ * if the GFP flags specified for FGP_CREAT are atomic. + * +- * find_or_create_page() may sleep, even if @gfp_flags specifies an +- * atomic allocation! ++ * If there is a page cache page, it is returned with an increased refcount. + */ +-struct page *find_or_create_page(struct address_space *mapping, +- pgoff_t index, gfp_t gfp_mask) ++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, ++ int fgp_flags, gfp_t gfp_mask) + { + struct page *page; +- int err; ++ + repeat: +- page = find_lock_page(mapping, index); +- if (!page) { ++ page = find_get_entry(mapping, offset); ++ if (radix_tree_exceptional_entry(page)) ++ page = NULL; ++ if (!page) ++ goto no_page; ++ ++ if (fgp_flags & FGP_LOCK) { ++ if (fgp_flags & FGP_NOWAIT) { ++ if (!trylock_page(page)) { ++ page_cache_release(page); ++ return NULL; ++ } ++ } else { ++ lock_page(page); ++ } ++ ++ /* Has the page been truncated?
*/ ++ if (unlikely(page->mapping != mapping)) { ++ unlock_page(page); ++ page_cache_release(page); ++ goto repeat; ++ } ++ VM_BUG_ON(page->index != offset); ++ } ++ ++ if (page && (fgp_flags & FGP_ACCESSED)) ++ mark_page_accessed(page); ++ ++no_page: ++ if (!page && (fgp_flags & FGP_CREAT)) { ++ int err; ++ if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) ++ gfp_mask |= __GFP_WRITE; ++ if (fgp_flags & FGP_NOFS) ++ gfp_mask &= ~__GFP_FS; ++ + page = __page_cache_alloc(gfp_mask); + if (!page) + return NULL; +- /* +- * We want a regular kernel memory (not highmem or DMA etc) +- * allocation for the radix tree nodes, but we need to honour +- * the context-specific requirements the caller has asked for. +- * GFP_RECLAIM_MASK collects those requirements. +- */ +- err = add_to_page_cache_lru(page, mapping, index, +- (gfp_mask & GFP_RECLAIM_MASK)); ++ ++ if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) ++ fgp_flags |= FGP_LOCK; ++ ++ /* Init accessed so avoid atomic mark_page_accessed later */ ++ if (fgp_flags & FGP_ACCESSED) ++ init_page_accessed(page); ++ ++ err = add_to_page_cache_lru(page, mapping, offset, ++ gfp_mask & GFP_RECLAIM_MASK); + if (unlikely(err)) { + page_cache_release(page); + page = NULL; +@@ -971,9 +977,10 @@ repeat: + goto repeat; + } + } ++ + return page; + } +-EXPORT_SYMBOL(find_or_create_page); ++EXPORT_SYMBOL(pagecache_get_page); + + /** + * find_get_entries - gang pagecache lookup +@@ -1263,39 +1270,6 @@ repeat: + } + EXPORT_SYMBOL(find_get_pages_tag); + +-/** +- * grab_cache_page_nowait - returns locked page at given index in given cache +- * @mapping: target address_space +- * @index: the page index +- * +- * Same as grab_cache_page(), but do not wait if the page is unavailable. +- * This is intended for speculative data generators, where the data can +- * be regenerated if the page couldn't be grabbed. This routine should +- * be safe to call while holding the lock for another page. +- * +- * Clear __GFP_FS when allocating the page to avoid recursion into the fs +- * and deadlock against the caller's locked page. +- */ +-struct page * +-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) +-{ +- struct page *page = find_get_page(mapping, index); +- +- if (page) { +- if (trylock_page(page)) +- return page; +- page_cache_release(page); +- return NULL; +- } +- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); +- if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { +- page_cache_release(page); +- page = NULL; +- } +- return page; +-} +-EXPORT_SYMBOL(grab_cache_page_nowait); +- + /* + * CD/DVDs are error prone. When a medium error occurs, the driver may fail + a _large_ part of the i/o request.
Imagine the worst scenario: +@@ -2397,7 +2371,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping, + { + const struct address_space_operations *aops = mapping->a_ops; + +- mark_page_accessed(page); + return aops->write_end(file, mapping, pos, len, copied, page, fsdata); + } + EXPORT_SYMBOL(pagecache_write_end); +@@ -2479,34 +2452,17 @@ EXPORT_SYMBOL(generic_file_direct_write); + struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags) + { +- int status; +- gfp_t gfp_mask; + struct page *page; +- gfp_t gfp_notmask = 0; ++ int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; + +- gfp_mask = mapping_gfp_mask(mapping); +- if (mapping_cap_account_dirty(mapping)) +- gfp_mask |= __GFP_WRITE; + if (flags & AOP_FLAG_NOFS) +- gfp_notmask = __GFP_FS; +-repeat: +- page = find_lock_page(mapping, index); ++ fgp_flags |= FGP_NOFS; ++ ++ page = pagecache_get_page(mapping, index, fgp_flags, ++ mapping_gfp_mask(mapping)); + if (page) +- goto found; ++ wait_for_stable_page(page); + +- page = __page_cache_alloc(gfp_mask & ~gfp_notmask); +- if (!page) +- return NULL; +- status = add_to_page_cache_lru(page, mapping, index, +- GFP_KERNEL & ~gfp_notmask); +- if (unlikely(status)) { +- page_cache_release(page); +- if (status == -EEXIST) +- goto repeat; +- return NULL; +- } +-found: +- wait_for_stable_page(page); + return page; + } + EXPORT_SYMBOL(grab_cache_page_write_begin); +@@ -2555,7 +2511,7 @@ again: + + status = a_ops->write_begin(file, mapping, pos, bytes, flags, + &page, &fsdata); +- if (unlikely(status)) ++ if (unlikely(status < 0)) + break; + + if (mapping_writably_mapped(mapping)) +@@ -2564,7 +2520,6 @@ again: + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); + flush_dcache_page(page); + +- mark_page_accessed(page); + status = a_ops->write_end(file, mapping, pos, bytes, copied, + page, fsdata); + if (unlikely(status < 0)) +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 331faa5..adce656 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2273,6 +2273,30 @@ static void khugepaged_alloc_sleep(void) + + static int khugepaged_node_load[MAX_NUMNODES]; + ++static bool khugepaged_scan_abort(int nid) ++{ ++ int i; ++ ++ /* ++ * If zone_reclaim_mode is disabled, then no extra effort is made to ++ * allocate memory locally. ++ */ ++ if (!zone_reclaim_mode) ++ return false; ++ ++ /* If there is a count for this node already, it must be acceptable */ ++ if (khugepaged_node_load[nid]) ++ return false; ++ ++ for (i = 0; i < MAX_NUMNODES; i++) { ++ if (!khugepaged_node_load[i]) ++ continue; ++ if (node_distance(nid, i) > RECLAIM_DISTANCE) ++ return true; ++ } ++ return false; ++} ++ + #ifdef CONFIG_NUMA + static int khugepaged_find_target_node(void) + { +@@ -2589,6 +2613,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, + * hit record. 
+ */ + node = page_to_nid(page); ++ if (khugepaged_scan_abort(node)) ++ goto out_unmap; + khugepaged_node_load[node]++; + VM_BUG_ON_PAGE(PageCompound(page), page); + if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) +diff --git a/mm/memory.c b/mm/memory.c +index 924429e..7f30bea 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -878,7 +878,7 @@ out_set_pte: + return 0; + } + +-int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, ++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, + unsigned long addr, unsigned long end) + { +@@ -3646,7 +3646,7 @@ static int handle_pte_fault(struct mm_struct *mm, + pte_t entry; + spinlock_t *ptl; + +- entry = *pte; ++ entry = ACCESS_ONCE(*pte); + if (!pte_present(entry)) { + if (pte_none(entry)) { + if (vma->vm_ops) { +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 4b25829..ea41913 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -408,7 +408,8 @@ static int destroy_compound_page(struct page *page, unsigned long order) + return bad; + } + +-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) ++static inline void prep_zero_page(struct page *page, unsigned int order, ++ gfp_t gfp_flags) + { + int i; + +@@ -452,7 +453,7 @@ static inline void set_page_guard_flag(struct page *page) { } + static inline void clear_page_guard_flag(struct page *page) { } + #endif + +-static inline void set_page_order(struct page *page, int order) ++static inline void set_page_order(struct page *page, unsigned int order) + { + set_page_private(page, order); + __SetPageBuddy(page); +@@ -503,21 +504,31 @@ __find_buddy_index(unsigned long page_idx, unsigned int order) + * For recording page's order, we use page_private(page). + */ + static inline int page_is_buddy(struct page *page, struct page *buddy, +- int order) ++ unsigned int order) + { + if (!pfn_valid_within(page_to_pfn(buddy))) + return 0; + +- if (page_zone_id(page) != page_zone_id(buddy)) +- return 0; +- + if (page_is_guard(buddy) && page_order(buddy) == order) { + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); ++ ++ if (page_zone_id(page) != page_zone_id(buddy)) ++ return 0; ++ + return 1; + } + + if (PageBuddy(buddy) && page_order(buddy) == order) { + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); ++ ++ /* ++ * zone check is done late to avoid uselessly ++ * calculating zone/node ids for pages that could ++ * never merge. 
++ */ ++ if (page_zone_id(page) != page_zone_id(buddy)) ++ return 0; ++ + return 1; + } + return 0; +@@ -549,6 +560,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, + */ + + static inline void __free_one_page(struct page *page, ++ unsigned long pfn, + struct zone *zone, unsigned int order, + int migratetype) + { +@@ -565,7 +577,7 @@ static inline void __free_one_page(struct page *page, + + VM_BUG_ON(migratetype == -1); + +- page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); ++ page_idx = pfn & ((1 << MAX_ORDER) - 1); + + VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); + VM_BUG_ON_PAGE(bad_range(zone, page), page); +@@ -666,9 +678,12 @@ static void free_pcppages_bulk(struct zone *zone, int count, + int migratetype = 0; + int batch_free = 0; + int to_free = count; ++ unsigned long nr_scanned; + + spin_lock(&zone->lock); +- zone->pages_scanned = 0; ++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); ++ if (nr_scanned) ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); + + while (to_free) { + struct page *page; +@@ -700,7 +715,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, + list_del(&page->lru); + mt = get_freepage_migratetype(page); + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ +- __free_one_page(page, zone, 0, mt); ++ __free_one_page(page, page_to_pfn(page), zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); + if (likely(!is_migrate_isolate_page(page))) { + __mod_zone_page_state(zone, NR_FREE_PAGES, 1); +@@ -712,13 +727,18 @@ static void free_pcppages_bulk(struct zone *zone, int count, + spin_unlock(&zone->lock); + } + +-static void free_one_page(struct zone *zone, struct page *page, int order, ++static void free_one_page(struct zone *zone, ++ struct page *page, unsigned long pfn, ++ unsigned int order, + int migratetype) + { ++ unsigned long nr_scanned; + spin_lock(&zone->lock); +- zone->pages_scanned = 0; ++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); ++ if (nr_scanned) ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); + +- __free_one_page(page, zone, order, migratetype); ++ __free_one_page(page, pfn, zone, order, migratetype); + if (unlikely(!is_migrate_isolate(migratetype))) + __mod_zone_freepage_state(zone, 1 << order, migratetype); + spin_unlock(&zone->lock); +@@ -755,15 +775,16 @@ static void __free_pages_ok(struct page *page, unsigned int order) + { + unsigned long flags; + int migratetype; ++ unsigned long pfn = page_to_pfn(page); + + if (!free_pages_prepare(page, order)) + return; + ++ migratetype = get_pfnblock_migratetype(page, pfn); + local_irq_save(flags); + __count_vm_events(PGFREE, 1 << order); +- migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); +- free_one_page(page_zone(page), page, order, migratetype); ++ free_one_page(page_zone(page), page, pfn, order, migratetype); + local_irq_restore(flags); + } + +@@ -894,7 +915,7 @@ static inline int check_new_page(struct page *page) + return 0; + } + +-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) ++static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags) + { + int i; + +@@ -1105,16 +1126,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page, + + /* Remove an element from the buddy allocator from the fallback list */ + static inline struct page * +-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype) ++__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) + { + 
struct free_area *area; +- int current_order; ++ unsigned int current_order; + struct page *page; + int migratetype, new_type, i; + + /* Find the largest possible block of pages in the other list */ +- for (current_order = MAX_ORDER-1; current_order >= order; +- --current_order) { ++ for (current_order = MAX_ORDER-1; ++ current_order >= order && current_order <= MAX_ORDER-1; ++ --current_order) { + for (i = 0;; i++) { + migratetype = fallbacks[start_migratetype][i]; + +@@ -1194,7 +1216,7 @@ retry_reserve: + */ + static int rmqueue_bulk(struct zone *zone, unsigned int order, + unsigned long count, struct list_head *list, +- int migratetype, int cold) ++ int migratetype, bool cold) + { + int i; + +@@ -1213,7 +1235,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, + * merge IO requests if the physical pages are ordered + * properly. + */ +- if (likely(cold == 0)) ++ if (likely(!cold)) + list_add(&page->lru, list); + else + list_add_tail(&page->lru, list); +@@ -1342,7 +1364,7 @@ void mark_free_pages(struct zone *zone) + { + unsigned long pfn, max_zone_pfn; + unsigned long flags; +- int order, t; ++ unsigned int order, t; + struct list_head *curr; + + if (zone_is_empty(zone)) +@@ -1374,19 +1396,20 @@ void mark_free_pages(struct zone *zone) + + /* + * Free a 0-order page +- * cold == 1 ? free a cold page : free a hot page ++ * cold == true ? free a cold page : free a hot page + */ +-void free_hot_cold_page(struct page *page, int cold) ++void free_hot_cold_page(struct page *page, bool cold) + { + struct zone *zone = page_zone(page); + struct per_cpu_pages *pcp; + unsigned long flags; ++ unsigned long pfn = page_to_pfn(page); + int migratetype; + + if (!free_pages_prepare(page, 0)) + return; + +- migratetype = get_pageblock_migratetype(page); ++ migratetype = get_pfnblock_migratetype(page, pfn); + set_freepage_migratetype(page, migratetype); + local_irq_save(flags); + __count_vm_event(PGFREE); +@@ -1400,17 +1423,17 @@ void free_hot_cold_page(struct page *page, int cold) + */ + if (migratetype >= MIGRATE_PCPTYPES) { + if (unlikely(is_migrate_isolate(migratetype))) { +- free_one_page(zone, page, 0, migratetype); ++ free_one_page(zone, page, pfn, 0, migratetype); + goto out; + } + migratetype = MIGRATE_MOVABLE; + } + + pcp = &this_cpu_ptr(zone->pageset)->pcp; +- if (cold) +- list_add_tail(&page->lru, &pcp->lists[migratetype]); +- else ++ if (!cold) + list_add(&page->lru, &pcp->lists[migratetype]); ++ else ++ list_add_tail(&page->lru, &pcp->lists[migratetype]); + pcp->count++; + if (pcp->count >= pcp->high) { + unsigned long batch = ACCESS_ONCE(pcp->batch); +@@ -1425,7 +1448,7 @@ out: + /* + * Free a list of 0-order pages + */ +-void free_hot_cold_page_list(struct list_head *list, int cold) ++void free_hot_cold_page_list(struct list_head *list, bool cold) + { + struct page *page, *next; + +@@ -1537,12 +1560,12 @@ int split_free_page(struct page *page) + */ + static inline + struct page *buffered_rmqueue(struct zone *preferred_zone, +- struct zone *zone, int order, gfp_t gfp_flags, +- int migratetype) ++ struct zone *zone, unsigned int order, ++ gfp_t gfp_flags, int migratetype) + { + unsigned long flags; + struct page *page; +- int cold = !!(gfp_flags & __GFP_COLD); ++ bool cold = ((gfp_flags & __GFP_COLD) != 0); + + again: + if (likely(order == 0)) { +@@ -1591,6 +1614,9 @@ again: + } + + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); ++ if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 && ++ !zone_is_fair_depleted(zone)) ++ zone_set_flag(zone, ZONE_FAIR_DEPLETED); + + 
__count_zone_vm_events(PGALLOC, zone, 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +@@ -1687,12 +1713,12 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) + * Return true if free pages are above 'mark'. This takes into account the order + * of the allocation. + */ +-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, +- int classzone_idx, int alloc_flags, long free_pages) ++static bool __zone_watermark_ok(struct zone *z, unsigned int order, ++ unsigned long mark, int classzone_idx, int alloc_flags, ++ long free_pages) + { + /* free_pages may go negative - that's OK */ + long min = mark; +- long lowmem_reserve = z->lowmem_reserve[classzone_idx]; + int o; + long free_cma = 0; + +@@ -1707,7 +1733,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, + free_cma = zone_page_state(z, NR_FREE_CMA_PAGES); + #endif + +- if (free_pages - free_cma <= min + lowmem_reserve) ++ if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx]) + return false; + for (o = 0; o < order; o++) { + /* At the next order, this order's pages become unavailable */ +@@ -1722,15 +1748,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, + return true; + } + +-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, ++bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, int alloc_flags) + { + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + zone_page_state(z, NR_FREE_PAGES)); + } + +-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, +- int classzone_idx, int alloc_flags) ++bool zone_watermark_ok_safe(struct zone *z, unsigned int order, ++ unsigned long mark, int classzone_idx, int alloc_flags) + { + long free_pages = zone_page_state(z, NR_FREE_PAGES); + +@@ -1915,6 +1941,18 @@ static inline void init_zone_allows_reclaim(int nid) + } + #endif /* CONFIG_NUMA */ + ++static void reset_alloc_batches(struct zone *preferred_zone) ++{ ++ struct zone *zone = preferred_zone->zone_pgdat->node_zones; ++ ++ do { ++ mod_zone_page_state(zone, NR_ALLOC_BATCH, ++ high_wmark_pages(zone) - low_wmark_pages(zone) - ++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); ++ zone_clear_flag(zone, ZONE_FAIR_DEPLETED); ++ } while (zone++ != preferred_zone); ++} ++ + /* + * get_page_from_freelist goes through the zonelist trying to allocate + * a page. +@@ -1922,18 +1960,22 @@ static inline void init_zone_allows_reclaim(int nid) + static struct page * + get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, + struct zonelist *zonelist, int high_zoneidx, int alloc_flags, +- struct zone *preferred_zone, int migratetype) ++ struct zone *preferred_zone, int classzone_idx, int migratetype) + { + struct zoneref *z; + struct page *page = NULL; +- int classzone_idx; + struct zone *zone; + nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ + int zlc_active = 0; /* set if using zonelist_cache */ + int did_zlc_setup = 0; /* just call zlc_setup() one time */ ++ bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) && ++ (gfp_mask & __GFP_WRITE); ++ int nr_fair_skipped = 0; ++ bool zonelist_rescan; + +- classzone_idx = zone_idx(preferred_zone); + zonelist_scan: ++ zonelist_rescan = false; ++ + /* + * Scan zonelist, looking for a zone with enough free. + * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
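
The __zone_watermark_ok() hunks above mainly switch the order parameter to unsigned int, but the function's logic is worth a worked example: free pages that are only available at orders below the request are discounted step by step, with the required minimum halved at each order. A rough userspace model of that loop (CMA accounting and the per-cpu drift handling dropped; all numbers invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11

    /* Shape of __zone_watermark_ok(): pages usable only at lower orders
     * are subtracted order by order, and the bar halves per step. */
    static bool watermark_ok(unsigned int order, long mark, long lowmem_reserve,
                             long free_pages, const long nr_free[MAX_ORDER])
    {
        long min = mark;

        if (free_pages <= min + lowmem_reserve)
            return false;
        for (unsigned int o = 0; o < order; o++) {
            free_pages -= nr_free[o] << o; /* too small for this request */
            min >>= 1;
            if (free_pages <= min)
                return false;
        }
        return true;
    }

    int main(void)
    {
        long nr_free[MAX_ORDER] = { 1000 }; /* only order-0 pages are free */

        printf("order-0: %d\n", watermark_ok(0, 128, 32, 1000, nr_free)); /* 1 */
        printf("order-3: %d\n", watermark_ok(3, 128, 32, 1000, nr_free)); /* 0 */
        return 0;
    }

The order-3 request fails even though 1000 pages are nominally free, because none of them can satisfy a higher-order allocation.
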
+@@ -1945,12 +1987,10 @@ zonelist_scan: + if (IS_ENABLED(CONFIG_NUMA) && zlc_active && + !zlc_zone_worth_trying(zonelist, z, allowednodes)) + continue; +- if ((alloc_flags & ALLOC_CPUSET) && ++ if (cpusets_enabled() && ++ (alloc_flags & ALLOC_CPUSET) && + !cpuset_zone_allowed_softwall(zone, gfp_mask)) + continue; +- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); +- if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS)) +- goto try_this_zone; + /* + * Distribute pages in proportion to the individual + * zone size to ensure fair page aging. The zone a +@@ -1959,9 +1999,11 @@ zonelist_scan: + */ + if (alloc_flags & ALLOC_FAIR) { + if (!zone_local(preferred_zone, zone)) ++ break; ++ if (zone_is_fair_depleted(zone)) { ++ nr_fair_skipped++; + continue; +- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0) +- continue; ++ } + } + /* + * When allocating a page cache page for writing, we +@@ -1989,15 +2031,19 @@ zonelist_scan: + * will require awareness of zones in the + * dirty-throttling and the flusher threads. + */ +- if ((alloc_flags & ALLOC_WMARK_LOW) && +- (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone)) +- goto this_zone_full; ++ if (consider_zone_dirty && !zone_dirty_ok(zone)) ++ continue; + + mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; + if (!zone_watermark_ok(zone, order, mark, + classzone_idx, alloc_flags)) { + int ret; + ++ /* Checked here to keep the fast path fast */ ++ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); ++ if (alloc_flags & ALLOC_NO_WATERMARKS) ++ goto try_this_zone; ++ + if (IS_ENABLED(CONFIG_NUMA) && + !did_zlc_setup && nr_online_nodes > 1) { + /* +@@ -2059,17 +2105,11 @@ try_this_zone: + if (page) + break; + this_zone_full: +- if (IS_ENABLED(CONFIG_NUMA)) ++ if (IS_ENABLED(CONFIG_NUMA) && zlc_active) + zlc_mark_zone_full(zonelist, z); + } + +- if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) { +- /* Disable zlc cache for second zonelist scan */ +- zlc_active = 0; +- goto zonelist_scan; +- } +- +- if (page) ++ if (page) { + /* + * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was + * necessary to allocate the page. The expectation is +@@ -2078,8 +2118,37 @@ this_zone_full: + * for !PFMEMALLOC purposes. + */ + page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); ++ return page; ++ } + +- return page; ++ /* ++ * The first pass makes sure allocations are spread fairly within the ++ * local node. However, the local node might have free pages left ++ * after the fairness batches are exhausted, and remote zones haven't ++ * even been considered yet. Try once more without fairness, and ++ * include remote zones now, before entering the slowpath and waking ++ * kswapd: prefer spilling to a remote zone over swapping locally. 
++ */ ++ if (alloc_flags & ALLOC_FAIR) { ++ alloc_flags &= ~ALLOC_FAIR; ++ if (nr_fair_skipped) { ++ zonelist_rescan = true; ++ reset_alloc_batches(preferred_zone); ++ } ++ if (nr_online_nodes > 1) ++ zonelist_rescan = true; ++ } ++ ++ if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) { ++ /* Disable zlc cache for second zonelist scan */ ++ zlc_active = 0; ++ zonelist_rescan = true; ++ } ++ ++ if (zonelist_rescan) ++ goto zonelist_scan; ++ ++ return NULL; + } + + /* +@@ -2188,7 +2257,7 @@ static inline struct page * + __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, struct zone *preferred_zone, +- int migratetype) ++ int classzone_idx, int migratetype) + { + struct page *page; + +@@ -2214,7 +2283,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, + order, zonelist, high_zoneidx, + ALLOC_WMARK_HIGH|ALLOC_CPUSET, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + if (page) + goto out; + +@@ -2249,7 +2318,7 @@ static struct page * + __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, +- int migratetype, enum migrate_mode mode, ++ int classzone_idx, int migratetype, enum migrate_mode mode, + bool *contended_compaction, bool *deferred_compaction, + unsigned long *did_some_progress) + { +@@ -2277,7 +2346,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, + page = get_page_from_freelist(gfp_mask, nodemask, + order, zonelist, high_zoneidx, + alloc_flags & ~ALLOC_NO_WATERMARKS, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + if (page) { + preferred_zone->compact_blockskip_flush = false; + compaction_defer_reset(preferred_zone, order, true); +@@ -2309,7 +2378,8 @@ static inline struct page * + __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, +- int migratetype, enum migrate_mode mode, bool *contended_compaction, ++ int classzone_idx, int migratetype, ++ enum migrate_mode mode, bool *contended_compaction, + bool *deferred_compaction, unsigned long *did_some_progress) + { + return NULL; +@@ -2349,7 +2419,7 @@ static inline struct page * + __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, +- int migratetype, unsigned long *did_some_progress) ++ int classzone_idx, int migratetype, unsigned long *did_some_progress) + { + struct page *page = NULL; + bool drained = false; +@@ -2367,7 +2437,8 @@ retry: + page = get_page_from_freelist(gfp_mask, nodemask, order, + zonelist, high_zoneidx, + alloc_flags & ~ALLOC_NO_WATERMARKS, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, ++ migratetype); + + /* + * If an allocation failed after direct reclaim, it could be because +@@ -2390,14 +2461,14 @@ static inline struct page * + __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, struct zone *preferred_zone, +- int migratetype) ++ int classzone_idx, int migratetype) + { + struct page *page; + + do { + page = 
get_page_from_freelist(gfp_mask, nodemask, order, + zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + + if (!page && gfp_mask & __GFP_NOFAIL) + wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); +@@ -2406,28 +2477,6 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, + return page; + } + +-static void reset_alloc_batches(struct zonelist *zonelist, +- enum zone_type high_zoneidx, +- struct zone *preferred_zone) +-{ +- struct zoneref *z; +- struct zone *zone; +- +- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { +- /* +- * Only reset the batches of zones that were actually +- * considered in the fairness pass, we don't want to +- * trash fairness information for zones that are not +- * actually part of this zonelist's round-robin cycle. +- */ +- if (!zone_local(preferred_zone, zone)) +- continue; +- mod_zone_page_state(zone, NR_ALLOC_BATCH, +- high_wmark_pages(zone) - low_wmark_pages(zone) - +- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); +- } +-} +- + static void wake_all_kswapds(unsigned int order, + struct zonelist *zonelist, + enum zone_type high_zoneidx, +@@ -2498,7 +2547,7 @@ static inline struct page * + __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, enum zone_type high_zoneidx, + nodemask_t *nodemask, struct zone *preferred_zone, +- int migratetype) ++ int classzone_idx, int migratetype) + { + const gfp_t wait = gfp_mask & __GFP_WAIT; + struct page *page = NULL; +@@ -2547,15 +2596,19 @@ restart: + * Find the true preferred zone if the allocation is unconstrained by + * cpusets. + */ +- if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) +- first_zones_zonelist(zonelist, high_zoneidx, NULL, +- &preferred_zone); ++ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) { ++ struct zoneref *preferred_zoneref; ++ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx, ++ NULL, ++ &preferred_zone); ++ classzone_idx = zonelist_zone_idx(preferred_zoneref); ++ } + + rebalance: + /* This is the last chance, in general, before the goto nopage. 
*/ + page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, + high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + if (page) + goto got_pg; + +@@ -2570,7 +2623,7 @@ rebalance: + + page = __alloc_pages_high_priority(gfp_mask, order, + zonelist, high_zoneidx, nodemask, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + if (page) { + goto got_pg; + } +@@ -2601,7 +2654,8 @@ rebalance: + */ + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, + high_zoneidx, nodemask, alloc_flags, +- preferred_zone, migratetype, ++ preferred_zone, ++ classzone_idx, migratetype, + migration_mode, &contended_compaction, + &deferred_compaction, + &did_some_progress); +@@ -2624,7 +2678,8 @@ rebalance: + zonelist, high_zoneidx, + nodemask, + alloc_flags, preferred_zone, +- migratetype, &did_some_progress); ++ classzone_idx, migratetype, ++ &did_some_progress); + if (page) + goto got_pg; + +@@ -2643,7 +2698,7 @@ rebalance: + page = __alloc_pages_may_oom(gfp_mask, order, + zonelist, high_zoneidx, + nodemask, preferred_zone, +- migratetype); ++ classzone_idx, migratetype); + if (page) + goto got_pg; + +@@ -2684,7 +2739,8 @@ rebalance: + */ + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, + high_zoneidx, nodemask, alloc_flags, +- preferred_zone, migratetype, ++ preferred_zone, ++ classzone_idx, migratetype, + migration_mode, &contended_compaction, + &deferred_compaction, + &did_some_progress); +@@ -2711,11 +2767,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + { + enum zone_type high_zoneidx = gfp_zone(gfp_mask); + struct zone *preferred_zone; ++ struct zoneref *preferred_zoneref; + struct page *page = NULL; + int migratetype = allocflags_to_migratetype(gfp_mask); + unsigned int cpuset_mems_cookie; + int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; + struct mem_cgroup *memcg = NULL; ++ int classzone_idx; + + gfp_mask &= gfp_allowed_mask; + +@@ -2745,39 +2803,23 @@ retry_cpuset: + cpuset_mems_cookie = read_mems_allowed_begin(); + + /* The preferred zone is used for statistics later */ +- first_zones_zonelist(zonelist, high_zoneidx, ++ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx, + nodemask ? : &cpuset_current_mems_allowed, + &preferred_zone); + if (!preferred_zone) + goto out; ++ classzone_idx = zonelist_zone_idx(preferred_zoneref); + + #ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; + #endif +-retry: + /* First allocation attempt */ + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, + zonelist, high_zoneidx, alloc_flags, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + if (unlikely(!page)) { + /* +- * The first pass makes sure allocations are spread +- * fairly within the local node. However, the local +- * node might have free pages left after the fairness +- * batches are exhausted, and remote zones haven't +- * even been considered yet. Try once more without +- * fairness, and include remote zones now, before +- * entering the slowpath and waking kswapd: prefer +- * spilling to a remote zone over swapping locally. +- */ +- if (alloc_flags & ALLOC_FAIR) { +- reset_alloc_batches(zonelist, high_zoneidx, +- preferred_zone); +- alloc_flags &= ~ALLOC_FAIR; +- goto retry; +- } +- /* + * Runtime PM, block IO and its error handling path + * can deadlock because I/O on the device might not + * complete. 
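
The fairness comment relocated above (out of __alloc_pages_nodemask() and into get_page_from_freelist()) describes a two-pass scan. A toy userspace model of that control flow follows (zone names, batch sizes and page counts are invented; the real rescan is additionally gated on nr_fair_skipped and nr_online_nodes, as the hunk above shows):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_zone {
        const char *name;
        long batch;    /* stands in for the NR_ALLOC_BATCH counter */
        bool depleted; /* stands in for ZONE_FAIR_DEPLETED */
        long free;
        bool local;    /* zone_local(preferred_zone, zone)? */
    };

    /* Pass 1: local zones with batch left. Pass 2: fairness off, batches
     * reset (reset_alloc_batches() analogue), remote zones eligible -
     * only then would the real allocator enter the slowpath. */
    static struct toy_zone *toy_alloc(struct toy_zone *z, int n)
    {
        bool fair = true;

    rescan:
        for (int i = 0; i < n; i++) {
            if (fair && (!z[i].local || z[i].depleted))
                continue;
            if (z[i].free == 0)
                continue;
            z[i].free--;
            if (--z[i].batch <= 0)
                z[i].depleted = true;
            return &z[i];
        }
        if (fair) {
            fair = false;
            for (int i = 0; i < n; i++) {
                z[i].batch = 2;
                z[i].depleted = false;
            }
            goto rescan;
        }
        return NULL; /* slowpath */
    }

    int main(void)
    {
        struct toy_zone z[] = {
            { "local-Normal",  2, false, 1, true  },
            { "local-DMA32",   2, false, 1, true  },
            { "remote-Normal", 2, false, 4, false },
        };

        for (int i = 0; i < 6; i++) {
            struct toy_zone *got = toy_alloc(z, 3);
            printf("%s\n", got ? got->name : "slowpath");
        }
        return 0;
    }

Once the local zones run dry, the model spills to the remote zone rather than reporting failure, which is exactly the "prefer spilling to a remote zone over swapping locally" behaviour the comment describes.
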
+@@ -2785,7 +2827,7 @@ retry: + gfp_mask = memalloc_noio_flags(gfp_mask); + page = __alloc_pages_slowpath(gfp_mask, order, + zonelist, high_zoneidx, nodemask, +- preferred_zone, migratetype); ++ preferred_zone, classzone_idx, migratetype); + } + + trace_mm_page_alloc(page, order, gfp_mask, migratetype); +@@ -2836,7 +2878,7 @@ void __free_pages(struct page *page, unsigned int order) + { + if (put_page_testzero(page)) { + if (order == 0) +- free_hot_cold_page(page, 0); ++ free_hot_cold_page(page, false); + else + __free_pages_ok(page, order); + } +@@ -3220,12 +3262,12 @@ void show_free_areas(unsigned int filter) + K(zone_page_state(zone, NR_BOUNCE)), + K(zone_page_state(zone, NR_FREE_CMA_PAGES)), + K(zone_page_state(zone, NR_WRITEBACK_TEMP)), +- zone->pages_scanned, ++ K(zone_page_state(zone, NR_PAGES_SCANNED)), + (!zone_reclaimable(zone) ? "yes" : "no") + ); + printk("lowmem_reserve[]:"); + for (i = 0; i < MAX_NR_ZONES; i++) +- printk(" %lu", zone->lowmem_reserve[i]); ++ printk(" %ld", zone->lowmem_reserve[i]); + printk("\n"); + } + +@@ -4113,7 +4155,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, + + static void __meminit zone_init_free_lists(struct zone *zone) + { +- int order, t; ++ unsigned int order, t; + for_each_migratetype_order(order, t) { + INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); + zone->free_area[order].nr_free = 0; +@@ -5553,7 +5595,7 @@ static void calculate_totalreserve_pages(void) + for_each_online_pgdat(pgdat) { + for (i = 0; i < MAX_NR_ZONES; i++) { + struct zone *zone = pgdat->node_zones + i; +- unsigned long max = 0; ++ long max = 0; + + /* Find valid and maximum lowmem_reserve in the zone */ + for (j = i; j < MAX_NR_ZONES; j++) { +@@ -6041,17 +6083,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) + * @end_bitidx: The last bit of interest + * returns pageblock_bits flags + */ +-unsigned long get_pageblock_flags_mask(struct page *page, ++unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask) + { + struct zone *zone; + unsigned long *bitmap; +- unsigned long pfn, bitidx, word_bitidx; ++ unsigned long bitidx, word_bitidx; + unsigned long word; + + zone = page_zone(page); +- pfn = page_to_pfn(page); + bitmap = get_pageblock_bitmap(zone, pfn); + bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; +@@ -6063,25 +6104,25 @@ unsigned long get_pageblock_flags_mask(struct page *page, + } + + /** +- * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages ++ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages + * @page: The page within the block of interest + * @start_bitidx: The first bit of interest + * @end_bitidx: The last bit of interest + * @flags: The flags to set + */ +-void set_pageblock_flags_mask(struct page *page, unsigned long flags, ++void set_pfnblock_flags_mask(struct page *page, unsigned long flags, ++ unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask) + { + struct zone *zone; + unsigned long *bitmap; +- unsigned long pfn, bitidx, word_bitidx; ++ unsigned long bitidx, word_bitidx; + unsigned long old_word, word; + + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); + + zone = page_zone(page); +- pfn = page_to_pfn(page); + bitmap = get_pageblock_bitmap(zone, pfn); + bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; +@@ -6453,7 +6494,7 @@ __offline_isolated_pages(unsigned long 
start_pfn, unsigned long end_pfn) + { + struct page *page; + struct zone *zone; +- int order, i; ++ unsigned int order, i; + unsigned long pfn; + unsigned long flags; + /* find the first valid pfn */ +@@ -6505,7 +6546,7 @@ bool is_free_buddy_page(struct page *page) + struct zone *zone = page_zone(page); + unsigned long pfn = page_to_pfn(page); + unsigned long flags; +- int order; ++ unsigned int order; + + spin_lock_irqsave(&zone->lock, flags); + for (order = 0; order < MAX_ORDER; order++) { +diff --git a/mm/shmem.c b/mm/shmem.c +index 0f14475..85d8a1a 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -1035,6 +1035,9 @@ repeat: + goto failed; + } + ++ if (page && sgp == SGP_WRITE) ++ mark_page_accessed(page); ++ + /* fallocated page? */ + if (page && !PageUptodate(page)) { + if (sgp != SGP_READ) +@@ -1116,6 +1119,9 @@ repeat: + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + ++ if (sgp == SGP_WRITE) ++ mark_page_accessed(page); ++ + delete_from_swap_cache(page); + set_page_dirty(page); + swap_free(swap); +@@ -1140,8 +1146,11 @@ repeat: + goto decused; + } + +- SetPageSwapBacked(page); ++ __SetPageSwapBacked(page); + __set_page_locked(page); ++ if (sgp == SGP_WRITE) ++ init_page_accessed(page); ++ + error = mem_cgroup_cache_charge(page, current->mm, + gfp & GFP_RECLAIM_MASK); + if (error) +diff --git a/mm/swap.c b/mm/swap.c +index c8048d7..d2ceddf 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -67,7 +67,7 @@ static void __page_cache_release(struct page *page) + static void __put_single_page(struct page *page) + { + __page_cache_release(page); +- free_hot_cold_page(page, 0); ++ free_hot_cold_page(page, false); + } + + static void __put_compound_page(struct page *page) +@@ -469,7 +469,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, + SetPageActive(page); + lru += LRU_ACTIVE; + add_page_to_lru_list(page, lruvec, lru); +- trace_mm_lru_activate(page, page_to_pfn(page)); ++ trace_mm_lru_activate(page); + + __count_vm_event(PGACTIVATE); + update_page_reclaim_stat(lruvec, file, 1); +@@ -581,12 +581,17 @@ void mark_page_accessed(struct page *page) + EXPORT_SYMBOL(mark_page_accessed); + + /* +- * Queue the page for addition to the LRU via pagevec. The decision on whether +- * to add the page to the [in]active [file|anon] list is deferred until the +- * pagevec is drained. This gives a chance for the caller of __lru_cache_add() +- * have the page added to the active list using mark_page_accessed(). ++ * Used to mark_page_accessed(page) that is not visible yet and when it is ++ * still safe to use non-atomic ops + */ +-void __lru_cache_add(struct page *page) ++void init_page_accessed(struct page *page) ++{ ++ if (!PageReferenced(page)) ++ __SetPageReferenced(page); ++} ++EXPORT_SYMBOL(init_page_accessed); ++ ++static void __lru_cache_add(struct page *page) + { + struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + +@@ -596,11 +601,34 @@ void __lru_cache_add(struct page *page) + pagevec_add(pvec, page); + put_cpu_var(lru_add_pvec); + } +-EXPORT_SYMBOL(__lru_cache_add); ++ ++/** ++ * lru_cache_add: add a page to the page lists ++ * @page: the page to add ++ */ ++void lru_cache_add_anon(struct page *page) ++{ ++ if (PageActive(page)) ++ ClearPageActive(page); ++ __lru_cache_add(page); ++} ++ ++void lru_cache_add_file(struct page *page) ++{ ++ if (PageActive(page)) ++ ClearPageActive(page); ++ __lru_cache_add(page); ++} ++EXPORT_SYMBOL(lru_cache_add_file); + + /** + * lru_cache_add - add a page to a page list + * @page: the page to be added to the LRU. 
++ * ++ * Queue the page for addition to the LRU via pagevec. The decision on whether ++ * to add the page to the [in]active [file|anon] list is deferred until the ++ * pagevec is drained. This gives a chance for the caller of lru_cache_add() ++ * to have the page added to the active list using mark_page_accessed(). + */ + void lru_cache_add(struct page *page) + { +@@ -811,7 +839,7 @@ void lru_add_drain_all(void) + * grabbed the page via the LRU. If it did, give up: shrink_inactive_list() + * will free it. + */ +-void release_pages(struct page **pages, int nr, int cold) ++void release_pages(struct page **pages, int nr, bool cold) + { + int i; + LIST_HEAD(pages_to_free); +@@ -852,7 +880,7 @@ void release_pages(struct page **pages, int nr, int cold) + } + + /* Clear Active bit in case of parallel mark_page_accessed */ +- ClearPageActive(page); ++ __ClearPageActive(page); + + list_add(&page->lru, &pages_to_free); + } +@@ -934,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, + SetPageLRU(page); + add_page_to_lru_list(page, lruvec, lru); + update_page_reclaim_stat(lruvec, file, active); +- trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page)); ++ trace_mm_lru_insertion(page, lru); + } + + /* +diff --git a/mm/swap_state.c b/mm/swap_state.c +index e76ace3..2972eee 100644 +--- a/mm/swap_state.c ++++ b/mm/swap_state.c +@@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr) + + for (i = 0; i < todo; i++) + free_swap_cache(pagep[i]); +- release_pages(pagep, todo, 0); ++ release_pages(pagep, todo, false); + pagep += todo; + nr -= todo; + } +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 0fdf968..aa3891e 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -2681,14 +2681,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi) + + prev_end = VMALLOC_START; + +- spin_lock(&vmap_area_lock); ++ rcu_read_lock(); + + if (list_empty(&vmap_area_list)) { + vmi->largest_chunk = VMALLOC_TOTAL; + goto out; + } + +- list_for_each_entry(va, &vmap_area_list, list) { ++ list_for_each_entry_rcu(va, &vmap_area_list, list) { + unsigned long addr = va->va_start; + + /* +@@ -2715,7 +2715,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi) + vmi->largest_chunk = VMALLOC_END - prev_end; + + out: +- spin_unlock(&vmap_area_lock); ++ rcu_read_unlock(); + } + #endif + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index be6a689..b850ced6 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone) + + bool zone_reclaimable(struct zone *zone) + { +- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; ++ return zone_page_state(zone, NR_PAGES_SCANNED) < ++ zone_reclaimable_pages(zone) * 6; + } + + static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) +@@ -1107,7 +1108,7 @@ keep: + VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); + } + +- free_hot_cold_page_list(&free_pages, 1); ++ free_hot_cold_page_list(&free_pages, true); + + list_splice(&ret_pages, page_list); + count_vm_events(PGACTIVATE, pgactivate); +@@ -1470,7 +1471,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); + + if (global_reclaim(sc)) { +- zone->pages_scanned += nr_scanned; ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); + if (current_is_kswapd()) + __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); + else +@@ -1505,7 +1506,7 @@ shrink_inactive_list(unsigned long
nr_to_scan, struct lruvec *lruvec, + + spin_unlock_irq(&zone->lru_lock); + +- free_hot_cold_page_list(&page_list, 1); ++ free_hot_cold_page_list(&page_list, true); + + /* + * If reclaim is isolating dirty pages under writeback, it implies +@@ -1659,7 +1660,7 @@ static void shrink_active_list(unsigned long nr_to_scan, + nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, + &nr_scanned, sc, isolate_mode, lru); + if (global_reclaim(sc)) +- zone->pages_scanned += nr_scanned; ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); + + reclaim_stat->recent_scanned[file] += nr_taken; + +@@ -1725,7 +1726,7 @@ static void shrink_active_list(unsigned long nr_to_scan, + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); + spin_unlock_irq(&zone->lru_lock); + +- free_hot_cold_page_list(&l_hold, 1); ++ free_hot_cold_page_list(&l_hold, true); + } + + #ifdef CONFIG_SWAP +@@ -1847,7 +1848,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + struct zone *zone = lruvec_zone(lruvec); + unsigned long anon_prio, file_prio; + enum scan_balance scan_balance; +- unsigned long anon, file, free; ++ unsigned long anon, file; + bool force_scan = false; + unsigned long ap, fp; + enum lru_list lru; +@@ -1895,11 +1896,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + goto out; + } + +- anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) + +- get_lru_size(lruvec, LRU_INACTIVE_ANON); +- file = get_lru_size(lruvec, LRU_ACTIVE_FILE) + +- get_lru_size(lruvec, LRU_INACTIVE_FILE); +- + /* + * If it's foreseeable that reclaiming the file cache won't be + * enough to get the zone back into a desirable shape, we have +@@ -1907,8 +1903,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + * thrashing - remaining file pages alone. 
+ */ + if (global_reclaim(sc)) { +- free = zone_page_state(zone, NR_FREE_PAGES); +- if (unlikely(file + free <= high_wmark_pages(zone))) { ++ unsigned long zonefile; ++ unsigned long zonefree; ++ ++ zonefree = zone_page_state(zone, NR_FREE_PAGES); ++ zonefile = zone_page_state(zone, NR_ACTIVE_FILE) + ++ zone_page_state(zone, NR_INACTIVE_FILE); ++ ++ if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) { + scan_balance = SCAN_ANON; + goto out; + } +@@ -1943,6 +1945,12 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + * + * anon in [0], file in [1] + */ ++ ++ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) + ++ get_lru_size(lruvec, LRU_INACTIVE_ANON); ++ file = get_lru_size(lruvec, LRU_ACTIVE_FILE) + ++ get_lru_size(lruvec, LRU_INACTIVE_FILE); ++ + spin_lock_irq(&zone->lru_lock); + if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { + reclaim_stat->recent_scanned[0] /= 2; +diff --git a/mm/vmstat.c b/mm/vmstat.c +index def5dd2..eded190 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -200,7 +200,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat, + continue; + + threshold = (*calculate_pressure)(zone); +- for_each_possible_cpu(cpu) ++ for_each_online_cpu(cpu) + per_cpu_ptr(zone->pageset, cpu)->stat_threshold + = threshold; + } +@@ -761,6 +761,7 @@ const char * const vmstat_text[] = { + "nr_shmem", + "nr_dirtied", + "nr_written", ++ "nr_pages_scanned", + + #ifdef CONFIG_NUMA + "numa_hit", +@@ -1055,7 +1056,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, + min_wmark_pages(zone), + low_wmark_pages(zone), + high_wmark_pages(zone), +- zone->pages_scanned, ++ zone_page_state(zone, NR_PAGES_SCANNED), + zone->spanned_pages, + zone->present_pages, + zone->managed_pages); +@@ -1065,10 +1066,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, + zone_page_state(zone, i)); + + seq_printf(m, +- "\n protection: (%lu", ++ "\n protection: (%ld", + zone->lowmem_reserve[0]); + for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) +- seq_printf(m, ", %lu", zone->lowmem_reserve[i]); ++ seq_printf(m, ", %ld", zone->lowmem_reserve[i]); + seq_printf(m, + ")" + "\n pagesets"); +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c +index 77c1732..4a662f1 100644 +--- a/net/netfilter/ipvs/ip_vs_ftp.c ++++ b/net/netfilter/ipvs/ip_vs_ftp.c +@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, + struct nf_conn *ct; + struct net *net; + ++ *diff = 0; ++ + #ifdef CONFIG_IP_VS_IPV6 + /* This application helper doesn't work with IPv6 yet, + * so turn this into a no-op for IPv6 packets +@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, + return 1; + #endif + +- *diff = 0; +- + /* Only useful for established sessions */ + if (cp->state != IP_VS_TCP_S_ESTABLISHED) + return 1; +@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, + struct ip_vs_conn *n_cp; + struct net *net; + ++ /* no diff required for incoming packets */ ++ *diff = 0; ++ + #ifdef CONFIG_IP_VS_IPV6 + /* This application helper doesn't work with IPv6 yet, + * so turn this into a no-op for IPv6 packets +@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, + return 1; + #endif + +- /* no diff required for incoming packets */ +- *diff = 0; +- + /* Only useful for established sessions */ + if (cp->state != IP_VS_TCP_S_ESTABLISHED) + return 1; +diff --git a/net/netfilter/nfnetlink.c 
b/net/netfilter/nfnetlink.c +index bf8a108..6cf2f07 100644 +--- a/net/netfilter/nfnetlink.c ++++ b/net/netfilter/nfnetlink.c +@@ -265,7 +265,8 @@ replay: + nlh = nlmsg_hdr(skb); + err = 0; + +- if (nlh->nlmsg_len < NLMSG_HDRLEN) { ++ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || ++ skb->len < nlh->nlmsg_len) { + err = -EINVAL; + goto ack; + } +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl +index 91280b8..513f7bd 100755 +--- a/scripts/recordmcount.pl ++++ b/scripts/recordmcount.pl +@@ -262,7 +262,6 @@ if ($arch eq "x86_64") { + # force flags for this arch + $ld .= " -m shlelf_linux"; + $objcopy .= " -O elf32-sh-linux"; +- $cc .= " -m32"; + + } elsif ($arch eq "powerpc") { + $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; +diff --git a/security/keys/gc.c b/security/keys/gc.c +index d3222b6..009d937 100644 +--- a/security/keys/gc.c ++++ b/security/keys/gc.c +@@ -157,12 +157,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) + atomic_dec(&key->user->nikeys); + +- key_user_put(key->user); +- + /* now throw away the key memory */ + if (key->type->destroy) + key->type->destroy(key); + ++ key_user_put(key->user); ++ + kfree(key->description); + + #ifdef KEY_DEBUGGING +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 1bed780..2d37b3f 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ + case USB_ID(0x046d, 0x0808): + case USB_ID(0x046d, 0x0809): ++ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */ + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.31-32.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.31-32.patch new file mode 100644 index 0000000000..b768d72cfd --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.31-32.patch @@ -0,0 +1,1098 @@ +diff --git a/Makefile b/Makefile +index 5abf670..00fffa3 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 31 ++SUBLEVEL = 32 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c +index 11b3914..42f2fb8 100644 +--- a/arch/arm/mm/dma-mapping.c ++++ b/arch/arm/mm/dma-mapping.c +@@ -464,12 +464,21 @@ void __init dma_contiguous_remap(void) + map.type = MT_MEMORY_DMA_READY; + + /* +- * Clear previous low-memory mapping ++ * Clear previous low-memory mapping to ensure that the ++ * TLB does not see any conflicting entries, then flush ++ * the TLB of the old entries before creating new mappings. ++ * ++ * This ensures that any speculatively loaded TLB entries ++ * (even though they may be rare) can not cause any problems, ++ * and ensures that this code is architecturally compliant. 
+ */ + for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); + addr += PMD_SIZE) + pmd_clear(pmd_off_k(addr)); + ++ flush_tlb_kernel_range(__phys_to_virt(start), ++ __phys_to_virt(end)); ++ + iotable_init(&map, 1); + } + } +diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c +index bc5fbc2..f89389f 100644 +--- a/arch/powerpc/xmon/xmon.c ++++ b/arch/powerpc/xmon/xmon.c +@@ -288,6 +288,7 @@ static inline void disable_surveillance(void) + args.token = rtas_token("set-indicator"); + if (args.token == RTAS_UNKNOWN_SERVICE) + return; ++ args.token = cpu_to_be32(args.token); + args.nargs = cpu_to_be32(3); + args.nret = cpu_to_be32(1); + args.rets = &args.args[3]; +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index b5bb498..67e9f5c 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -76,7 +76,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo + suffix-$(CONFIG_KERNEL_LZ4) := lz4 + + RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ +- perl $(srctree)/arch/x86/tools/calc_run_size.pl) ++ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh) + quiet_cmd_mkpiggy = MKPIGGY $@ + cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) + +diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl +deleted file mode 100644 +index 23210ba..0000000 +--- a/arch/x86/tools/calc_run_size.pl ++++ /dev/null +@@ -1,39 +0,0 @@ +-#!/usr/bin/perl +-# +-# Calculate the amount of space needed to run the kernel, including room for +-# the .bss and .brk sections. +-# +-# Usage: +-# objdump -h a.out | perl calc_run_size.pl +-use strict; +- +-my $mem_size = 0; +-my $file_offset = 0; +- +-my $sections=" *[0-9]+ \.(?:bss|brk) +"; +-while (<>) { +- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { +- my $size = hex($1); +- my $offset = hex($2); +- $mem_size += $size; +- if ($file_offset == 0) { +- $file_offset = $offset; +- } elsif ($file_offset != $offset) { +- # BFD linker shows the same file offset in ELF. +- # Gold linker shows them as consecutive. +- next if ($file_offset + $mem_size == $offset + $size); +- +- printf STDERR "file_offset: 0x%lx\n", $file_offset; +- printf STDERR "mem_size: 0x%lx\n", $mem_size; +- printf STDERR "offset: 0x%lx\n", $offset; +- printf STDERR "size: 0x%lx\n", $size; +- +- die ".bss and .brk are non-contiguous\n"; +- } +- } +-} +- +-if ($file_offset == 0) { +- die "Never found .bss or .brk file offset\n"; +-} +-printf("%d\n", $mem_size + $file_offset); +diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh +new file mode 100644 +index 0000000..1a4c17b +--- /dev/null ++++ b/arch/x86/tools/calc_run_size.sh +@@ -0,0 +1,42 @@ ++#!/bin/sh ++# ++# Calculate the amount of space needed to run the kernel, including room for ++# the .bss and .brk sections. ++# ++# Usage: ++# objdump -h a.out | sh calc_run_size.sh ++ ++NUM='\([0-9a-fA-F]*[ \t]*\)' ++OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p') ++if [ -z "$OUT" ] ; then ++ echo "Never found .bss or .brk file offset" >&2 ++ exit 1 ++fi ++ ++OUT=$(echo ${OUT# }) ++sizeA=$(printf "%d" 0x${OUT%% *}) ++OUT=${OUT#* } ++offsetA=$(printf "%d" 0x${OUT%% *}) ++OUT=${OUT#* } ++sizeB=$(printf "%d" 0x${OUT%% *}) ++OUT=${OUT#* } ++offsetB=$(printf "%d" 0x${OUT%% *}) ++ ++run_size=$(( $offsetA + $sizeA + $sizeB )) ++ ++# BFD linker shows the same file offset in ELF. ++if [ "$offsetA" -ne "$offsetB" ] ; then ++ # Gold linker shows them as consecutive. 
++ endB=$(( $offsetB + $sizeB )) ++ if [ "$endB" != "$run_size" ] ; then ++ printf "sizeA: 0x%x\n" $sizeA >&2 ++ printf "offsetA: 0x%x\n" $offsetA >&2 ++ printf "sizeB: 0x%x\n" $sizeB >&2 ++ printf "offsetB: 0x%x\n" $offsetB >&2 ++ echo ".bss and .brk are non-contiguous" >&2 ++ exit 1 ++ fi ++fi ++ ++printf "%d\n" $run_size ++exit 0 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 255ca23..275a7dc 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -1926,32 +1926,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) + * If an image has a non-zero parent overlap, get a reference to its + * parent. + * +- * We must get the reference before checking for the overlap to +- * coordinate properly with zeroing the parent overlap in +- * rbd_dev_v2_parent_info() when an image gets flattened. We +- * drop it again if there is no overlap. +- * + * Returns true if the rbd device has a parent with a non-zero + * overlap and a reference for it was successfully taken, or + * false otherwise. + */ + static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) + { +- int counter; ++ int counter = 0; + + if (!rbd_dev->parent_spec) + return false; + +- counter = atomic_inc_return_safe(&rbd_dev->parent_ref); +- if (counter > 0 && rbd_dev->parent_overlap) +- return true; +- +- /* Image was flattened, but parent is not yet torn down */ ++ down_read(&rbd_dev->header_rwsem); ++ if (rbd_dev->parent_overlap) ++ counter = atomic_inc_return_safe(&rbd_dev->parent_ref); ++ up_read(&rbd_dev->header_rwsem); + + if (counter < 0) + rbd_warn(rbd_dev, "parent reference overflow\n"); + +- return false; ++ return counter > 0; + } + + /* +@@ -3904,7 +3898,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) + */ + if (rbd_dev->parent_overlap) { + rbd_dev->parent_overlap = 0; +- smp_mb(); + rbd_dev_parent_put(rbd_dev); + pr_info("%s: clone image has been flattened\n", + rbd_dev->disk->disk_name); +@@ -3948,7 +3941,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) + * treat it specially. + */ + rbd_dev->parent_overlap = overlap; +- smp_mb(); + if (!overlap) { + + /* A null parent_spec indicates it's the initial probe */ +@@ -4764,10 +4756,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) + { + struct rbd_image_header *header; + +- /* Drop parent reference unless it's already been done (or none) */ +- +- if (rbd_dev->parent_overlap) +- rbd_dev_parent_put(rbd_dev); ++ rbd_dev_parent_put(rbd_dev); + + /* Free dynamic fields from the header, then zero it out */ + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 3153eab..de5ab48 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -2893,6 +2893,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, + u32 size = i915_gem_obj_ggtt_size(obj); + uint64_t val; + ++ /* Adjust fence size to match tiled area */ ++ if (obj->tiling_mode != I915_TILING_NONE) { ++ uint32_t row_size = obj->stride * ++ (obj->tiling_mode == I915_TILING_Y ? 
32 : 8); ++ size = (size / row_size) * row_size; ++ } ++ + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & + 0xfffff000) << 32; + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c +index 93ec376..79a2117 100644 +--- a/drivers/i2c/busses/i2c-s3c2410.c ++++ b/drivers/i2c/busses/i2c-s3c2410.c +@@ -753,14 +753,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, + int ret; + + pm_runtime_get_sync(&adap->dev); +- clk_prepare_enable(i2c->clk); ++ ret = clk_enable(i2c->clk); ++ if (ret) ++ return ret; + + for (retry = 0; retry < adap->retries; retry++) { + + ret = s3c24xx_i2c_doxfer(i2c, msgs, num); + + if (ret != -EAGAIN) { +- clk_disable_unprepare(i2c->clk); ++ clk_disable(i2c->clk); + pm_runtime_put(&adap->dev); + return ret; + } +@@ -770,7 +772,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, + udelay(100); + } + +- clk_disable_unprepare(i2c->clk); ++ clk_disable(i2c->clk); + pm_runtime_put(&adap->dev); + return -EREMOTEIO; + } +@@ -1153,7 +1155,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) + + clk_prepare_enable(i2c->clk); + ret = s3c24xx_i2c_init(i2c); +- clk_disable_unprepare(i2c->clk); ++ clk_disable(i2c->clk); + if (ret != 0) { + dev_err(&pdev->dev, "I2C controller init failed\n"); + return ret; +@@ -1166,6 +1168,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) + i2c->irq = ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(&pdev->dev, "cannot find IRQ\n"); ++ clk_unprepare(i2c->clk); + return ret; + } + +@@ -1174,6 +1177,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) + + if (ret != 0) { + dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); ++ clk_unprepare(i2c->clk); + return ret; + } + } +@@ -1181,6 +1185,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) + ret = s3c24xx_i2c_register_cpufreq(i2c); + if (ret < 0) { + dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); ++ clk_unprepare(i2c->clk); + return ret; + } + +@@ -1197,6 +1202,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) + if (ret < 0) { + dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + s3c24xx_i2c_deregister_cpufreq(i2c); ++ clk_unprepare(i2c->clk); + return ret; + } + +@@ -1218,6 +1224,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) + { + struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + ++ clk_unprepare(i2c->clk); ++ + pm_runtime_disable(&i2c->adap.dev); + pm_runtime_disable(&pdev->dev); + +@@ -1246,10 +1254,13 @@ static int s3c24xx_i2c_resume(struct device *dev) + { + struct platform_device *pdev = to_platform_device(dev); + struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); ++ int ret; + +- clk_prepare_enable(i2c->clk); ++ ret = clk_enable(i2c->clk); ++ if (ret) ++ return ret; + s3c24xx_i2c_init(i2c); +- clk_disable_unprepare(i2c->clk); ++ clk_disable(i2c->clk); + i2c->suspended = 0; + + return 0; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index a3769cf..b00e282 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -132,8 +132,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = { + 1232, 5710, 1156, 4696 + }, + { +- (const char * const []){"LEN0034", "LEN0036", "LEN0039", +- "LEN2002", "LEN2004", NULL}, ++ (const char * const []){"LEN0034", "LEN0036", "LEN0037", ++ "LEN0039", "LEN2002", "LEN2004", ++ NULL}, + 1024, 5112, 2024, 4832 + }, + { +@@ -162,7 +163,7 @@ static const char * const 
topbuttonpad_pnp_ids[] = { + "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ + "LEN0035", /* X240 */ + "LEN0036", /* T440 */ +- "LEN0037", ++ "LEN0037", /* X1 Carbon 2nd */ + "LEN0038", + "LEN0039", /* T440s */ + "LEN0041", +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index c43c46f..dd6d14d 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + }, + }, + { ++ /* Medion Akoya E7225 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Medion"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), ++ }, ++ }, ++ { + /* Blue FB5601 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "blue"), +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c +index d290e83..b950a80 100644 +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev, + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + DMERR("could not allocate metadata struct"); +- return NULL; ++ return ERR_PTR(-ENOMEM); + } + + atomic_set(&cmd->ref_count, 1); +@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, + return cmd; + + cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); +- if (cmd) { ++ if (!IS_ERR(cmd)) { + mutex_lock(&table_lock); + cmd2 = lookup(bdev); + if (cmd2) { +@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + { + struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, + may_format_device, policy_hint_size); +- if (cmd && !same_params(cmd, data_block_size)) { ++ ++ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { + dm_cache_metadata_close(cmd); +- return NULL; ++ return ERR_PTR(-EINVAL); + } + + return cmd; +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index f7e052c..c1120eb 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -2744,6 +2744,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + ++ if (get_pool_mode(pool) >= PM_READ_ONLY) { ++ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", ++ dm_device_name(pool->pool_md)); ++ return -EINVAL; ++ } ++ + if (!strcasecmp(argv[0], "create_thin")) + r = process_create_thin_mesg(argc, argv, pool); + +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c +index 4e65b35..3d19867 100644 +--- a/drivers/net/can/usb/kvaser_usb.c ++++ b/drivers/net/can/usb/kvaser_usb.c +@@ -578,7 +578,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + buf, msg->len, +- kvaser_usb_simple_msg_callback, priv); ++ kvaser_usb_simple_msg_callback, netdev); + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); +@@ -653,11 +653,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, + priv = dev->nets[channel]; + stats = &priv->netdev->stats; + +- if (status & M16C_STATE_BUS_RESET) { +- kvaser_usb_unlink_tx_urbs(priv); +- return; +- } +- + skb = alloc_can_err_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; +@@ -668,7 +663,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb 
*dev, + + netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); + +- if (status & M16C_STATE_BUS_OFF) { ++ if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { + cf->can_id |= CAN_ERR_BUSOFF; + + priv->can.can_stats.bus_off++; +@@ -694,9 +689,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, + } + + new_state = CAN_STATE_ERROR_PASSIVE; +- } +- +- if (status == M16C_STATE_BUS_ERROR) { ++ } else if (status & M16C_STATE_BUS_ERROR) { + if ((priv->can.state < CAN_STATE_ERROR_WARNING) && + ((txerr >= 96) || (rxerr >= 96))) { + cf->can_id |= CAN_ERR_CRTL; +@@ -706,7 +699,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, + + priv->can.can_stats.error_warning++; + new_state = CAN_STATE_ERROR_WARNING; +- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { ++ } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && ++ ((txerr < 96) && (rxerr < 96))) { + cf->can_id |= CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_ACTIVE; + +@@ -1582,7 +1576,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, + { + struct kvaser_usb *dev; + int err = -ENOMEM; +- int i; ++ int i, retry = 3; + + dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) +@@ -1600,7 +1594,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, + + usb_set_intfdata(intf, dev); + +- err = kvaser_usb_get_software_info(dev); ++ /* On some x86 laptops, plugging a Kvaser device again after ++ * an unplug makes the firmware always ignore the very first ++ * command. For such a case, provide some room for retries ++ * instead of completely exiting the driver. ++ */ ++ do { ++ err = kvaser_usb_get_software_info(dev); ++ } while (--retry && err == -ETIMEDOUT); ++ + if (err) { + dev_err(&intf->dev, + "Cannot get software infos, error %d\n", err); +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c +index 316650c..4eb091d 100644 +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -1610,6 +1610,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + if (vid == priv->data.default_vlan) + return 0; + ++ if (priv->data.dual_emac) { ++ /* In dual EMAC, reserved VLAN id should not be used for ++ * creating VLAN interfaces as this can break the dual ++ * EMAC port separation ++ */ ++ int i; ++ ++ for (i = 0; i < priv->data.slaves; i++) { ++ if (vid == priv->slaves[i].port_vlan) ++ return -EINVAL; ++ } ++ } ++ + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + return cpsw_add_vlan_ale_entry(priv, vid); + } +@@ -1623,6 +1636,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + if (vid == priv->data.default_vlan) + return 0; + ++ if (priv->data.dual_emac) { ++ int i; ++ ++ for (i = 0; i < priv->data.slaves; i++) { ++ if (vid == priv->slaves[i].port_vlan) ++ return -EINVAL; ++ } ++ } ++ + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); + ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + if (ret != 0) +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index afca1bc..b798404 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1479,7 +1479,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id) + } + EXPORT_SYMBOL_GPL(regulator_get_optional); + +-/* Locks held by regulator_put() */ ++/* regulator_list_mutex lock held by regulator_put() */ + static void _regulator_put(struct regulator *regulator) + { + struct regulator_dev *rdev; +@@ -1494,12 +1494,14 @@ static void _regulator_put(struct regulator *regulator) + /* 
remove any sysfs entries */ + if (regulator->dev) + sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); ++ mutex_lock(&rdev->mutex); + kfree(regulator->supply_name); + list_del(®ulator->list); + kfree(regulator); + + rdev->open_count--; + rdev->exclusive = 0; ++ mutex_unlock(&rdev->mutex); + + module_put(rdev->owner); + } +diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c +index a4c45ea..996e16d 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws) + iounmap(clk_reg); + + dws->num_cs = 16; +- dws->fifo_len = 40; /* FIFO has 40 words buffer */ + + #ifdef CONFIG_SPI_DW_MID_DMA + dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 7ab3ccb..458a148 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -400,8 +400,8 @@ static void giveback(struct driver_data *drv_data) + cs_deassert(drv_data); + } + +- spi_finalize_current_message(drv_data->master); + drv_data->cur_chip = NULL; ++ spi_finalize_current_message(drv_data->master); + } + + static void reset_sccr1(struct driver_data *drv_data) +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 38b4be2..26ae688 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1153,10 +1153,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) + " changed for TCM/pSCSI\n", dev); + return -EINVAL; + } +- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { ++ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" +- " greater than fabric_max_sectors: %u\n", dev, +- optimal_sectors, dev->dev_attrib.fabric_max_sectors); ++ " greater than hw_max_sectors: %u\n", dev, ++ optimal_sectors, dev->dev_attrib.hw_max_sectors); + return -EINVAL; + } + +@@ -1565,7 +1565,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; + dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; + dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; +- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; + + xcopy_lun = &dev->xcopy_lun; + xcopy_lun->lun_se_dev = dev; +@@ -1606,6 +1605,7 @@ int target_configure_device(struct se_device *dev) + dev->dev_attrib.hw_max_sectors = + se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, + dev->dev_attrib.hw_block_size); ++ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; + + dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); + dev->creation_time = get_jiffies_64(); +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index cf991a9..41eff7d 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -620,7 +620,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + struct fd_prot fd_prot; + sense_reason_t rc; + int ret = 0; +- ++ /* ++ * We are currently limited by the number of iovecs (2048) per ++ * single vfs_[writev,readv] call. 
++ */ ++ if (cmd->data_length > FD_MAX_BYTES) { ++ pr_err("FILEIO: Not able to process I/O of %u bytes due to" ++ "FD_MAX_BYTES: %u iovec count limitiation\n", ++ cmd->data_length, FD_MAX_BYTES); ++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ++ } + /* + * Call vectorized fileio functions to map struct scatterlist + * physical memory addresses to struct iovec virtual memory. +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index 9e0232c..feefe24 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -123,7 +123,7 @@ static int iblock_configure_device(struct se_device *dev) + q = bdev_get_queue(bd); + + dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); +- dev->dev_attrib.hw_max_sectors = UINT_MAX; ++ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + + /* +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 379033f..5216acd 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -910,21 +910,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { + unsigned long long end_lba; + +- if (sectors > dev->dev_attrib.fabric_max_sectors) { +- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" +- " big sectors %u exceeds fabric_max_sectors:" +- " %u\n", cdb[0], sectors, +- dev->dev_attrib.fabric_max_sectors); +- return TCM_INVALID_CDB_FIELD; +- } +- if (sectors > dev->dev_attrib.hw_max_sectors) { +- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" +- " big sectors %u exceeds backend hw_max_sectors:" +- " %u\n", cdb[0], sectors, +- dev->dev_attrib.hw_max_sectors); +- return TCM_INVALID_CDB_FIELD; +- } +- + end_lba = dev->transport->get_blocks(dev) + 1; + if (cmd->t_task_lba + sectors > end_lba) { + pr_err("cmd exceeds last lba %llu " +diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c +index fcdf98f..12a74f6 100644 +--- a/drivers/target/target_core_spc.c ++++ b/drivers/target/target_core_spc.c +@@ -503,7 +503,6 @@ static sense_reason_t + spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) + { + struct se_device *dev = cmd->se_dev; +- u32 max_sectors; + int have_tp = 0; + int opt, min; + +@@ -537,9 +536,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) + /* + * Set MAXIMUM TRANSFER LENGTH + */ +- max_sectors = min(dev->dev_attrib.fabric_max_sectors, +- dev->dev_attrib.hw_max_sectors); +- put_unaligned_be32(max_sectors, &buf[8]); ++ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); + + /* + * Set OPTIMAL TRANSFER LENGTH +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index de2543d..7ececa1 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq) + */ + ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs) + { ++ struct inode *inode = iocb->ki_filp->f_mapping->host; ++ ++ /* we only support swap file calling nfs_direct_IO */ ++ if (!IS_SWAPFILE(inode)) ++ return 0; ++ + #ifndef CONFIG_NFS_SWAP + dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", + iocb->ki_filp, (long long) pos, nr_segs); +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index 037f957..d3f6062 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -633,7 +633,7 @@ int nfs41_walk_client_list(struct nfs_client *new, + prev = pos; + + status = 
nfs_wait_client_init_complete(pos); +- if (status == 0) { ++ if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { + nfs4_schedule_lease_recovery(pos); + status = nfs4_wait_clnt_recover(pos); + } +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c +index e7d95f9..fe68d8a 100644 +--- a/fs/pstore/ram.c ++++ b/fs/pstore/ram.c +@@ -92,6 +92,7 @@ struct ramoops_context { + struct persistent_ram_ecc_info ecc_info; + unsigned int max_dump_cnt; + unsigned int dump_write_cnt; ++ /* _read_cnt need clear on ramoops_pstore_open */ + unsigned int dump_read_cnt; + unsigned int console_read_cnt; + unsigned int ftrace_read_cnt; +@@ -107,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi) + + cxt->dump_read_cnt = 0; + cxt->console_read_cnt = 0; ++ cxt->ftrace_read_cnt = 0; + return 0; + } + +@@ -123,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max, + return NULL; + + prz = przs[i]; ++ if (!prz) ++ return NULL; + +- if (update) { +- /* Update old/shadowed buffer. */ ++ /* Update old/shadowed buffer. */ ++ if (update) + persistent_ram_save_old(prz); +- if (!persistent_ram_old_size(prz)) +- return NULL; +- } ++ ++ if (!persistent_ram_old_size(prz)) ++ return NULL; + + *typep = type; + *id = i; +@@ -435,7 +439,6 @@ static int ramoops_probe(struct platform_device *pdev) + if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size)) + pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); + +- cxt->dump_read_cnt = 0; + cxt->size = pdata->mem_size; + cxt->phys_addr = pdata->mem_address; + cxt->memtype = pdata->mem_type; +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index b4defde..f6f31d8 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1962,17 +1962,13 @@ static void pool_mayday_timeout(unsigned long __pool) + * spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. Called only from + * manager. +- * +- * Return: +- * %false if no action was taken and pool->lock stayed locked, %true +- * otherwise. + */ +-static bool maybe_create_worker(struct worker_pool *pool) ++static void maybe_create_worker(struct worker_pool *pool) + __releases(&pool->lock) + __acquires(&pool->lock) + { + if (!need_to_create_worker(pool)) +- return false; ++ return; + restart: + spin_unlock_irq(&pool->lock); + +@@ -1989,7 +1985,7 @@ restart: + start_worker(worker); + if (WARN_ON_ONCE(need_to_create_worker(pool))) + goto restart; +- return true; ++ return; + } + + if (!need_to_create_worker(pool)) +@@ -2006,7 +2002,7 @@ restart: + spin_lock_irq(&pool->lock); + if (need_to_create_worker(pool)) + goto restart; +- return true; ++ return; + } + + /** +@@ -2019,15 +2015,9 @@ restart: + * LOCKING: + * spin_lock_irq(pool->lock) which may be released and regrabbed + * multiple times. Called only from manager. +- * +- * Return: +- * %false if no action was taken and pool->lock stayed locked, %true +- * otherwise. + */ +-static bool maybe_destroy_workers(struct worker_pool *pool) ++static void maybe_destroy_workers(struct worker_pool *pool) + { +- bool ret = false; +- + while (too_many_workers(pool)) { + struct worker *worker; + unsigned long expires; +@@ -2041,10 +2031,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool) + } + + destroy_worker(worker); +- ret = true; + } +- +- return ret; + } + + /** +@@ -2064,16 +2051,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool) + * multiple times. Does GFP_KERNEL allocations. 
+ * + * Return: +- * %false if the pool don't need management and the caller can safely start +- * processing works, %true indicates that the function released pool->lock +- * and reacquired it to perform some management function and that the +- * conditions that the caller verified while holding the lock before +- * calling the function might no longer be true. ++ * %false if the pool doesn't need management and the caller can safely ++ * start processing works, %true if management function was performed and ++ * the conditions that the caller verified before calling the function may ++ * no longer be true. + */ + static bool manage_workers(struct worker *worker) + { + struct worker_pool *pool = worker->pool; +- bool ret = false; + + /* + * Managership is governed by two mutexes - manager_arb and +@@ -2097,7 +2082,7 @@ static bool manage_workers(struct worker *worker) + * manager_mutex. + */ + if (!mutex_trylock(&pool->manager_arb)) +- return ret; ++ return false; + + /* + * With manager arbitration won, manager_mutex would be free in +@@ -2107,7 +2092,6 @@ static bool manage_workers(struct worker *worker) + spin_unlock_irq(&pool->lock); + mutex_lock(&pool->manager_mutex); + spin_lock_irq(&pool->lock); +- ret = true; + } + + pool->flags &= ~POOL_MANAGE_WORKERS; +@@ -2116,12 +2100,12 @@ static bool manage_workers(struct worker *worker) + * Destroy and then create so that may_start_working() is true + * on return. + */ +- ret |= maybe_destroy_workers(pool); +- ret |= maybe_create_worker(pool); ++ maybe_destroy_workers(pool); ++ maybe_create_worker(pool); + + mutex_unlock(&pool->manager_mutex); + mutex_unlock(&pool->manager_arb); +- return ret; ++ return true; + } + + /** +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 1e4dc4e..815ca56 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -261,7 +261,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, + else if (rate && rate->flags & IEEE80211_RATE_ERP_G) + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; + else if (rate) +- channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; ++ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; + else + channel_flags |= IEEE80211_CHAN_2GHZ; + put_unaligned_le16(channel_flags, pos); +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index df33156..18d73df 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -2697,6 +2697,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) + if (!rdev->ops->get_key) + return -EOPNOTSUPP; + ++ if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) ++ return -ENOENT; ++ + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; +@@ -2716,10 +2719,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) + goto nla_put_failure; + +- if (pairwise && mac_addr && +- !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) +- return -ENOENT; +- + err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, + get_key_callback); + +@@ -2890,7 +2889,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) + wdev_lock(dev->ieee80211_ptr); + err = nl80211_key_allowed(dev->ieee80211_ptr); + +- if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && ++ if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && + !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) + err = -ENOENT; + +diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c +index 
dbc5507..f60d814 100644 +--- a/sound/core/seq/seq_dummy.c ++++ b/sound/core/seq/seq_dummy.c +@@ -82,36 +82,6 @@ struct snd_seq_dummy_port { + static int my_client = -1; + + /* +- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events +- * to subscribers. +- * Note: this callback is called only after all subscribers are removed. +- */ +-static int +-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) +-{ +- struct snd_seq_dummy_port *p; +- int i; +- struct snd_seq_event ev; +- +- p = private_data; +- memset(&ev, 0, sizeof(ev)); +- if (p->duplex) +- ev.source.port = p->connect; +- else +- ev.source.port = p->port; +- ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; +- ev.type = SNDRV_SEQ_EVENT_CONTROLLER; +- for (i = 0; i < 16; i++) { +- ev.data.control.channel = i; +- ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; +- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); +- ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; +- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); +- } +- return 0; +-} +- +-/* + * event input callback - just redirect events to subscribers + */ + static int +@@ -175,7 +145,6 @@ create_port(int idx, int type) + | SNDRV_SEQ_PORT_TYPE_PORT; + memset(&pcb, 0, sizeof(pcb)); + pcb.owner = THIS_MODULE; +- pcb.unuse = dummy_unuse; + pcb.event_input = dummy_input; + pcb.private_free = dummy_free; + pcb.private_data = rec; +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c +index f156010..942ef84 100644 +--- a/sound/soc/codecs/wm8960.c ++++ b/sound/soc/codecs/wm8960.c +@@ -555,7 +555,7 @@ static struct { + { 22050, 2 }, + { 24000, 2 }, + { 16000, 3 }, +- { 11250, 4 }, ++ { 11025, 4 }, + { 12000, 4 }, + { 8000, 5 }, + }; +diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h +index 75e1403..dfdbaa0 100644 +--- a/sound/soc/fsl/fsl_esai.h ++++ b/sound/soc/fsl/fsl_esai.h +@@ -302,7 +302,7 @@ + #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) + #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) + #define ESAI_xCCR_xDC_SHIFT 9 +-#define ESAI_xCCR_xDC_WIDTH 4 ++#define ESAI_xCCR_xDC_WIDTH 5 + #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) + #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) + #define ESAI_xCCR_xPSR_SHIFT 8 +diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c +index 6c19bba..6a339fb 100644 +--- a/sound/soc/omap/omap-mcbsp.c ++++ b/sound/soc/omap/omap-mcbsp.c +@@ -436,7 +436,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, + case SND_SOC_DAIFMT_CBM_CFS: + /* McBSP slave. 
FS clock as output */ + regs->srgr2 |= FSGM; +- regs->pcr0 |= FSXM; ++ regs->pcr0 |= FSXM | FSRM; + break; + case SND_SOC_DAIFMT_CBM_CFM: + /* McBSP slave */ +diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c +index 5e9690c..4f98ff1 100644 +--- a/sound/soc/soc-compress.c ++++ b/sound/soc/soc-compress.c +@@ -696,7 +696,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) + rtd->dai_link->stream_name); + + ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, +- 1, 0, &be_pcm); ++ rtd->dai_link->dpcm_playback, ++ rtd->dai_link->dpcm_capture, &be_pcm); + if (ret < 0) { + dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", + rtd->dai_link->name); +@@ -705,8 +706,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) + + rtd->pcm = be_pcm; + rtd->fe_compr = 1; +- be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; +- be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; ++ if (rtd->dai_link->dpcm_playback) ++ be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; ++ else if (rtd->dai_link->dpcm_capture) ++ be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; + memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); + } else + memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops)); diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.32-33.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.32-33.patch new file mode 100644 index 0000000000..1d89fbe3fc --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.32-33.patch @@ -0,0 +1,942 @@ +diff --git a/Makefile b/Makefile +index 00fffa3..b0963ca 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 32 ++SUBLEVEL = 33 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c +index 6eb97b3..4370933 100644 +--- a/arch/arm/mm/context.c ++++ b/arch/arm/mm/context.c +@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu) + /* Update the list of reserved ASIDs and the ASID bitmap. */ + bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + for_each_possible_cpu(i) { +- if (i == cpu) { +- asid = 0; +- } else { +- asid = atomic64_xchg(&per_cpu(active_asids, i), 0); +- /* +- * If this CPU has already been through a +- * rollover, but hasn't run another task in +- * the meantime, we must preserve its reserved +- * ASID, as this is the only trace we have of +- * the process it is still running. +- */ +- if (asid == 0) +- asid = per_cpu(reserved_asids, i); +- __set_bit(asid & ~ASID_MASK, asid_map); +- } ++ asid = atomic64_xchg(&per_cpu(active_asids, i), 0); ++ /* ++ * If this CPU has already been through a ++ * rollover, but hasn't run another task in ++ * the meantime, we must preserve its reserved ++ * ASID, as this is the only trace we have of ++ * the process it is still running. 
++ */ ++ if (asid == 0) ++ asid = per_cpu(reserved_asids, i); ++ __set_bit(asid & ~ASID_MASK, asid_map); + per_cpu(reserved_asids, i) = asid; + } + +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index c404fb0..64bc6c6 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -77,6 +77,8 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void) + return read_cpuid(CTR_EL0); + } + ++void cpuinfo_store_cpu(void); ++ + #endif /* __ASSEMBLY__ */ + + #endif +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c +index 071c382..1e01d80 100644 +--- a/arch/arm64/kernel/setup.c ++++ b/arch/arm64/kernel/setup.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -73,7 +74,6 @@ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; + #endif + + static const char *cpu_name; +-static const char *machine_name; + phys_addr_t __fdt_pointer __initdata; + + /* +@@ -193,6 +193,19 @@ static void __init smp_build_mpidr_hash(void) + } + #endif + ++struct cpuinfo_arm64 { ++ struct cpu cpu; ++ u32 reg_midr; ++}; ++ ++static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); ++ ++void cpuinfo_store_cpu(void) ++{ ++ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); ++ info->reg_midr = read_cpuid_id(); ++} ++ + static void __init setup_processor(void) + { + struct cpu_info *cpu_info; +@@ -213,6 +226,8 @@ static void __init setup_processor(void) + sprintf(init_utsname()->machine, ELF_PLATFORM); + elf_hwcap = 0; + ++ cpuinfo_store_cpu(); ++ + /* + * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks. + * The blocks we test below represent incremental functionality +@@ -257,8 +272,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) + while (true) + cpu_relax(); + } +- +- machine_name = of_flat_dt_get_machine_name(); + } + + /* +@@ -363,14 +376,12 @@ static int __init arm64_device_init(void) + } + arch_initcall(arm64_device_init); + +-static DEFINE_PER_CPU(struct cpu, cpu_data); +- + static int __init topology_init(void) + { + int i; + + for_each_possible_cpu(i) { +- struct cpu *cpu = &per_cpu(cpu_data, i); ++ struct cpu *cpu = &per_cpu(cpu_data.cpu, i); + cpu->hotpluggable = 1; + register_cpu(cpu, i); + } +@@ -391,14 +402,41 @@ static const char *hwcap_str[] = { + NULL + }; + ++#ifdef CONFIG_COMPAT ++static const char *compat_hwcap_str[] = { ++ "swp", ++ "half", ++ "thumb", ++ "26bit", ++ "fastmult", ++ "fpa", ++ "vfp", ++ "edsp", ++ "java", ++ "iwmmxt", ++ "crunch", ++ "thumbee", ++ "neon", ++ "vfpv3", ++ "vfpv3d16", ++ "tls", ++ "vfpv4", ++ "idiva", ++ "idivt", ++ "vfpd32", ++ "lpae", ++ "evtstrm" ++}; ++#endif /* CONFIG_COMPAT */ ++ + static int c_show(struct seq_file *m, void *v) + { +- int i; +- +- seq_printf(m, "Processor\t: %s rev %d (%s)\n", +- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); ++ int i, j; + + for_each_online_cpu(i) { ++ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); ++ u32 midr = cpuinfo->reg_midr; ++ + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with +@@ -407,24 +445,33 @@ static int c_show(struct seq_file *m, void *v) + #ifdef CONFIG_SMP + seq_printf(m, "processor\t: %d\n", i); + #endif +- } +- +- /* dump out the processor features */ +- seq_puts(m, "Features\t: "); +- +- for (i = 0; hwcap_str[i]; i++) +- if (elf_hwcap & (1 << i)) +- seq_printf(m, "%s ", hwcap_str[i]); + +- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); +- 
seq_printf(m, "CPU architecture: AArch64\n"); +- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); +- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); +- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); +- +- seq_puts(m, "\n"); ++ /* ++ * Dump out the common processor features in a single line. ++ * Userspace should read the hwcaps with getauxval(AT_HWCAP) ++ * rather than attempting to parse this, but there's a body of ++ * software which does already (at least for 32-bit). ++ */ ++ seq_puts(m, "Features\t:"); ++ if (personality(current->personality) == PER_LINUX32) { ++#ifdef CONFIG_COMPAT ++ for (j = 0; compat_hwcap_str[j]; j++) ++ if (compat_elf_hwcap & (1 << j)) ++ seq_printf(m, " %s", compat_hwcap_str[j]); ++#endif /* CONFIG_COMPAT */ ++ } else { ++ for (j = 0; hwcap_str[j]; j++) ++ if (elf_hwcap & (1 << j)) ++ seq_printf(m, " %s", hwcap_str[j]); ++ } ++ seq_puts(m, "\n"); + +- seq_printf(m, "Hardware\t: %s\n", machine_name); ++ seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24)); ++ seq_printf(m, "CPU architecture: 8\n"); ++ seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf)); ++ seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff)); ++ seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf)); ++ } + + return 0; + } +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c +index 7cfb92a..7b9542b 100644 +--- a/arch/arm64/kernel/smp.c ++++ b/arch/arm64/kernel/smp.c +@@ -148,6 +148,11 @@ asmlinkage void secondary_start_kernel(void) + cpu_ops[cpu]->cpu_postboot(); + + /* ++ * Log the CPU info before it is marked online and might get read. ++ */ ++ cpuinfo_store_cpu(); ++ ++ /* + * Enable GIC and timers. + */ + notify_cpu_starting(cpu); +diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c +index 67a078f..34467ac 100644 +--- a/arch/mips/cavium-octeon/smp.c ++++ b/arch/mips/cavium-octeon/smp.c +@@ -263,9 +263,7 @@ static int octeon_cpu_disable(void) + + set_cpu_online(cpu, false); + cpu_clear(cpu, cpu_callin_map); +- local_irq_disable(); + octeon_fixup_irqs(); +- local_irq_enable(); + + flush_cache_all(); + local_flush_tlb_all(); +diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c +index e498f2b..f5598e2 100644 +--- a/arch/mips/kernel/irq_cpu.c ++++ b/arch/mips/kernel/irq_cpu.c +@@ -56,6 +56,8 @@ static struct irq_chip mips_cpu_irq_controller = { + .irq_mask_ack = mask_mips_irq, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, ++ .irq_disable = mask_mips_irq, ++ .irq_enable = unmask_mips_irq, + }; + + /* +@@ -92,6 +94,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = { + .irq_mask_ack = mips_mt_cpu_irq_ack, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, ++ .irq_disable = mask_mips_irq, ++ .irq_enable = unmask_mips_irq, + }; + + void __init mips_cpu_irq_init(void) +diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c +index 0a022ee..18ed112 100644 +--- a/arch/mips/kernel/smp.c ++++ b/arch/mips/kernel/smp.c +@@ -109,10 +109,10 @@ asmlinkage void start_secondary(void) + else + #endif /* CONFIG_MIPS_MT_SMTC */ + cpu_probe(); +- cpu_report(); + per_cpu_trap_init(false); + mips_clockevent_init(); + mp_ops->init_secondary(); ++ cpu_report(); + + /* + * XXX parity protection should be folded in here when it's converted +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index de42688..80c22a3 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -441,6 +441,7 @@ struct vcpu_vmx { + #endif + int gs_ldt_reload_needed; + int 
fs_reload_needed; ++ unsigned long vmcs_host_cr4; /* May not match real cr4 */ + } host_state; + struct { + int vm86_active; +@@ -4165,11 +4166,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) + u32 low32, high32; + unsigned long tmpl; + struct desc_ptr dt; ++ unsigned long cr4; + + vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ +- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ + ++ /* Save the most likely value for this task's CR4 in the VMCS. */ ++ cr4 = read_cr4(); ++ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ ++ vmx->host_state.vmcs_host_cr4 = cr4; ++ + vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ + #ifdef CONFIG_X86_64 + /* +@@ -7196,7 +7202,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) + static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); +- unsigned long debugctlmsr; ++ unsigned long debugctlmsr, cr4; + + /* Record the guest's net vcpu time for enforced NMI injections. */ + if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) +@@ -7217,6 +7223,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) + vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); + ++ cr4 = read_cr4(); ++ if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { ++ vmcs_writel(HOST_CR4, cr4); ++ vmx->host_state.vmcs_host_cr4 = cr4; ++ } ++ + /* When single-stepping over STI and MOV SS, we must clear the + * corresponding interruptibility bits in the guest state. Otherwise + * vmentry fails as it then expects bit 14 (BS) in pending debug +diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c +index 981c2db..88f143d 100644 +--- a/arch/x86/pci/common.c ++++ b/arch/x86/pci/common.c +@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), + }, + }, ++ { ++ .callback = set_scan_all, ++ .ident = "Stratus/NEC ftServer", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "NEC"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"), ++ }, ++ }, ++ { ++ .callback = set_scan_all, ++ .ident = "Stratus/NEC ftServer", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "NEC"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"), ++ }, ++ }, + {} + }; + +diff --git a/crypto/crc32c.c b/crypto/crc32c.c +index 06f7018..238f0e6 100644 +--- a/crypto/crc32c.c ++++ b/crypto/crc32c.c +@@ -170,3 +170,4 @@ module_exit(crc32c_mod_fini); + MODULE_AUTHOR("Clay Haapala "); + MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS_CRYPTO("crc32c"); +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index ccbffd0..8f42bd7 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -911,6 +911,7 @@ int gpiod_export_link(struct device *dev, const char *name, + if (tdev != NULL) { + status = sysfs_create_link(&dev->kobj, &tdev->kobj, + name); ++ put_device(tdev); + } else { + status = -ENODEV; + } +@@ -958,7 +959,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) + } + + status = sysfs_set_active_low(desc, dev, value); +- ++ put_device(dev); + unlock: + mutex_unlock(&sysfs_lock); + +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index d375322..0218a9b 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) + struct cifsLockInfo *li, 
*tmp; + struct cifs_fid fid; + struct cifs_pending_open open; ++ bool oplock_break_cancelled; + + spin_lock(&cifs_file_list_lock); + if (--cifs_file->count > 0) { +@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) + } + spin_unlock(&cifs_file_list_lock); + +- cancel_work_sync(&cifs_file->oplock_break); ++ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); + + if (!tcon->need_reconnect && !cifs_file->invalidHandle) { + struct TCP_Server_Info *server = tcon->ses->server; +@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) + _free_xid(xid); + } + ++ if (oplock_break_cancelled) ++ cifs_done_oplock_break(cifsi); ++ + cifs_del_pending_open(&open); + + /* +diff --git a/fs/ext4/file.c b/fs/ext4/file.c +index 2a8b2e1..589117e 100644 +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -100,7 +100,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, + struct blk_plug plug; + int unaligned_aio = 0; + ssize_t ret; +- int overwrite = 0; ++ int *overwrite = iocb->private; + size_t length = iov_length(iov, nr_segs); + + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && +@@ -118,8 +118,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, + mutex_lock(&inode->i_mutex); + blk_start_plug(&plug); + +- iocb->private = &overwrite; +- + /* check whether we do a DIO overwrite or not */ + if (ext4_should_dioread_nolock(inode) && !unaligned_aio && + !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) { +@@ -143,7 +141,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, + * So we should check these two conditions. + */ + if (err == len && (map.m_flags & EXT4_MAP_MAPPED)) +- overwrite = 1; ++ *overwrite = 1; + } + + ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); +@@ -170,6 +168,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, + { + struct inode *inode = file_inode(iocb->ki_filp); + ssize_t ret; ++ int overwrite = 0; + + /* + * If we have encountered a bitmap-format file, the size limit +@@ -190,6 +189,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, + } + } + ++ iocb->private = &overwrite; + if (unlikely(iocb->ki_filp->f_flags & O_DIRECT)) + ret = ext4_file_dio_write(iocb, iov, nr_segs, pos); + else +diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h +index 9bc72de..b02c202 100644 +--- a/fs/nilfs2/nilfs.h ++++ b/fs/nilfs2/nilfs.h +@@ -141,7 +141,6 @@ enum { + * @ti_save: Backup of journal_info field of task_struct + * @ti_flags: Flags + * @ti_count: Nest level +- * @ti_garbage: List of inode to be put when releasing semaphore + */ + struct nilfs_transaction_info { + u32 ti_magic; +@@ -150,7 +149,6 @@ struct nilfs_transaction_info { + one of other filesystems has a bug. 
*/ + unsigned short ti_flags; + unsigned short ti_count; +- struct list_head ti_garbage; + }; + + /* ti_magic */ +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index a1a1916..5bee816 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb, + ti->ti_count = 0; + ti->ti_save = cur_ti; + ti->ti_magic = NILFS_TI_MAGIC; +- INIT_LIST_HEAD(&ti->ti_garbage); + current->journal_info = ti; + + for (;;) { +@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb) + + up_write(&nilfs->ns_segctor_sem); + current->journal_info = ti->ti_save; +- if (!list_empty(&ti->ti_garbage)) +- nilfs_dispose_list(nilfs, &ti->ti_garbage, 0); + } + + static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, +@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs, + } + } + ++static void nilfs_iput_work_func(struct work_struct *work) ++{ ++ struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, ++ sc_iput_work); ++ struct the_nilfs *nilfs = sci->sc_super->s_fs_info; ++ ++ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0); ++} ++ + static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, + struct nilfs_root *root) + { +@@ -1899,8 +1905,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, + static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, + struct the_nilfs *nilfs) + { +- struct nilfs_transaction_info *ti = current->journal_info; + struct nilfs_inode_info *ii, *n; ++ int defer_iput = false; + + spin_lock(&nilfs->ns_inode_lock); + list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { +@@ -1911,9 +1917,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, + clear_bit(NILFS_I_BUSY, &ii->i_state); + brelse(ii->i_bh); + ii->i_bh = NULL; +- list_move_tail(&ii->i_dirty, &ti->ti_garbage); ++ list_del_init(&ii->i_dirty); ++ if (!ii->vfs_inode.i_nlink) { ++ /* ++ * Defer calling iput() to avoid a deadlock ++ * over I_SYNC flag for inodes with i_nlink == 0 ++ */ ++ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); ++ defer_iput = true; ++ } else { ++ spin_unlock(&nilfs->ns_inode_lock); ++ iput(&ii->vfs_inode); ++ spin_lock(&nilfs->ns_inode_lock); ++ } + } + spin_unlock(&nilfs->ns_inode_lock); ++ ++ if (defer_iput) ++ schedule_work(&sci->sc_iput_work); + } + + /* +@@ -2580,6 +2601,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, + INIT_LIST_HEAD(&sci->sc_segbufs); + INIT_LIST_HEAD(&sci->sc_write_logs); + INIT_LIST_HEAD(&sci->sc_gc_inodes); ++ INIT_LIST_HEAD(&sci->sc_iput_queue); ++ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); + init_timer(&sci->sc_timer); + + sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; +@@ -2606,6 +2629,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) + ret = nilfs_segctor_construct(sci, SC_LSEG_SR); + nilfs_transaction_unlock(sci->sc_super); + ++ flush_work(&sci->sc_iput_work); ++ + } while (ret && retrycount-- > 0); + } + +@@ -2630,6 +2655,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) + || sci->sc_seq_request != sci->sc_seq_done); + spin_unlock(&sci->sc_state_lock); + ++ if (flush_work(&sci->sc_iput_work)) ++ flag = true; ++ + if (flag || !nilfs_segctor_confirm(sci)) + nilfs_segctor_write_out(sci); + +@@ -2639,6 +2667,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) + nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); + } + ++ if (!list_empty(&sci->sc_iput_queue)) { ++ 
nilfs_warning(sci->sc_super, __func__,
++ "iput queue is not empty\n");
++ nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
++ }
++
+ WARN_ON(!list_empty(&sci->sc_segbufs));
+ WARN_ON(!list_empty(&sci->sc_write_logs));
+
+diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
+index 38a1d00..a48d6de 100644
+--- a/fs/nilfs2/segment.h
++++ b/fs/nilfs2/segment.h
+@@ -26,6 +26,7 @@
+ #include <linux/types.h>
+ #include <linux/fs.h>
+ #include <linux/buffer_head.h>
++#include <linux/workqueue.h>
+ #include <linux/nilfs2_fs.h>
+ #include "nilfs.h"
+
+@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
+ * @sc_nblk_inc: Block count of current generation
+ * @sc_dirty_files: List of files to be written
+ * @sc_gc_inodes: List of GC inodes having blocks to be written
++ * @sc_iput_queue: list of inodes for which iput should be done
++ * @sc_iput_work: work struct to defer iput call
+ * @sc_freesegs: array of segment numbers to be freed
+ * @sc_nfreesegs: number of segments on @sc_freesegs
+ * @sc_dsync_inode: inode whose data pages are written for a sync operation
+@@ -135,6 +138,8 @@ struct nilfs_sc_info {
+
+ struct list_head sc_dirty_files;
+ struct list_head sc_gc_inodes;
++ struct list_head sc_iput_queue;
++ struct work_struct sc_iput_work;
+
+ __u64 *sc_freesegs;
+ size_t sc_nfreesegs;
+diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
+index 2609048..3a34f6e 100644
+--- a/include/sound/ak4113.h
++++ b/include/sound/ak4113.h
+@@ -286,7 +286,7 @@ struct ak4113 {
+ ak4113_write_t *write;
+ ak4113_read_t *read;
+ void *private_data;
+- unsigned int init:1;
++ atomic_t wq_processing;
+ spinlock_t lock;
+ unsigned char regmap[AK4113_WRITABLE_REGS];
+ struct snd_kcontrol *kctls[AK4113_CONTROLS];
+diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
+index 52f02a6..069299a 100644
+--- a/include/sound/ak4114.h
++++ b/include/sound/ak4114.h
+@@ -168,7 +168,7 @@ struct ak4114 {
+ ak4114_write_t * write;
+ ak4114_read_t * read;
+ void * private_data;
+- unsigned int init: 1;
++ atomic_t wq_processing;
+ spinlock_t lock;
+ unsigned char regmap[6];
+ unsigned char txcsb[5];
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index eb89e18..60d35ac5 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -279,6 +279,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ unsigned int cpu;
+ int ret = 0;
+
++ get_online_cpus();
+ mutex_lock(&smpboot_threads_lock);
+ for_each_online_cpu(cpu) {
+ ret = __smpboot_create_thread(plug_thread, cpu);
+@@ -291,6 +292,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+ list_add(&plug_thread->list, &hotplug_threads);
+ out:
+ mutex_unlock(&smpboot_threads_lock);
++ put_online_cpus();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
+diff --git a/lib/checksum.c b/lib/checksum.c
+index 129775e..8b39e86 100644
+--- a/lib/checksum.c
++++ b/lib/checksum.c
+@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+ EXPORT_SYMBOL(csum_partial_copy);
+
+ #ifndef csum_tcpudp_nofold
++static inline u32 from64to32(u64 x)
++{
++ /* add up 32-bit and 32-bit for 32+c bit */
++ x = (x & 0xffffffff) + (x >> 32);
++ /* add up carry..
*/ ++ x = (x & 0xffffffff) + (x >> 32); ++ return (u32)x; ++} ++ + __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, +@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + #else + s += (proto + len) << 8; + #endif +- s += (s >> 32); +- return (__force __wsum)s; ++ return (__force __wsum)from64to32(s); + } + EXPORT_SYMBOL(csum_tcpudp_nofold); + #endif +diff --git a/mm/pagewalk.c b/mm/pagewalk.c +index 2beeabf..9056d22 100644 +--- a/mm/pagewalk.c ++++ b/mm/pagewalk.c +@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end, + */ + if ((vma->vm_start <= addr) && + (vma->vm_flags & VM_PFNMAP)) { +- next = vma->vm_end; ++ if (walk->pte_hole) ++ err = walk->pte_hole(addr, next, walk); ++ if (err) ++ break; + pgd = pgd_offset(walk->mm, next); + continue; + } +diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c +index db1512a..f53542b 100644 +--- a/scripts/kconfig/menu.c ++++ b/scripts/kconfig/menu.c +@@ -545,7 +545,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop, + { + int i, j; + struct menu *submenu[8], *menu, *location = NULL; +- struct jump_key *jump; ++ struct jump_key *jump = NULL; + + str_printf(r, _("Prompt: %s\n"), _(prop->text)); + menu = prop->menu->parent; +@@ -583,7 +583,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop, + str_printf(r, _(" Location:\n")); + for (j = 4; --i >= 0; j += 2) { + menu = submenu[i]; +- if (head && location && menu == location) ++ if (jump && menu == location) + jump->offset = strlen(r->s); + str_printf(r, "%*c-> %s", j, ' ', + _(menu_get_prompt(menu))); +diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c +index e04e750..7a9149b 100644 +--- a/sound/i2c/other/ak4113.c ++++ b/sound/i2c/other/ak4113.c +@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg) + + static void snd_ak4113_free(struct ak4113 *chip) + { +- chip->init = 1; /* don't schedule new work */ +- mb(); ++ atomic_inc(&chip->wq_processing); /* don't schedule new work */ + cancel_delayed_work_sync(&chip->work); + kfree(chip); + } +@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, + chip->write = write; + chip->private_data = private_data; + INIT_DELAYED_WORK(&chip->work, ak4113_stats); ++ atomic_set(&chip->wq_processing, 0); + + for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++) + chip->regmap[reg] = pgm[reg]; +@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip) + + void snd_ak4113_reinit(struct ak4113 *chip) + { +- chip->init = 1; +- mb(); +- flush_delayed_work(&chip->work); ++ if (atomic_inc_return(&chip->wq_processing) == 1) ++ cancel_delayed_work_sync(&chip->work); + ak4113_init_regs(chip); + /* bring up statistics / event queing */ +- chip->init = 0; +- if (chip->kctls[0]) ++ if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); + } + EXPORT_SYMBOL_GPL(snd_ak4113_reinit); +@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work) + { + struct ak4113 *chip = container_of(work, struct ak4113, work.work); + +- if (!chip->init) ++ if (atomic_inc_return(&chip->wq_processing) == 1) + snd_ak4113_check_rate_and_errors(chip, chip->check_flags); + +- schedule_delayed_work(&chip->work, HZ / 10); ++ if (atomic_dec_and_test(&chip->wq_processing)) ++ schedule_delayed_work(&chip->work, HZ / 10); + } +diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c +index 15ae025..bf515db 100644 +--- 
a/sound/i2c/other/ak4114.c ++++ b/sound/i2c/other/ak4114.c +@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114) + + static void snd_ak4114_free(struct ak4114 *chip) + { +- chip->init = 1; /* don't schedule new work */ +- mb(); ++ atomic_inc(&chip->wq_processing); /* don't schedule new work */ + cancel_delayed_work_sync(&chip->work); + kfree(chip); + } +@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card, + chip->write = write; + chip->private_data = private_data; + INIT_DELAYED_WORK(&chip->work, ak4114_stats); ++ atomic_set(&chip->wq_processing, 0); + + for (reg = 0; reg < 6; reg++) + chip->regmap[reg] = pgm[reg]; +@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip) + + void snd_ak4114_reinit(struct ak4114 *chip) + { +- chip->init = 1; +- mb(); +- flush_delayed_work(&chip->work); ++ if (atomic_inc_return(&chip->wq_processing) == 1) ++ cancel_delayed_work_sync(&chip->work); + ak4114_init_regs(chip); + /* bring up statistics / event queing */ +- chip->init = 0; +- if (chip->kctls[0]) ++ if (atomic_dec_and_test(&chip->wq_processing)) + schedule_delayed_work(&chip->work, HZ / 10); + } + +@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work) + { + struct ak4114 *chip = container_of(work, struct ak4114, work.work); + +- if (!chip->init) ++ if (atomic_inc_return(&chip->wq_processing) == 1) + snd_ak4114_check_rate_and_errors(chip, chip->check_flags); +- +- schedule_delayed_work(&chip->work, HZ / 10); ++ if (atomic_dec_and_test(&chip->wq_processing)) ++ schedule_delayed_work(&chip->work, HZ / 10); + } + + EXPORT_SYMBOL(snd_ak4114_create); +diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c +index 1ead3c9..f20e703 100644 +--- a/sound/soc/atmel/atmel_ssc_dai.c ++++ b/sound/soc/atmel/atmel_ssc_dai.c +@@ -344,7 +344,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, + struct atmel_pcm_dma_params *dma_params; + int dir, channels, bits; + u32 tfmr, rfmr, tcmr, rcmr; +- int start_event; + int ret; + + /* +@@ -451,19 +450,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, + * The SSC transmit clock is obtained from the BCLK signal on + * on the TK line, and the SSC receive clock is + * generated from the transmit clock. +- * +- * For single channel data, one sample is transferred +- * on the falling edge of the LRC clock. +- * For two channel data, one sample is +- * transferred on both edges of the LRC clock. + */ +- start_event = ((channels == 1) +- ? 
SSC_START_FALLING_RF +- : SSC_START_EDGE_RF); +- + rcmr = SSC_BF(RCMR_PERIOD, 0) + | SSC_BF(RCMR_STTDLY, START_DELAY) +- | SSC_BF(RCMR_START, start_event) ++ | SSC_BF(RCMR_START, SSC_START_FALLING_RF) + | SSC_BF(RCMR_CKI, SSC_CKI_RISING) + | SSC_BF(RCMR_CKO, SSC_CKO_NONE) + | SSC_BF(RCMR_CKS, SSC_CKS_CLOCK); +@@ -471,14 +461,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, + rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE) + | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE) + | SSC_BF(RFMR_FSLEN, 0) +- | SSC_BF(RFMR_DATNB, 0) ++ | SSC_BF(RFMR_DATNB, (channels - 1)) + | SSC_BIT(RFMR_MSBF) + | SSC_BF(RFMR_LOOP, 0) + | SSC_BF(RFMR_DATLEN, (bits - 1)); + + tcmr = SSC_BF(TCMR_PERIOD, 0) + | SSC_BF(TCMR_STTDLY, START_DELAY) +- | SSC_BF(TCMR_START, start_event) ++ | SSC_BF(TCMR_START, SSC_START_FALLING_RF) + | SSC_BF(TCMR_CKI, SSC_CKI_FALLING) + | SSC_BF(TCMR_CKO, SSC_CKO_NONE) + | SSC_BF(TCMR_CKS, SSC_CKS_PIN); +@@ -487,7 +477,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, + | SSC_BF(TFMR_FSDEN, 0) + | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE) + | SSC_BF(TFMR_FSLEN, 0) +- | SSC_BF(TFMR_DATNB, 0) ++ | SSC_BF(TFMR_DATNB, (channels - 1)) + | SSC_BIT(TFMR_MSBF) + | SSC_BF(TFMR_DATDEF, 0) + | SSC_BF(TFMR_DATLEN, (bits - 1)); +diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c +index 12528e9..715589f 100644 +--- a/sound/soc/codecs/sgtl5000.c ++++ b/sound/soc/codecs/sgtl5000.c +@@ -1521,6 +1521,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, + if (ret) + return ret; + ++ /* Need 8 clocks before I2C accesses */ ++ udelay(1); ++ + /* read chip information */ + ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, ®); + if (ret) diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.33-34.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.33-34.patch new file mode 100644 index 0000000000..cb65bdfb30 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.33-34.patch @@ -0,0 +1,584 @@ +diff --git a/Makefile b/Makefile +index b0963ca..5443481 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 33 ++SUBLEVEL = 34 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c +index ed2c8a1..98893a8 100644 +--- a/drivers/media/rc/ir-lirc-codec.c ++++ b/drivers/media/rc/ir-lirc-codec.c +@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev) + return -EINVAL; + + /* Packet start */ +- if (ev.reset) +- return 0; ++ if (ev.reset) { ++ /* Userspace expects a long space event before the start of ++ * the signal to use as a sync. This may be done with repeat ++ * packets and normal samples. But if a reset has been sent ++ * then we assume that a long time has passed, so we send a ++ * space with the maximum time value. 
*/ ++ sample = LIRC_SPACE(LIRC_VALUE_MASK); ++ IR_dprintk(2, "delivering reset sync space to lirc_dev\n"); + + /* Carrier reports */ +- if (ev.carrier_report) { ++ } else if (ev.carrier_report) { + sample = LIRC_FREQUENCY(ev.carrier); + IR_dprintk(2, "carrier report (freq: %d)\n", sample); + +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +index afa4a1f..a830d42 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -3131,7 +3131,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) + } + #endif + if (!bnx2x_fp_lock_napi(fp)) +- return work_done; ++ return budget; + + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +index 70849de..5fa076f 100644 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -2390,7 +2390,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) + + work_done = netxen_process_rcv_ring(sds_ring, budget); + +- if ((work_done < budget) && tx_complete) { ++ if (!tx_complete) ++ work_done = budget; ++ ++ if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__NX_DEV_UP, &adapter->state)) + netxen_nic_enable_int(sds_ring); +diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c +index 602c625..b5edc7f 100644 +--- a/drivers/net/ppp/ppp_deflate.c ++++ b/drivers/net/ppp/ppp_deflate.c +@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, + /* + * See if we managed to reduce the size of the packet. + */ +- if (olen < isize) { ++ if (olen < isize && olen <= osize) { + state->stats.comp_bytes += olen; + state->stats.comp_packets++; + } else { +diff --git a/include/net/ip.h b/include/net/ip.h +index 937f196..3446cdd 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -38,11 +38,12 @@ struct inet_skb_parm { + struct ip_options opt; /* Compiled IP options */ + unsigned char flags; + +-#define IPSKB_FORWARDED 1 +-#define IPSKB_XFRM_TUNNEL_SIZE 2 +-#define IPSKB_XFRM_TRANSFORMED 4 +-#define IPSKB_FRAG_COMPLETE 8 +-#define IPSKB_REROUTED 16 ++#define IPSKB_FORWARDED BIT(0) ++#define IPSKB_XFRM_TUNNEL_SIZE BIT(1) ++#define IPSKB_XFRM_TRANSFORMED BIT(2) ++#define IPSKB_FRAG_COMPLETE BIT(3) ++#define IPSKB_REROUTED BIT(4) ++#define IPSKB_DOREDIRECT BIT(5) + + u16 frag_max_size; + }; +@@ -174,7 +175,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) + return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? 
FLOWI_FLAG_ANYSRC : 0; + } + +-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, ++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, + __be32 saddr, const struct ip_reply_arg *arg, + unsigned int len); + +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index 80f500a..57c2da9 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -47,6 +47,7 @@ struct netns_ipv4 { + struct inet_peer_base *peers; + struct tcpm_hash_bucket *tcp_metrics_hash; + unsigned int tcp_metrics_hash_log; ++ struct sock * __percpu *tcp_sk; + struct netns_frags frags; + #ifdef CONFIG_NETFILTER + struct xt_table *iptable_filter; +diff --git a/net/core/dev.c b/net/core/dev.c +index 86bb9cc..4ed77d7 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -6812,10 +6812,20 @@ static int dev_cpu_callback(struct notifier_block *nfb, + oldsd->output_queue = NULL; + oldsd->output_queue_tailp = &oldsd->output_queue; + } +- /* Append NAPI poll list from offline CPU. */ +- if (!list_empty(&oldsd->poll_list)) { +- list_splice_init(&oldsd->poll_list, &sd->poll_list); +- raise_softirq_irqoff(NET_RX_SOFTIRQ); ++ /* Append NAPI poll list from offline CPU, with one exception : ++ * process_backlog() must be called by cpu owning percpu backlog. ++ * We properly handle process_queue & input_pkt_queue later. ++ */ ++ while (!list_empty(&oldsd->poll_list)) { ++ struct napi_struct *napi = list_first_entry(&oldsd->poll_list, ++ struct napi_struct, ++ poll_list); ++ ++ list_del_init(&napi->poll_list); ++ if (napi->poll == process_backlog) ++ napi->state = 0; ++ else ++ ____napi_schedule(sd, napi); + } + + raise_softirq_irqoff(NET_TX_SOFTIRQ); +@@ -6826,7 +6836,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, + netif_rx_internal(skb); + input_queue_head_incr(oldsd); + } +- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { ++ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { + netif_rx_internal(skb); + input_queue_head_incr(oldsd); + } +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 4617586..a6613ff 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -2649,12 +2649,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags) + goto errout; + } + ++ if (!skb->len) ++ goto errout; ++ + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); + return 0; + errout: + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); +- rtnl_set_sk_err(net, RTNLGRP_LINK, err); ++ if (err) ++ rtnl_set_sk_err(net, RTNLGRP_LINK, err); + return err; + } + +diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c +index 1c6bd43..ecb34b5 100644 +--- a/net/ipv4/ip_forward.c ++++ b/net/ipv4/ip_forward.c +@@ -178,7 +178,8 @@ int ip_forward(struct sk_buff *skb) + * We now generate an ICMP HOST REDIRECT giving the route + * we calculated. + */ +- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) ++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && ++ !skb_sec_path(skb)) + ip_rt_send_redirect(skb); + + skb->priority = rt_tos2priority(iph->tos); +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 844323b..dd637fc 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -1460,23 +1460,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset, + /* + * Generic function to send a packet as reply to another packet. + * Used to send some TCP resets/acks so far. +- * +- * Use a fake percpu inet socket to avoid false sharing and contention. 
+ */ +-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { +- .sk = { +- .__sk_common = { +- .skc_refcnt = ATOMIC_INIT(1), +- }, +- .sk_wmem_alloc = ATOMIC_INIT(1), +- .sk_allocation = GFP_ATOMIC, +- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE), +- }, +- .pmtudisc = IP_PMTUDISC_WANT, +- .uc_ttl = -1, +-}; +- +-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, ++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr, + __be32 saddr, const struct ip_reply_arg *arg, + unsigned int len) + { +@@ -1484,9 +1469,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + struct ipcm_cookie ipc; + struct flowi4 fl4; + struct rtable *rt = skb_rtable(skb); ++ struct net *net = sock_net(sk); + struct sk_buff *nskb; +- struct sock *sk; +- struct inet_sock *inet; + int err; + + if (ip_options_echo(&replyopts.opt.opt, skb)) +@@ -1516,15 +1500,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + if (IS_ERR(rt)) + return; + +- inet = &get_cpu_var(unicast_sock); ++ inet_sk(sk)->tos = arg->tos; + +- inet->tos = arg->tos; +- sk = &inet->sk; + sk->sk_priority = skb->priority; + sk->sk_protocol = ip_hdr(skb)->protocol; + sk->sk_bound_dev_if = arg->bound_dev_if; +- sock_net_set(sk, net); +- __skb_queue_head_init(&sk->sk_write_queue); + sk->sk_sndbuf = sysctl_wmem_default; + err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, + len, 0, &ipc, &rt, MSG_DONTWAIT); +@@ -1540,13 +1520,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + arg->csumoffset) = csum_fold(csum_add(nskb->csum, + arg->csum)); + nskb->ip_summed = CHECKSUM_NONE; +- skb_orphan(nskb); + skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); + ip_push_pending_frames(sk, &fl4); + } + out: +- put_cpu_var(unicast_sock); +- + ip_rt_put(rt); + } + +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index 580dd96..135045e 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -426,15 +426,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) + + memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); + sin = &errhdr.offender; +- sin->sin_family = AF_UNSPEC; ++ memset(sin, 0, sizeof(*sin)); + if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) { +- struct inet_sock *inet = inet_sk(sk); +- + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; +- sin->sin_port = 0; +- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); +- if (inet->cmsg_flags) ++ if (inet_sk(sk)->cmsg_flags) + ip_cmsg_recv(msg, skb); + } + +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 0d33f94..04ce671 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -973,8 +973,11 @@ void ping_rcv(struct sk_buff *skb) + + sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); + if (sk != NULL) { ++ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ++ + pr_debug("rcv on socket %p\n", sk); +- ping_queue_rcv_skb(sk, skb_get(skb)); ++ if (skb2) ++ ping_queue_rcv_skb(sk, skb2); + sock_put(sk); + return; + } +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 487bb62..b64330f 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb, + + do_cache = res->fi && !itag; + if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && ++ skb->protocol == htons(ETH_P_IP) && + (IN_DEV_SHARED_MEDIA(out_dev) || +- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { +- flags |= 
RTCF_DOREDIRECT; +- do_cache = false; +- } ++ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) ++ IPCB(skb)->flags |= IPSKB_DOREDIRECT; + + if (skb->protocol != htons(ETH_P_IP)) { + /* Not IP (i.e. ARP). Do not create route, if it is +@@ -2305,6 +2304,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, + r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; + if (rt->rt_flags & RTCF_NOTIFY) + r->rtm_flags |= RTM_F_NOTIFY; ++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT) ++ r->rtm_flags |= RTCF_DOREDIRECT; + + if (nla_put_be32(skb, RTA_DST, dst)) + goto nla_put_failure; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index a782d5b..b7effad 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -691,7 +691,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) + + net = dev_net(skb_dst(skb)->dev); + arg.tos = ip_hdr(skb)->tos; +- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr, ++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), ++ skb, ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); + + TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); +@@ -774,7 +775,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, + if (oif) + arg.bound_dev_if = oif; + arg.tos = tos; +- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr, ++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), ++ skb, ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); + + TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); +@@ -2769,14 +2771,39 @@ struct proto tcp_prot = { + }; + EXPORT_SYMBOL(tcp_prot); + ++static void __net_exit tcp_sk_exit(struct net *net) ++{ ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); ++ free_percpu(net->ipv4.tcp_sk); ++} ++ + static int __net_init tcp_sk_init(struct net *net) + { ++ int res, cpu; ++ ++ net->ipv4.tcp_sk = alloc_percpu(struct sock *); ++ if (!net->ipv4.tcp_sk) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct sock *sk; ++ ++ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, ++ IPPROTO_TCP, net); ++ if (res) ++ goto fail; ++ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; ++ } + net->ipv4.sysctl_tcp_ecn = 2; + return 0; +-} + +-static void __net_exit tcp_sk_exit(struct net *net) +-{ ++fail: ++ tcp_sk_exit(net); ++ ++ return res; + } + + static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) +diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c +index 7927db0..4a000f1 100644 +--- a/net/ipv4/udp_diag.c ++++ b/net/ipv4/udp_diag.c +@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin + s_slot = cb->args[0]; + num = s_num = cb->args[1]; + +- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) { ++ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) { + struct sock *sk; + struct hlist_nulls_node *node; + struct udp_hslot *hslot = &table->hash[slot]; + ++ num = 0; ++ + if (hlist_nulls_empty(&hslot->head)) + continue; + +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index c3bf2d2..841cfa2 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -382,11 +382,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) + + memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); + sin = &errhdr.offender; +- sin->sin6_family = AF_UNSPEC; ++ memset(sin, 0, sizeof(*sin)); ++ + if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { + sin->sin6_family = AF_INET6; +- sin->sin6_flowinfo = 0; +- sin->sin6_port 
= 0; + if (np->rxopt.all) + ip6_datagram_recv_common_ctl(sk, msg, skb); + if (skb->protocol == htons(ETH_P_IPV6)) { +@@ -397,12 +396,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) + ipv6_iface_scope_id(&sin->sin6_addr, + IP6CB(skb)->iif); + } else { +- struct inet_sock *inet = inet_sk(sk); +- + ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, + &sin->sin6_addr); +- sin->sin6_scope_id = 0; +- if (inet->cmsg_flags) ++ if (inet_sk(sk)->cmsg_flags) + ip_cmsg_recv(msg, skb); + } + } +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c +index 1e55f5e..7daaeaf 100644 +--- a/net/ipv6/ip6_fib.c ++++ b/net/ipv6/ip6_fib.c +@@ -638,6 +638,29 @@ static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt) + RTF_GATEWAY; + } + ++static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, ++ struct net *net) ++{ ++ if (atomic_read(&rt->rt6i_ref) != 1) { ++ /* This route is used as dummy address holder in some split ++ * nodes. It is not leaked, but it still holds other resources, ++ * which must be released in time. So, scan ascendant nodes ++ * and replace dummy references to this route with references ++ * to still alive ones. ++ */ ++ while (fn) { ++ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { ++ fn->leaf = fib6_find_prefix(net, fn); ++ atomic_inc(&fn->leaf->rt6i_ref); ++ rt6_release(rt); ++ } ++ fn = fn->parent; ++ } ++ /* No more references are possible at this point. */ ++ BUG_ON(atomic_read(&rt->rt6i_ref) != 1); ++ } ++} ++ + /* + * Insert routing information in a node. + */ +@@ -775,11 +798,12 @@ add: + rt->dst.rt6_next = iter->dst.rt6_next; + atomic_inc(&rt->rt6i_ref); + inet6_rt_notify(RTM_NEWROUTE, rt, info); +- rt6_release(iter); + if (!(fn->fn_flags & RTN_RTINFO)) { + info->nl_net->ipv6.rt6_stats->fib_route_nodes++; + fn->fn_flags |= RTN_RTINFO; + } ++ fib6_purge_rt(iter, fn, info->nl_net); ++ rt6_release(iter); + } + + return 0; +@@ -1284,24 +1308,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, + fn = fib6_repair_tree(net, fn); + } + +- if (atomic_read(&rt->rt6i_ref) != 1) { +- /* This route is used as dummy address holder in some split +- * nodes. It is not leaked, but it still holds other resources, +- * which must be released in time. So, scan ascendant nodes +- * and replace dummy references to this route with references +- * to still alive ones. +- */ +- while (fn) { +- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { +- fn->leaf = fib6_find_prefix(net, fn); +- atomic_inc(&fn->leaf->rt6i_ref); +- rt6_release(rt); +- } +- fn = fn->parent; +- } +- /* No more references are possible at this point. 
*/ +- BUG_ON(atomic_read(&rt->rt6i_ref) != 1); +- } ++ fib6_purge_rt(rt, fn, net); + + inet6_rt_notify(RTM_DELROUTE, rt, info); + rt6_release(rt); +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 7cc1102..6f1b850 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, + struct net *net = dev_net(dst->dev); + + rt6->rt6i_flags |= RTF_MODIFIED; +- if (mtu < IPV6_MIN_MTU) { +- u32 features = dst_metric(dst, RTAX_FEATURES); ++ if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; +- features |= RTAX_FEATURE_ALLFRAG; +- dst_metric_set(dst, RTAX_FEATURES, features); +- } ++ + dst_metric_set(dst, RTAX_MTU, mtu); + rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires); + } +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index d477d47..abc0922 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1235,7 +1235,6 @@ void sctp_assoc_update(struct sctp_association *asoc, + asoc->peer.peer_hmacs = new->peer.peer_hmacs; + new->peer.peer_hmacs = NULL; + +- sctp_auth_key_put(asoc->asoc_shared_key); + sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); + } + +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c +index 43abb64..df06b13 100644 +--- a/net/sctp/sm_make_chunk.c ++++ b/net/sctp/sm_make_chunk.c +@@ -2608,7 +2608,7 @@ do_addr_param: + + addr_param = param.v + sizeof(sctp_addip_param_t); + +- af = sctp_get_af_specific(param_type2af(param.p->type)); ++ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); + if (af == NULL) + break; + +diff --git a/net/socket.c b/net/socket.c +index a19ae19..1b2c2d6 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -886,9 +886,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, + static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, + struct sock_iocb *siocb) + { +- if (!is_sync_kiocb(iocb)) +- BUG(); +- + siocb->kiocb = iocb; + iocb->private = siocb; + return siocb; diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.34-35.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.34-35.patch new file mode 100644 index 0000000000..c90d04e96c --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.34-35.patch @@ -0,0 +1,2036 @@ +diff --git a/Makefile b/Makefile +index 5443481..9720e86 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 34 ++SUBLEVEL = 35 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h +index 6b0b7f7e..7670f33 100644 +--- a/arch/arc/include/asm/pgtable.h ++++ b/arch/arc/include/asm/pgtable.h +@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) + #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) + + #define pte_page(x) (mem_map + \ +- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT))) ++ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \ ++ PAGE_SHIFT))) + + #define mk_pte(page, pgprot) \ + ({ \ +diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi +index 2e7d932..b3eff40 100644 +--- a/arch/arm/boot/dts/am335x-bone-common.dtsi ++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi +@@ -197,6 +197,7 @@ + + usb@47401000 { + status = "okay"; ++ dr_mode = "peripheral"; + }; + + usb@47401800 { +diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi +index 48d2a7f..ce978bc 100644 +--- a/arch/arm/boot/dts/tegra20.dtsi ++++ 
b/arch/arm/boot/dts/tegra20.dtsi +@@ -76,9 +76,9 @@ + reset-names = "2d"; + }; + +- gr3d@54140000 { ++ gr3d@54180000 { + compatible = "nvidia,tegra20-gr3d"; +- reg = <0x54140000 0x00040000>; ++ reg = <0x54180000 0x00040000>; + clocks = <&tegra_car TEGRA20_CLK_GR3D>; + resets = <&tegra_car 24>; + reset-names = "3d"; +@@ -138,9 +138,9 @@ + status = "disabled"; + }; + +- dsi@542c0000 { ++ dsi@54300000 { + compatible = "nvidia,tegra20-dsi"; +- reg = <0x542c0000 0x00040000>; ++ reg = <0x54300000 0x00040000>; + clocks = <&tegra_car TEGRA20_CLK_DSI>; + resets = <&tegra_car 48>; + reset-names = "dsi"; +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +index 2e35ff9..d3ac4c6 100644 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +@@ -1669,7 +1669,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = { + .class = &dra7xx_uart_hwmod_class, + .clkdm_name = "l4per_clkdm", + .main_clk = "uart3_gfclk_mux", +- .flags = HWMOD_SWSUP_SIDLE_ACT, ++ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS, + .prcm = { + .omap4 = { + .clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET, +diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c +index f162f1b..82fd9dd 100644 +--- a/arch/arm/mach-pxa/corgi.c ++++ b/arch/arm/mach-pxa/corgi.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -711,6 +712,8 @@ static void __init corgi_init(void) + sharpsl_nand_partitions[1].size = 53 * 1024 * 1024; + + platform_add_devices(devices, ARRAY_SIZE(devices)); ++ ++ regulator_has_full_constraints(); + } + + static void __init fixup_corgi(struct tag *tags, char **cmdline, +diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c +index a7c30eb..007fd8a 100644 +--- a/arch/arm/mach-pxa/hx4700.c ++++ b/arch/arm/mach-pxa/hx4700.c +@@ -892,6 +892,8 @@ static void __init hx4700_init(void) + mdelay(10); + gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1); + mdelay(10); ++ ++ regulator_has_full_constraints(); + } + + MACHINE_START(H4700, "HP iPAQ HX4700") +diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c +index aedf053..b4fff29 100644 +--- a/arch/arm/mach-pxa/poodle.c ++++ b/arch/arm/mach-pxa/poodle.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -454,6 +455,7 @@ static void __init poodle_init(void) + pxa_set_i2c_info(NULL); + i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices)); + poodle_init_spi(); ++ regulator_has_full_constraints(); + } + + static void __init fixup_poodle(struct tag *tags, char **cmdline, +diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c +index 6645d1e..34853d5 100644 +--- a/arch/arm/mach-sa1100/pm.c ++++ b/arch/arm/mach-sa1100/pm.c +@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state) + /* + * Ensure not to come back here if it wasn't intended + */ ++ RCSR = RCSR_SMR; + PSPR = 0; + + /* +diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c +index b3fc9f5..7ed72dc 100644 +--- a/arch/arm64/kernel/signal32.c ++++ b/arch/arm64/kernel/signal32.c +@@ -151,8 +151,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) + case __SI_TIMER: + err |= __put_user(from->si_tid, &to->si_tid); + err |= __put_user(from->si_overrun, &to->si_overrun); +- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, +- &to->si_ptr); ++ err |= __put_user(from->si_int, &to->si_int); + break; + case __SI_POLL: + 
err |= __put_user(from->si_band, &to->si_band); +@@ -181,7 +180,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) + case __SI_MESGQ: /* But this is */ + err |= __put_user(from->si_pid, &to->si_pid); + err |= __put_user(from->si_uid, &to->si_uid); +- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); ++ err |= __put_user(from->si_int, &to->si_int); + break; + default: /* this is just in case for now ... */ + err |= __put_user(from->si_pid, &to->si_pid); +diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h +index a8a3747..eb2005b 100644 +--- a/arch/metag/include/asm/processor.h ++++ b/arch/metag/include/asm/processor.h +@@ -149,8 +149,8 @@ extern void exit_thread(void); + + unsigned long get_wchan(struct task_struct *p); + +-#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) +-#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) ++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) ++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) + + #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) + +diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c +index 6e58e97..cedeb56 100644 +--- a/arch/mips/kernel/mips_ksyms.c ++++ b/arch/mips/kernel/mips_ksyms.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + extern void *__bzero(void *__s, size_t __count); + extern long __strncpy_from_user_nocheck_asm(char *__to, +@@ -26,6 +27,13 @@ extern long __strnlen_user_nocheck_asm(const char *s); + extern long __strnlen_user_asm(const char *s); + + /* ++ * Core architecture code ++ */ ++#ifdef CONFIG_CPU_R4K_FPU ++EXPORT_SYMBOL_GPL(_save_fp); ++#endif ++ ++/* + * String functions + */ + EXPORT_SYMBOL(memset); +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S +index bbace09..03a2db5 100644 +--- a/arch/mips/kvm/kvm_locore.S ++++ b/arch/mips/kvm/kvm_locore.S +@@ -428,7 +428,7 @@ __kvm_mips_return_to_guest: + /* Setup status register for running guest in UM */ + .set at + or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) +- and v1, v1, ~ST0_CU0 ++ and v1, v1, ~(ST0_CU0 | ST0_MX) + .set noat + mtc0 v1, CP0_STATUS + ehb +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 3e0ff8d..897c605 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -418,11 +419,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) + vcpu->mmio_needed = 0; + } + ++ lose_fpu(1); ++ ++ local_irq_disable(); + /* Check if we have any exceptions/interrupts pending */ + kvm_mips_deliver_interrupts(vcpu, + kvm_read_c0_guest_cause(vcpu->arch.cop0)); + +- local_irq_disable(); + kvm_guest_enter(); + + r = __kvm_mips_vcpu_run(run, vcpu); +@@ -1021,9 +1024,6 @@ void kvm_mips_set_c0_status(void) + { + uint32_t status = read_c0_status(); + +- if (cpu_has_fpu) +- status |= (ST0_CU1); +- + if (cpu_has_dsp) + status |= (ST0_MX); + +diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c +index 47b6b9f..830edc8 100644 +--- a/arch/powerpc/sysdev/axonram.c ++++ b/arch/powerpc/sysdev/axonram.c +@@ -156,7 +156,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector, + } + + *kaddr = (void *)(bank->ph_addr + offset); +- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; ++ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; + + return 0; + } +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index fab97ad..1777f89 
100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1207,21 +1207,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) + { + #ifdef CONFIG_X86_64 + bool vcpus_matched; +- bool do_request = false; + struct kvm_arch *ka = &vcpu->kvm->arch; + struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + + vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == + atomic_read(&vcpu->kvm->online_vcpus)); + +- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) +- if (!ka->use_master_clock) +- do_request = 1; +- +- if (!vcpus_matched && ka->use_master_clock) +- do_request = 1; +- +- if (do_request) ++ /* ++ * Once the masterclock is enabled, always perform request in ++ * order to update it. ++ * ++ * In order to enable masterclock, the host clocksource must be TSC ++ * and the vcpus need to have matched TSCs. When that happens, ++ * perform request to enable masterclock. ++ */ ++ if (ka->use_master_clock || ++ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched)) + kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); + + trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, +diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c +index 207d9aef..448ee89 100644 +--- a/arch/x86/mm/gup.c ++++ b/arch/x86/mm/gup.c +@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, + */ + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) + return 0; +- if (unlikely(pmd_large(pmd))) { ++ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index 8b977eb..006cc91 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) + return ERR_PTR(-EINVAL); + } + ++/* ++ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal ++ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. ++ * Otherwise, returns 0. 
++ */ + int pmd_huge(pmd_t pmd) + { +- return !!(pmd_val(pmd) & _PAGE_PSE); ++ return !pmd_none(pmd) && ++ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; + } + + int pud_huge(pud_t pud) +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 25e7e13..3601ff2 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -35,12 +35,12 @@ struct __read_mostly va_alignment va_align = { + .flags = -1, + }; + +-static unsigned int stack_maxrandom_size(void) ++static unsigned long stack_maxrandom_size(void) + { +- unsigned int max = 0; ++ unsigned long max = 0; + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { +- max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; ++ max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; + } + + return max; +diff --git a/block/blk-throttle.c b/block/blk-throttle.c +index 1474c3a..1599878 100644 +--- a/block/blk-throttle.c ++++ b/block/blk-throttle.c +@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, + struct blkg_rwstat rwstat = { }, tmp; + int i, cpu; + ++ if (tg->stats_cpu == NULL) ++ return 0; ++ + for_each_possible_cpu(cpu) { + struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); + +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 91c25f26..d9bba99 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -3585,6 +3585,11 @@ retry: + + blkcg = bio_blkcg(bio); + cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); ++ if (!cfqg) { ++ cfqq = &cfqd->oom_cfqq; ++ goto out; ++ } ++ + cfqq = cic_to_cfqq(cic, is_sync); + + /* +@@ -3621,7 +3626,7 @@ retry: + } else + cfqq = &cfqd->oom_cfqq; + } +- ++out: + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + +@@ -3651,12 +3656,17 @@ static struct cfq_queue * + cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, + struct bio *bio, gfp_t gfp_mask) + { +- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); +- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); ++ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); ++ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + struct cfq_queue **async_cfqq = NULL; + struct cfq_queue *cfqq = NULL; + + if (!is_sync) { ++ if (!ioprio_valid(cic->ioprio)) { ++ struct task_struct *tsk = current; ++ ioprio = task_nice_ioprio(tsk); ++ ioprio_class = task_nice_ioclass(tsk); ++ } + async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); + cfqq = *async_cfqq; + } +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index b11949c..f667e37 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -157,6 +157,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = { + #define USB_REQ_DFU_DNLOAD 1 + #define BULK_SIZE 4096 + #define FW_HDR_SIZE 20 ++#define TIMEGAP_USEC_MIN 50 ++#define TIMEGAP_USEC_MAX 100 + + static int ath3k_load_firmware(struct usb_device *udev, + const struct firmware *firmware) +@@ -187,6 +189,9 @@ static int ath3k_load_firmware(struct usb_device *udev, + count -= 20; + + while (count) { ++ /* workaround the compatibility issue with xHCI controller*/ ++ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); ++ + size = min_t(uint, count, BULK_SIZE); + pipe = usb_sndbulkpipe(udev, 0x02); + memcpy(send_buf, firmware->data + sent, size); +@@ -283,6 +288,9 @@ static int ath3k_load_fwfile(struct usb_device *udev, + count -= size; + + while (count) { ++ /* workaround the compatibility issue with xHCI controller*/ ++ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); ++ + size = min_t(uint, count, BULK_SIZE); + pipe = 
usb_sndbulkpipe(udev, 0x02); + +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c +index 6af1700..cfb9089 100644 +--- a/drivers/char/tpm/tpm-interface.c ++++ b/drivers/char/tpm/tpm-interface.c +@@ -1122,7 +1122,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, + + /* Make chip available */ + spin_lock(&driver_lock); +- list_add_rcu(&chip->list, &tpm_chip_list); ++ list_add_tail_rcu(&chip->list, &tpm_chip_list); + spin_unlock(&driver_lock); + + return chip; +diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c +index 7727292..503a85a 100644 +--- a/drivers/char/tpm/tpm_i2c_atmel.c ++++ b/drivers/char/tpm/tpm_i2c_atmel.c +@@ -168,6 +168,10 @@ static int i2c_atmel_probe(struct i2c_client *client, + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); ++ if (!chip->vendor.priv) { ++ rc = -ENOMEM; ++ goto out_err; ++ } + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); +diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c +index 7b158ef..23c7b13 100644 +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c +@@ -538,6 +538,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client, + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); ++ if (!chip->vendor.priv) { ++ rc = -ENOMEM; ++ goto out_err; ++ } ++ + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + +diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c +index be9af2e..576d111 100644 +--- a/drivers/char/tpm/tpm_i2c_stm_st33.c ++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c +@@ -488,7 +488,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf, + if (burstcnt < 0) + return burstcnt; + size = min_t(int, len - i - 1, burstcnt); +- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size); ++ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size); + if (ret < 0) + goto out_err; + +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index af74c57..eff9d58 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) + crq.len = (u16)count; + crq.data = ibmvtpm->rtce_dma_handle; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), ++ cpu_to_be64(word[1])); + if (rc != H_SUCCESS) { + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); + rc = 0; +@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); +@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_GET_VERSION; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "ibmvtpm_crq_get_version failed rc=%d\n", rc); +@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev 
*vdev) + static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) + { + struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); ++ ++ /* ibmvtpm initializes at probe time, so the data we are ++ * asking for may not be set yet. Estimate that 4K required ++ * for TCE-mapped buffer in addition to CRQ. ++ */ ++ if (!ibmvtpm) ++ return CRQ_RES_BUF_SIZE + PAGE_SIZE; ++ + return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; + } + +@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev) + crq.valid = (u8)IBMVTPM_VALID_CMD; + crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; + +- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); ++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), ++ cpu_to_be64(buf[1])); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "tpm_ibmvtpm_suspend failed rc=%d\n", rc); +@@ -472,11 +484,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + case IBMVTPM_VALID_CMD: + switch (crq->msg) { + case VTPM_GET_RTCE_BUFFER_SIZE_RES: +- if (crq->len <= 0) { ++ if (be16_to_cpu(crq->len) <= 0) { + dev_err(ibmvtpm->dev, "Invalid rtce size\n"); + return; + } +- ibmvtpm->rtce_size = crq->len; ++ ibmvtpm->rtce_size = be16_to_cpu(crq->len); + ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, + GFP_KERNEL); + if (!ibmvtpm->rtce_buf) { +@@ -497,11 +509,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq, + + return; + case VTPM_GET_VERSION_RES: +- ibmvtpm->vtpm_version = crq->data; ++ ibmvtpm->vtpm_version = be32_to_cpu(crq->data); + return; + case VTPM_TPM_COMMAND_RES: + /* len of the data in rtce buffer */ +- ibmvtpm->res_len = crq->len; ++ ibmvtpm->res_len = be16_to_cpu(crq->len); + wake_up_interruptible(&ibmvtpm->wq); + return; + default: +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c +index 2c46734..51350cd 100644 +--- a/drivers/char/tpm/tpm_tis.c ++++ b/drivers/char/tpm/tpm_tis.c +@@ -75,6 +75,10 @@ enum tis_defaults { + #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) + #define TPM_RID(l) (0x0F04 | ((l) << 12)) + ++struct priv_data { ++ bool irq_tested; ++}; ++ + static LIST_HEAD(tis_chips); + static DEFINE_MUTEX(tis_lock); + +@@ -338,12 +342,27 @@ out_err: + return rc; + } + ++static void disable_interrupts(struct tpm_chip *chip) ++{ ++ u32 intmask; ++ ++ intmask = ++ ioread32(chip->vendor.iobase + ++ TPM_INT_ENABLE(chip->vendor.locality)); ++ intmask &= ~TPM_GLOBAL_INT_ENABLE; ++ iowrite32(intmask, ++ chip->vendor.iobase + ++ TPM_INT_ENABLE(chip->vendor.locality)); ++ free_irq(chip->vendor.irq, chip); ++ chip->vendor.irq = 0; ++} ++ + /* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) ++static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) + { + int rc; + u32 ordinal; +@@ -373,6 +392,30 @@ out_err: + return rc; + } + ++static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) ++{ ++ int rc, irq; ++ struct priv_data *priv = chip->vendor.priv; ++ ++ if (!chip->vendor.irq || priv->irq_tested) ++ return tpm_tis_send_main(chip, buf, len); ++ ++ /* Verify receipt of the expected IRQ */ ++ irq = chip->vendor.irq; ++ chip->vendor.irq = 0; ++ rc = tpm_tis_send_main(chip, buf, len); ++ chip->vendor.irq = irq; ++ if (!priv->irq_tested) ++ msleep(1); ++ if (!priv->irq_tested) { ++ disable_interrupts(chip); ++ dev_err(chip->dev, ++ FW_BUG "TPM interrupt not working, polling instead\n"); ++ } ++ 
priv->irq_tested = true; ++ return rc; ++} ++ + struct tis_vendor_timeout_override { + u32 did_vid; + unsigned long timeout_us[4]; +@@ -505,6 +548,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id) + if (interrupt == 0) + return IRQ_NONE; + ++ ((struct priv_data *)chip->vendor.priv)->irq_tested = true; + if (interrupt & TPM_INTF_DATA_AVAIL_INT) + wake_up_interruptible(&chip->vendor.read_queue); + if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) +@@ -534,9 +578,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + u32 vendor, intfcaps, intmask; + int rc, i, irq_s, irq_e, probe; + struct tpm_chip *chip; ++ struct priv_data *priv; + ++ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); ++ if (priv == NULL) ++ return -ENOMEM; + if (!(chip = tpm_register_hardware(dev, &tpm_tis))) + return -ENODEV; ++ chip->vendor.priv = priv; + + chip->vendor.iobase = ioremap(start, len); + if (!chip->vendor.iobase) { +@@ -605,19 +654,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + if (intfcaps & TPM_INTF_DATA_AVAIL_INT) + dev_dbg(dev, "\tData Avail Int Support\n"); + +- /* get the timeouts before testing for irqs */ +- if (tpm_get_timeouts(chip)) { +- dev_err(dev, "Could not get TPM timeouts and durations\n"); +- rc = -ENODEV; +- goto out_err; +- } +- +- if (tpm_do_selftest(chip)) { +- dev_err(dev, "TPM self test failed\n"); +- rc = -ENODEV; +- goto out_err; +- } +- + /* INTERRUPT Setup */ + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); +@@ -719,6 +755,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, + } + } + ++ if (tpm_get_timeouts(chip)) { ++ dev_err(dev, "Could not get TPM timeouts and durations\n"); ++ rc = -ENODEV; ++ goto out_err; ++ } ++ ++ if (tpm_do_selftest(chip)) { ++ dev_err(dev, "TPM self test failed\n"); ++ rc = -ENODEV; ++ goto out_err; ++ } ++ + INIT_LIST_HEAD(&chip->vendor.list); + mutex_lock(&tis_lock); + list_add(&chip->vendor.list, &tis_chips); +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 4854f81..ef3b8ad 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1365,9 +1365,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev, + unsigned long flags; + struct cpufreq_policy *policy; + +- read_lock_irqsave(&cpufreq_driver_lock, flags); ++ write_lock_irqsave(&cpufreq_driver_lock, flags); + policy = per_cpu(cpufreq_cpu_data, cpu); +- read_unlock_irqrestore(&cpufreq_driver_lock, flags); ++ per_cpu(cpufreq_cpu_data, cpu) = NULL; ++ write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + if (!policy) { + pr_debug("%s: No cpu_data found\n", __func__); +@@ -1422,7 +1423,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, + } + } + +- per_cpu(cpufreq_cpu_data, cpu) = NULL; + return 0; + } + +diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c +index 826b8be..82cef00 100644 +--- a/drivers/cpufreq/s3c2416-cpufreq.c ++++ b/drivers/cpufreq/s3c2416-cpufreq.c +@@ -263,7 +263,7 @@ out: + } + + #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE +-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) ++static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) + { + int count, v, i, found; + struct cpufreq_frequency_table *freq; +@@ -335,7 +335,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = { + .notifier_call = s3c2416_cpufreq_reboot_notifier_evt, + }; + +-static int __init s3c2416_cpufreq_driver_init(struct 
cpufreq_policy *policy) ++static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) + { + struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; + struct cpufreq_frequency_table *freq; +diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c +index 2506974..0eb5b40 100644 +--- a/drivers/cpufreq/s3c24xx-cpufreq.c ++++ b/drivers/cpufreq/s3c24xx-cpufreq.c +@@ -454,7 +454,7 @@ static struct cpufreq_driver s3c24xx_driver = { + }; + + +-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) ++int s3c_cpufreq_register(struct s3c_cpufreq_info *info) + { + if (!info || !info->name) { + printk(KERN_ERR "%s: failed to pass valid information\n", +diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c +index 7047821..4ab7a21 100644 +--- a/drivers/cpufreq/speedstep-lib.c ++++ b/drivers/cpufreq/speedstep-lib.c +@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, + + pr_debug("previous speed is %u\n", prev_speed); + ++ preempt_disable(); + local_irq_save(flags); + + /* switch to low state */ +@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, + + out: + local_irq_restore(flags); ++ preempt_enable(); ++ + return ret; + } + EXPORT_SYMBOL_GPL(speedstep_get_freqs); +diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c +index 998c17b..b52d8af 100644 +--- a/drivers/cpufreq/speedstep-smi.c ++++ b/drivers/cpufreq/speedstep-smi.c +@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state) + return; + + /* Disable IRQs */ ++ preempt_disable(); + local_irq_save(flags); + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); +@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state) + + do { + if (retry) { ++ /* ++ * We need to enable interrupts, otherwise the blockage ++ * won't resolve. ++ * ++ * We disable preemption so that other processes don't ++ * run. If other processes were running, they could ++ * submit more DMA requests, making the blockage worse. 
++ */ + pr_debug("retry %u, previous result %u, waiting...\n", + retry, result); ++ local_irq_enable(); + mdelay(retry * 50); ++ local_irq_disable(); + } + retry++; + __asm__ __volatile__( +@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state) + + /* enable IRQs */ + local_irq_restore(flags); ++ preempt_enable(); + + if (new_state == state) + pr_debug("change to %u MHz succeeded after %u tries " +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index 98e14ee..278603c 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -2006,14 +2006,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err, + + static inline void decode_bus_error(int node_id, struct mce *m) + { +- struct mem_ctl_info *mci = mcis[node_id]; +- struct amd64_pvt *pvt = mci->pvt_info; ++ struct mem_ctl_info *mci; ++ struct amd64_pvt *pvt; + u8 ecc_type = (m->status >> 45) & 0x3; + u8 xec = XEC(m->status, 0x1f); + u16 ec = EC(m->status); + u64 sys_addr; + struct err_info err; + ++ mci = edac_mc_find(node_id); ++ if (!mci) ++ return; ++ ++ pvt = mci->pvt_info; ++ + /* Bail out early if this was an 'observed' error */ + if (PP(ec) == NBSL_PP_OBS) + return; +diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c +index 59ee486..6005d26 100644 +--- a/drivers/gpio/gpio-tps65912.c ++++ b/drivers/gpio/gpio-tps65912.c +@@ -26,9 +26,12 @@ struct tps65912_gpio_data { + struct gpio_chip gpio_chip; + }; + ++#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip) ++ + static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + int val; + + val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); +@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) + static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, + int value) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + + if (value) + tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, +@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, + static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, + int value) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + + /* Set the initial value */ + tps65912_gpio_set(gc, offset, value); +@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, + + static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) + { +- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); ++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); ++ struct tps65912 *tps65912 = tps65912_gpio->tps65912; + + return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, + GPIO_CFG_MASK); +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index 74ed17d..d26028c 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -45,12 +45,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) + + ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); + if (ret < 0) { +- /* We've 
found the gpio chip, but the translation failed. +- * Return true to stop looking and return the translation +- * error via out_gpio ++ /* We've found a gpio chip, but the translation failed. ++ * Store translation error in out_gpio. ++ * Return false to keep looking, as more than one gpio chip ++ * could be registered per of-node. + */ + gg_data->out_gpio = ERR_PTR(ret); +- return true; ++ return false; + } + + gg_data->out_gpio = gpio_to_desc(ret + gc->base); +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index 6e5d8fe..17be889 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -356,7 +356,10 @@ static int i2c_hid_hwreset(struct i2c_client *client) + static void i2c_hid_get_input(struct i2c_hid *ihid) + { + int ret, ret_size; +- int size = ihid->bufsize; ++ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); ++ ++ if (size > ihid->bufsize) ++ size = ihid->bufsize; + + ret = i2c_master_recv(ihid->client, ihid->inbuf, size); + if (ret != size) { +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 55de4f6..b96ee9d 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -561,7 +561,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + if (test_bit(WriteMostly, &rdev->flags)) { + /* Don't balance among write-mostly, just + * use the first as a last resort */ +- if (best_disk < 0) { ++ if (best_dist_disk < 0) { + if (is_badblock(rdev, this_sector, sectors, + &first_bad, &bad_sectors)) { + if (first_bad < this_sector) +@@ -570,7 +570,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect + best_good_sectors = first_bad - this_sector; + } else + best_good_sectors = sectors; +- best_disk = disk; ++ best_dist_disk = disk; ++ best_pending_disk = disk; + } + continue; + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 175584a..3545faf 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -3071,7 +3071,8 @@ static void handle_stripe_dirtying(struct r5conf *conf, + * generate correct data from the parity. 
+ */ + if (conf->max_degraded == 2 || +- (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { ++ (recovery_cp < MaxSector && sh->sector >= recovery_cp && ++ s->failed == 0)) { + /* Calculate the real rcw later - for now make it + * look like rcw is cheaper + */ +diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c +index f674dc0..d2a4e6d 100644 +--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c ++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c +@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) + { + struct dvb_usb_device *d = adap_to_d(adap); + struct lme2510_state *lme_int = adap_to_priv(adap); ++ struct usb_host_endpoint *ep; + + lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC); + +@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) + adap, + 8); + ++ /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */ ++ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); ++ ++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) ++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), ++ + lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC); +diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c +index dfdfa77..c39f7d3 100644 +--- a/drivers/media/usb/em28xx/em28xx-audio.c ++++ b/drivers/media/usb/em28xx/em28xx-audio.c +@@ -814,7 +814,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev) + if (urb_size > ep_size * npackets) + npackets = DIV_ROUND_UP(urb_size, ep_size); + +- em28xx_info("Number of URBs: %d, with %d packets and %d size", ++ em28xx_info("Number of URBs: %d, with %d packets and %d size\n", + num_urb, npackets, urb_size); + + /* Estimate the bytes per period */ +@@ -974,7 +974,7 @@ static int em28xx_audio_fini(struct em28xx *dev) + return 0; + } + +- em28xx_info("Closing audio extension"); ++ em28xx_info("Closing audio extension\n"); + + if (dev->adev.sndcard) { + snd_card_disconnect(dev->adev.sndcard); +diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c +index 1373cfa..ec2ebe9 100644 +--- a/drivers/media/usb/em28xx/em28xx-dvb.c ++++ b/drivers/media/usb/em28xx/em28xx-dvb.c +@@ -1468,7 +1468,7 @@ static int em28xx_dvb_fini(struct em28xx *dev) + return 0; + } + +- em28xx_info("Closing DVB extension"); ++ em28xx_info("Closing DVB extension\n"); + + if (dev->dvb) { + struct em28xx_dvb *dvb = dev->dvb; +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c +index 18f65d8..dd59c00 100644 +--- a/drivers/media/usb/em28xx/em28xx-input.c ++++ b/drivers/media/usb/em28xx/em28xx-input.c +@@ -810,7 +810,7 @@ static int em28xx_ir_fini(struct em28xx *dev) + return 0; + } + +- em28xx_info("Closing input extension"); ++ em28xx_info("Closing input extension\n"); + + em28xx_shutdown_buttons(dev); + +diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c +index e24ee08..0e8d085 100644 +--- a/drivers/media/usb/em28xx/em28xx-video.c ++++ b/drivers/media/usb/em28xx/em28xx-video.c +@@ -1900,7 +1900,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev) + return 0; + } + +- em28xx_info("Closing video extension"); ++ em28xx_info("Closing video extension\n"); + + mutex_lock(&dev->lock); + +diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c +index 793dacd..561c6b4 100644 +--- a/drivers/mmc/host/sdhci-pxav3.c ++++ b/drivers/mmc/host/sdhci-pxav3.c +@@ -201,8 
+201,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) + if (!pdata) + return NULL; + +- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); +- if (clk_delay_cycles > 0) ++ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles", ++ &clk_delay_cycles)) + pdata->clk_delay_cycles = clk_delay_cycles; + + return pdata; +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +index d06414e..a041746 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c +@@ -410,9 +410,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, + mvmvif->uploaded = false; + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + +- /* does this make sense at all? */ +- mvmvif->color++; +- + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); + spin_unlock_bh(&mvm->time_event_lock); +@@ -597,7 +594,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, + + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) +- goto out_release; ++ goto out_remove_mac; + + iwl_mvm_power_disable(mvm, vif); + +diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c +index 76ee486..4efcb28 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/tx.c ++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c +@@ -835,6 +835,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + sta_id = ba_notif->sta_id; + tid = ba_notif->tid; + ++ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || ++ tid >= IWL_MAX_TID_COUNT, ++ "sta_id %d tid %d", sta_id, tid)) ++ return 0; ++ + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); +diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c +index 3d54900..52427fb 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c +@@ -729,7 +729,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) + iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, + trans_pcie->kw.dma >> 4); + +- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); ++ /* ++ * Send 0 as the scd_base_addr since the device may have be reset ++ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will ++ * contain garbage. 
++ */ ++ iwl_pcie_tx_start(trans, 0); + } + + /* +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 25f0bc6..7f41551 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -1324,7 +1324,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) + if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) + return -ENOMEM; + +- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", ++ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device, + (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), +diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c +index 5d59572..5510c88 100644 +--- a/drivers/pci/rom.c ++++ b/drivers/pci/rom.c +@@ -69,6 +69,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) + { + void __iomem *image; + int last_image; ++ unsigned length; + + image = rom; + do { +@@ -91,9 +92,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) + if (readb(pds + 3) != 'R') + break; + last_image = readb(pds + 21) & 0x80; +- /* this length is reliable */ +- image += readw(pds + 16) * 512; +- } while (!last_image); ++ length = readw(pds + 16); ++ image += length * 512; ++ } while (length && !last_image); + + /* never return a size larger than the PCI resource window */ + /* there are known ROMs that get the size wrong */ +diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c +index de029bb..5ccca87 100644 +--- a/drivers/power/88pm860x_charger.c ++++ b/drivers/power/88pm860x_charger.c +@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev) + return 0; + + out_irq: ++ power_supply_unregister(&info->usb); + while (--i >= 0) + free_irq(info->irq[i], info); + out: +diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c +index ad3ff8f..e4c95e1 100644 +--- a/drivers/power/bq24190_charger.c ++++ b/drivers/power/bq24190_charger.c +@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger) + charger->properties = bq24190_charger_properties; + charger->num_properties = ARRAY_SIZE(bq24190_charger_properties); + charger->supplied_to = bq24190_charger_supplied_to; +- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to); ++ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to); + charger->get_property = bq24190_charger_get_property; + charger->set_property = bq24190_charger_set_property; + charger->property_is_writeable = bq24190_charger_property_is_writeable; +diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c +index a0024b2..86e03c6 100644 +--- a/drivers/power/gpio-charger.c ++++ b/drivers/power/gpio-charger.c +@@ -168,7 +168,7 @@ static int gpio_charger_suspend(struct device *dev) + + if (device_may_wakeup(dev)) + gpio_charger->wakeup_enabled = +- enable_irq_wake(gpio_charger->irq); ++ !enable_irq_wake(gpio_charger->irq); + + return 0; + } +@@ -178,7 +178,7 @@ static int gpio_charger_resume(struct device *dev) + struct platform_device *pdev = to_platform_device(dev); + struct gpio_charger *gpio_charger = platform_get_drvdata(pdev); + +- if (gpio_charger->wakeup_enabled) ++ if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled) + disable_irq_wake(gpio_charger->irq); + power_supply_changed(&gpio_charger->charger); + +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c 
+index f655592..a1f04e3 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -92,6 +92,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) + { + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; ++ ++ instance->mask_interrupts = 0; + /* For Thunderbolt/Invader also clear intr on enable */ + writel(~0, &regs->outbound_intr_status); + readl(&regs->outbound_intr_status); +@@ -100,7 +102,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) + + /* Dummy readl to force pci flush */ + readl(&regs->outbound_intr_mask); +- instance->mask_interrupts = 0; + } + + /** +diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c +index 601e9cc..bb2890e 100644 +--- a/drivers/target/iscsi/iscsi_target_tq.c ++++ b/drivers/target/iscsi/iscsi_target_tq.c +@@ -24,36 +24,22 @@ + #include "iscsi_target_tq.h" + #include "iscsi_target.h" + +-static LIST_HEAD(active_ts_list); + static LIST_HEAD(inactive_ts_list); +-static DEFINE_SPINLOCK(active_ts_lock); + static DEFINE_SPINLOCK(inactive_ts_lock); + static DEFINE_SPINLOCK(ts_bitmap_lock); + +-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts) +-{ +- spin_lock(&active_ts_lock); +- list_add_tail(&ts->ts_list, &active_ts_list); +- iscsit_global->active_ts++; +- spin_unlock(&active_ts_lock); +-} +- + static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) + { ++ if (!list_empty(&ts->ts_list)) { ++ WARN_ON(1); ++ return; ++ } + spin_lock(&inactive_ts_lock); + list_add_tail(&ts->ts_list, &inactive_ts_list); + iscsit_global->inactive_ts++; + spin_unlock(&inactive_ts_lock); + } + +-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts) +-{ +- spin_lock(&active_ts_lock); +- list_del(&ts->ts_list); +- iscsit_global->active_ts--; +- spin_unlock(&active_ts_lock); +-} +- + static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) + { + struct iscsi_thread_set *ts; +@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) + + ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); + +- list_del(&ts->ts_list); ++ list_del_init(&ts->ts_list); + iscsit_global->inactive_ts--; + spin_unlock(&inactive_ts_lock); + +@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void) + + void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) + { +- iscsi_add_ts_to_active_list(ts); +- + spin_lock_bh(&ts->ts_state_lock); + conn->thread_set = ts; + ts->conn = conn; +@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) + + if (ts->delay_inactive && (--ts->thread_count == 0)) { + spin_unlock_bh(&ts->ts_state_lock); +- iscsi_del_ts_from_active_list(ts); + + if (!iscsit_global->in_shutdown) + iscsi_deallocate_extra_thread_sets(); +@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) + + if (ts->delay_inactive && (--ts->thread_count == 0)) { + spin_unlock_bh(&ts->ts_state_lock); +- iscsi_del_ts_from_active_list(ts); + + if (!iscsit_global->in_shutdown) + iscsi_deallocate_extra_thread_sets(); +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 25c9bc7..e49616e 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -209,6 +209,9 @@ static int pty_signal(struct tty_struct *tty, int sig) + unsigned long flags; + struct pid *pgrp; + ++ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP) ++ return -EINVAL; ++ + if 
(tty->link) { + spin_lock_irqsave(&tty->link->ctrl_lock, flags); + pgrp = get_pid(tty->link->pgrp); +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index ce352b8..0d3e6cb 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -2392,7 +2392,7 @@ static int atmel_serial_probe(struct platform_device *pdev) + + ret = atmel_init_port(port, pdev); + if (ret) +- goto err; ++ goto err_clear_bit; + + if (!atmel_use_pdc_rx(&port->uart)) { + ret = -ENOMEM; +@@ -2441,6 +2441,8 @@ err_alloc_ring: + clk_put(port->clk); + port->clk = NULL; + } ++err_clear_bit: ++ clear_bit(port->uart.line, atmel_ports_in_use); + err: + return ret; + } +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 23b5d32..693091a 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed) + #endif + if (DO_UPDATE(vc)) + do_update_region(vc, (unsigned long) p, count); ++ notify_update(vc); + } + + /* used by selection: complement pointer position */ +@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset) + scr_writew(old, screenpos(vc, old_offset, 1)); + if (DO_UPDATE(vc)) + vc->vc_sw->con_putc(vc, old, oldy, oldx); ++ notify_update(vc); + } + + old_offset = offset; +@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset) + oldy = (offset >> 1) / vc->vc_cols; + vc->vc_sw->con_putc(vc, new, oldy, oldx); + } ++ notify_update(vc); + } +- + } + + static void insert_char(struct vc_data *vc, unsigned int nr) +diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c +index 684ef70..506b969 100644 +--- a/drivers/usb/core/buffer.c ++++ b/drivers/usb/core/buffer.c +@@ -22,17 +22,25 @@ + */ + + /* FIXME tune these based on pool statistics ... */ +-static const size_t pool_max[HCD_BUFFER_POOLS] = { +- /* platforms without dma-friendly caches might need to +- * prevent cacheline sharing... +- */ +- 32, +- 128, +- 512, +- PAGE_SIZE / 2 +- /* bigger --> allocate pages */ ++static size_t pool_max[HCD_BUFFER_POOLS] = { ++ 32, 128, 512, 2048, + }; + ++void __init usb_init_pool_max(void) ++{ ++ /* ++ * The pool_max values must never be smaller than ++ * ARCH_KMALLOC_MINALIGN. 
++ */ ++ if (ARCH_KMALLOC_MINALIGN <= 32) ++ ; /* Original value is okay */ ++ else if (ARCH_KMALLOC_MINALIGN <= 64) ++ pool_max[0] = 64; ++ else if (ARCH_KMALLOC_MINALIGN <= 128) ++ pool_max[0] = 0; /* Don't use this pool */ ++ else ++ BUILD_BUG(); /* We don't allow this */ ++} + + /* SETUP primitives */ + +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index ef6ec13b..ee6c556 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1617,6 +1617,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status) + int usb_hcd_unlink_urb (struct urb *urb, int status) + { + struct usb_hcd *hcd; ++ struct usb_device *udev = urb->dev; + int retval = -EIDRM; + unsigned long flags; + +@@ -1628,20 +1629,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) + spin_lock_irqsave(&hcd_urb_unlink_lock, flags); + if (atomic_read(&urb->use_count) > 0) { + retval = 0; +- usb_get_dev(urb->dev); ++ usb_get_dev(udev); + } + spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags); + if (retval == 0) { + hcd = bus_to_hcd(urb->dev->bus); + retval = unlink1(hcd, urb, status); +- usb_put_dev(urb->dev); ++ if (retval == 0) ++ retval = -EINPROGRESS; ++ else if (retval != -EIDRM && retval != -EBUSY) ++ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", ++ urb, retval); ++ usb_put_dev(udev); + } +- +- if (retval == 0) +- retval = -EINPROGRESS; +- else if (retval != -EIDRM && retval != -EBUSY) +- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n", +- urb, retval); + return retval; + } + +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c +index 4d11449..a922730 100644 +--- a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -1050,6 +1050,7 @@ static int __init usb_init(void) + pr_info("%s: USB support disabled\n", usbcore_name); + return 0; + } ++ usb_init_pool_max(); + + retval = usb_debugfs_init(); + if (retval) +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 9e8708c..a2d0409 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ ++ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ +diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c +index 602913d..edfd797 100644 +--- a/drivers/xen/manage.c ++++ b/drivers/xen/manage.c +@@ -113,10 +113,16 @@ static void do_suspend(void) + + err = freeze_processes(); + if (err) { +- pr_err("%s: freeze failed %d\n", __func__, err); ++ pr_err("%s: freeze processes failed %d\n", __func__, err); + goto out; + } + ++ err = freeze_kernel_threads(); ++ if (err) { ++ pr_err("%s: freeze kernel threads failed %d\n", __func__, err); ++ goto out_thaw; ++ } ++ + err = dpm_suspend_start(PMSG_FREEZE); + if (err) { + pr_err("%s: dpm_suspend_start %d\n", __func__, err); +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 67be295..f4d7b2f 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -549,11 +549,12 @@ out: + + static unsigned long randomize_stack_top(unsigned long stack_top) + { +- 
unsigned int random_variable = 0; ++ unsigned long random_variable = 0; + + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { +- random_variable = get_random_int() & STACK_RND_MASK; ++ random_variable = (unsigned long) get_random_int(); ++ random_variable &= STACK_RND_MASK; + random_variable <<= PAGE_SHIFT; + } + #ifdef CONFIG_STACK_GROWSUP +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index cbd3a7d..93de3ba 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -2655,32 +2655,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key, + return 0; + } + +-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path, ++int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, + u64 iobjectid, u64 ioff, u8 key_type, + struct btrfs_key *found_key) + { + int ret; + struct btrfs_key key; + struct extent_buffer *eb; +- struct btrfs_path *path; ++ ++ ASSERT(path); + + key.type = key_type; + key.objectid = iobjectid; + key.offset = ioff; + +- if (found_path == NULL) { +- path = btrfs_alloc_path(); +- if (!path) +- return -ENOMEM; +- } else +- path = found_path; +- + ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); +- if ((ret < 0) || (found_key == NULL)) { +- if (path != found_path) +- btrfs_free_path(path); ++ if ((ret < 0) || (found_key == NULL)) + return ret; +- } + + eb = path->nodes[0]; + if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 0db8ded..f48d5fc 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1560,6 +1560,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, + bool check_ref) + { + struct btrfs_root *root; ++ struct btrfs_path *path; + int ret; + + if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) +@@ -1599,8 +1600,14 @@ again: + if (ret) + goto fail; + +- ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID, ++ path = btrfs_alloc_path(); ++ if (!path) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID, + location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); ++ btrfs_free_path(path); + if (ret < 0) + goto fail; + if (ret == 0) +@@ -2411,7 +2418,7 @@ int open_ctree(struct super_block *sb, + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) +- printk(KERN_ERR "BTRFS: has skinny extents\n"); ++ printk(KERN_INFO "BTRFS: has skinny extents\n"); + + /* + * flag our filesystem as having big metadata blocks if +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 39d83da..aeb57b98 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1238,10 +1238,19 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 offset) + { + int ret; +- ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID, ++ struct btrfs_path *path; ++ ++ path = btrfs_alloc_path(); ++ if (!path) ++ return -ENOMEM; ++ ++ ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, + offset, BTRFS_ORPHAN_ITEM_KEY, NULL); + if (ret > 0) + ret = btrfs_insert_orphan_item(trans, root, offset); ++ ++ btrfs_free_path(path); ++ + return ret; + } + +diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c +index 7654e87..9ad5ba4 100644 +--- a/fs/jffs2/scan.c ++++ b/fs/jffs2/scan.c +@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo + sumlen = c->sector_size - je32_to_cpu(sm->offset); + sumptr = buf + 
buf_size - sumlen; + ++ /* sm->offset maybe wrong but MAGIC maybe right */ ++ if (sumlen > c->sector_size) ++ goto full_scan; ++ + /* Now, make sure the summary itself is available */ + if (sumlen > buf_size) { + /* Need to kmalloc for this. */ +@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo + } + } + ++full_scan: + buf_ofs = jeb->offset; + + if (!buf_size) { +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index 073b4cf..0a2016b 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp) + if (try_to_freeze()) + continue; + +- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); ++ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE); + spin_lock_bh(&serv->sv_cb_lock); + if (!list_empty(&serv->sv_cb_list)) { + req = list_first_entry(&serv->sv_cb_list, + struct rpc_rqst, rq_bc_list); + list_del(&req->rq_bc_list); + spin_unlock_bh(&serv->sv_cb_lock); ++ finish_wait(&serv->sv_cb_waitq, &wq); + dprintk("Invoking bc_svc_process()\n"); + error = bc_svc_process(serv, req, rqstp); + dprintk("bc_svc_process() returned w/ error code= %d\n", + error); + } else { + spin_unlock_bh(&serv->sv_cb_lock); +- schedule(); ++ /* schedule_timeout to game the hung task watchdog */ ++ schedule_timeout(60 * HZ); ++ finish_wait(&serv->sv_cb_waitq, &wq); + } +- finish_wait(&serv->sv_cb_waitq, &wq); + } + return 0; + } +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c +index f4ccfe6..02f8d09 100644 +--- a/fs/nfs/callback_xdr.c ++++ b/fs/nfs/callback_xdr.c +@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, + + for (i = 0; i < args->csa_nrclists; i++) { + status = decode_rc_list(xdr, &args->csa_rclists[i]); +- if (status) ++ if (status) { ++ args->csa_nrclists = i; + goto out_free; ++ } + } + } + status = 0; +diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c +index 3314911..645f180 100644 +--- a/fs/xfs/xfs_buf_item.c ++++ b/fs/xfs/xfs_buf_item.c +@@ -319,6 +319,10 @@ xfs_buf_item_format( + ASSERT(atomic_read(&bip->bli_refcount) > 0); + ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || + (bip->bli_flags & XFS_BLI_STALE)); ++ ASSERT((bip->bli_flags & XFS_BLI_STALE) || ++ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF ++ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF)); ++ + + /* + * If it is an inode buffer, transfer the in-memory state to the +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c +index 3a137e9..5d90b8d 100644 +--- a/fs/xfs/xfs_inode.c ++++ b/fs/xfs/xfs_inode.c +@@ -1946,6 +1946,7 @@ xfs_iunlink( + agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + return 0; +@@ -2037,6 +2038,7 @@ xfs_iunlink_remove( + agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket_index); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + } else { +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 6d7d1de..1b271f5 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1108,6 +1108,11 @@ xfs_qm_reset_dqcounts( + */ + xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, + "xfs_quotacheck"); ++ /* ++ * Reset type in case we are 
reusing group quota file for ++ * project quotas or vice versa ++ */ ++ ddq->d_flags = type; + ddq->d_bcount = 0; + ddq->d_icount = 0; + ddq->d_rtbcount = 0; +diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c +index c812c5c..b626f3d 100644 +--- a/fs/xfs/xfs_trans.c ++++ b/fs/xfs/xfs_trans.c +@@ -474,6 +474,7 @@ xfs_trans_apply_sb_deltas( + whole = 1; + } + ++ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); + if (whole) + /* + * Log the whole thing, the fields are noncontiguous. +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h +index 1c804b0..7ee1774 100644 +--- a/include/linux/fsnotify.h ++++ b/include/linux/fsnotify.h +@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + new_dir_mask |= FS_ISDIR; + } + +- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); +- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); ++ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, ++ fs_cookie); ++ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, ++ fs_cookie); + + if (target) + fsnotify_link_count(target); +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h +index efe8d8a..e34bce37 100644 +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -447,6 +447,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops; + #endif /* CONFIG_PCI */ + + /* pci-ish (pdev null is ok) buffer alloc/mapping support */ ++void usb_init_pool_max(void); + int hcd_buffer_create(struct usb_hcd *hcd); + void hcd_buffer_destroy(struct usb_hcd *hcd); + +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c +index 0b097c8..449518e 100644 +--- a/kernel/debug/kdb/kdb_main.c ++++ b/kernel/debug/kdb/kdb_main.c +@@ -2535,7 +2535,7 @@ static int kdb_summary(int argc, const char **argv) + #define K(x) ((x) << (PAGE_SHIFT - 10)) + kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" + "Buffers: %8lu kB\n", +- val.totalram, val.freeram, val.bufferram); ++ K(val.totalram), K(val.freeram), K(val.bufferram)); + return 0; + } + +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c +index 28db9be..6211d5d 100644 +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -631,10 +631,14 @@ int ntp_validate_timex(struct timex *txc) + if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) + return -EPERM; + +- if (txc->modes & ADJ_FREQUENCY) { +- if (LONG_MIN / PPM_SCALE > txc->freq) ++ /* ++ * Check for potential multiplication overflows that can ++ * only happen on 64-bit systems: ++ */ ++ if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { ++ if (LLONG_MIN / PPM_SCALE > txc->freq) + return -EINVAL; +- if (LONG_MAX / PPM_SCALE < txc->freq) ++ if (LLONG_MAX / PPM_SCALE < txc->freq) + return -EINVAL; + } + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 7113672..813b021 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4694,7 +4694,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, + *fpos += written; + + out_unlock: +- for (i = 0; i < nr_pages; i++){ ++ for (i = nr_pages - 1; i >= 0; i--) { + kunmap_atomic(map_page[i]); + put_page(pages[i]); + } +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 67d0c17..472259b 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -3456,6 +3456,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, + { + struct page *page; + ++ if (!pmd_present(*pmd)) ++ return NULL; + page = pte_page(*(pte_t *)pmd); + if (page) + page += ((address & 
~PMD_MASK) >> PAGE_SHIFT); +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c +index 0676f2b..45f077c 100644 +--- a/net/ceph/osd_client.c ++++ b/net/ceph/osd_client.c +@@ -977,12 +977,24 @@ static void put_osd(struct ceph_osd *osd) + */ + static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) + { +- dout("__remove_osd %p\n", osd); +- BUG_ON(!list_empty(&osd->o_requests)); +- rb_erase(&osd->o_node, &osdc->osds); ++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd); ++ WARN_ON(!list_empty(&osd->o_requests)); ++ WARN_ON(!list_empty(&osd->o_linger_requests)); ++ + list_del_init(&osd->o_osd_lru); +- ceph_con_close(&osd->o_con); +- put_osd(osd); ++ rb_erase(&osd->o_node, &osdc->osds); ++ RB_CLEAR_NODE(&osd->o_node); ++} ++ ++static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) ++{ ++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd); ++ ++ if (!RB_EMPTY_NODE(&osd->o_node)) { ++ ceph_con_close(&osd->o_con); ++ __remove_osd(osdc, osd); ++ put_osd(osd); ++ } + } + + static void remove_all_osds(struct ceph_osd_client *osdc) +@@ -992,7 +1004,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc) + while (!RB_EMPTY_ROOT(&osdc->osds)) { + struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), + struct ceph_osd, o_node); +- __remove_osd(osdc, osd); ++ remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); + } +@@ -1022,7 +1034,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc) + list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { + if (time_before(jiffies, osd->lru_ttl)) + break; +- __remove_osd(osdc, osd); ++ remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); + } +@@ -1037,8 +1049,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) + dout("__reset_osd %p osd%d\n", osd, osd->o_osd); + if (list_empty(&osd->o_requests) && + list_empty(&osd->o_linger_requests)) { +- __remove_osd(osdc, osd); +- ++ remove_osd(osdc, osd); + return -ENODEV; + } + +@@ -1840,6 +1851,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc) + { + struct rb_node *p, *n; + ++ dout("%s %p\n", __func__, osdc); + for (p = rb_first(&osdc->osds); p; p = n) { + struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); + +diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c +index 56cc891..d99c8d3 100644 +--- a/sound/pci/riptide/riptide.c ++++ b/sound/pci/riptide/riptide.c +@@ -2032,32 +2032,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id) + { + static int dev; + struct gameport *gameport; ++ int ret; + + if (dev >= SNDRV_CARDS) + return -ENODEV; ++ + if (!enable[dev]) { +- dev++; +- return -ENOENT; ++ ret = -ENOENT; ++ goto inc_dev; + } + +- if (!joystick_port[dev++]) +- return 0; ++ if (!joystick_port[dev]) { ++ ret = 0; ++ goto inc_dev; ++ } + + gameport = gameport_allocate_port(); +- if (!gameport) +- return -ENOMEM; ++ if (!gameport) { ++ ret = -ENOMEM; ++ goto inc_dev; ++ } + if (!request_region(joystick_port[dev], 8, "Riptide gameport")) { + snd_printk(KERN_WARNING + "Riptide: cannot grab gameport 0x%x\n", + joystick_port[dev]); + gameport_free_port(gameport); +- return -EBUSY; ++ ret = -EBUSY; ++ goto inc_dev; + } + + gameport->io = joystick_port[dev]; + gameport_register_port(gameport); + pci_set_drvdata(pci, gameport); +- return 0; ++ ++ ret = 0; ++inc_dev: ++ dev++; ++ return ret; + } + + static void snd_riptide_joystick_remove(struct pci_dev *pci) +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 
e98dc00..2116750 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -6102,6 +6102,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream) + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, + 64, 8192); ++ snd_pcm_hw_constraint_minmax(runtime, ++ SNDRV_PCM_HW_PARAM_PERIODS, ++ 2, 2); + break; + } + +@@ -6176,6 +6179,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream) + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, + 64, 8192); ++ snd_pcm_hw_constraint_minmax(runtime, ++ SNDRV_PCM_HW_PARAM_PERIODS, ++ 2, 2); + break; + } + diff --git a/projects/imx6/patches/linux/linux-000-patch-3.14.35-36.patch b/projects/imx6/patches/linux/linux-000-patch-3.14.35-36.patch new file mode 100644 index 0000000000..8e23c25903 --- /dev/null +++ b/projects/imx6/patches/linux/linux-000-patch-3.14.35-36.patch @@ -0,0 +1,3448 @@ +diff --git a/Makefile b/Makefile +index 9720e86..4e6537b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 35 ++SUBLEVEL = 36 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h +index 15334ab..fb95aa8 100644 +--- a/arch/arc/include/asm/processor.h ++++ b/arch/arc/include/asm/processor.h +@@ -69,18 +69,19 @@ unsigned long thread_saved_pc(struct task_struct *t); + #define release_segments(mm) do { } while (0) + + #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) ++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) + + /* + * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. + * Look in process.c for details of kernel stack layout + */ +-#define KSTK_ESP(tsk) (tsk->thread.ksp) ++#define TSK_K_ESP(tsk) (tsk->thread.ksp) + +-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ ++#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ + sizeof(struct callee_regs) + off))) + +-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) +-#define KSTK_FP(tsk) KSTK_REG(tsk, 0) ++#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) ++#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) + + /* + * Do necessary setup to start up a newly executed thread. 
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c +index 9ce47cf..fb98769 100644 +--- a/arch/arc/kernel/stacktrace.c ++++ b/arch/arc/kernel/stacktrace.c +@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk, + + frame_info->task = tsk; + +- frame_info->regs.r27 = KSTK_FP(tsk); +- frame_info->regs.r28 = KSTK_ESP(tsk); +- frame_info->regs.r31 = KSTK_BLINK(tsk); ++ frame_info->regs.r27 = TSK_K_FP(tsk); ++ frame_info->regs.r28 = TSK_K_ESP(tsk); ++ frame_info->regs.r31 = TSK_K_BLINK(tsk); + frame_info->regs.r63 = (unsigned int)__switch_to; + + /* In the prologue of __switch_to, first FP is saved on stack +diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h +index bc9e0f4..e51621e 100644 +--- a/arch/mips/kvm/trace.h ++++ b/arch/mips/kvm/trace.h +@@ -26,18 +26,18 @@ TRACE_EVENT(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( +- __field(struct kvm_vcpu *, vcpu) ++ __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( +- __entry->vcpu = vcpu; ++ __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + kvm_mips_exit_types_str[__entry->reason], +- __entry->vcpu->arch.pc) ++ __entry->pc) + ); + + #endif /* _TRACE_KVM_H */ +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 02553d6..06469ee 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -542,11 +542,14 @@ ENTRY(ret_from_fork) + testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + jz 1f + +- testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET +- jnz int_ret_from_sys_call +- +- RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET +- jmp ret_from_sys_call # go to the SYSRET fastpath ++ /* ++ * By the time we get here, we have no idea whether our pt_regs, ++ * ti flags, and ti status came from the 64-bit SYSCALL fast path, ++ * the slow path, or one of the ia32entry paths. ++ * Use int_ret_from_sys_call to return, since it can safely handle ++ * all of the above. ++ */ ++ jmp int_ret_from_sys_call + + 1: + subq $REST_SKIP, %rsp # leave space for volatiles +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 38d3751..09651d4 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -4646,7 +4646,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) + if (rc != X86EMUL_CONTINUE) + goto done; + } +- ctxt->dst.orig_val = ctxt->dst.val; ++ /* Copy full 64-bit value for CMPXCHG8B. */ ++ ctxt->dst.orig_val64 = ctxt->dst.val64; + + special_insn: + +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index bb0b904..997540d 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -2064,6 +2064,17 @@ EXPORT_SYMBOL(acpi_video_unregister); + + static int __init acpi_video_init(void) + { ++ /* ++ * Let the module load even if ACPI is disabled (e.g. due to ++ * a broken BIOS) so that i915.ko can still be loaded on such ++ * old systems without an AcpiOpRegion. ++ * ++ * acpi_video_register() will report -ENODEV later as well due ++ * to acpi_disabled when i915.ko tries to register itself afterwards. 
++ */ ++ if (acpi_disabled) ++ return 0; ++ + dmi_check_system(video_dmi_table); + + if (intel_opregion_present()) +diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c +index 4a58c55..797bab9 100644 +--- a/drivers/clk/clk-gate.c ++++ b/drivers/clk/clk-gate.c +@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name, + struct clk_init_data init; + + if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { +- if (bit_idx > 16) { ++ if (bit_idx > 15) { + pr_err("gate bit exceeds LOWORD field\n"); + return ERR_PTR(-EINVAL); + } +diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c +index 9e23264..ea4db84 100644 +--- a/drivers/clk/sunxi/clk-factors.c ++++ b/drivers/clk/sunxi/clk-factors.c +@@ -62,7 +62,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw, + p = FACTOR_GET(config->pshift, config->pwidth, reg); + + /* Calculate the rate */ +- rate = (parent_rate * n * (k + 1) >> p) / (m + 1); ++ rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1); + + return rate; + } +diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h +index 02e1a43..d2d0efa 100644 +--- a/drivers/clk/sunxi/clk-factors.h ++++ b/drivers/clk/sunxi/clk-factors.h +@@ -15,6 +15,7 @@ struct clk_factors_config { + u8 mwidth; + u8 pshift; + u8 pwidth; ++ u8 n_start; + }; + + struct clk_factors { +diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c +index abb6c5a..06a14b8 100644 +--- a/drivers/clk/sunxi/clk-sunxi.c ++++ b/drivers/clk/sunxi/clk-sunxi.c +@@ -407,6 +407,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = { + .kwidth = 2, + .mshift = 0, + .mwidth = 2, ++ .n_start = 1, + }; + + static struct clk_factors_config sun4i_pll5_config = { +diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c +index 09dd017..5f52f3f 100644 +--- a/drivers/clk/zynq/clkc.c ++++ b/drivers/clk/zynq/clkc.c +@@ -300,6 +300,7 @@ static void __init zynq_clk_setup(struct device_node *np) + clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], + "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, + 26, 0, &armclk_lock); ++ clk_prepare_enable(clks[cpu_2x]); + + clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, + 4 + 2 * tmp); +diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c +index 97cdd16..c98b101 100644 +--- a/drivers/firmware/efi/runtime-map.c ++++ b/drivers/firmware/efi/runtime-map.c +@@ -170,7 +170,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj) + + return 0; + out_add_entry: +- for (j = i - 1; j > 0; j--) { ++ for (j = i - 1; j >= 0; j--) { + entry = *(map_entries + j); + kobject_put(&entry->kobj); + } +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index 8ef67cb..f0ed0ba 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3558,7 +3558,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + +- /* EVENT_WRITE_EOP - flush caches, send int */ ++ /* Workaround for cache flush problems. First send a dummy EOP ++ * event down the pipe with seq one below. 
++ */ ++ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); ++ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | ++ EOP_TC_ACTION_EN | ++ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | ++ EVENT_INDEX(5))); ++ radeon_ring_write(ring, addr & 0xfffffffc); ++ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | ++ DATA_SEL(1) | INT_SEL(0)); ++ radeon_ring_write(ring, fence->seq - 1); ++ radeon_ring_write(ring, 0); ++ ++ /* Then send the real EOP event down the pipe. */ + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | +@@ -6809,7 +6823,6 @@ int cik_irq_set(struct radeon_device *rdev) + u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + u32 grbm_int_cntl = 0; + u32 dma_cntl, dma_cntl1; +- u32 thermal_int; + + if (!rdev->irq.installed) { + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); +@@ -6846,13 +6859,6 @@ int cik_irq_set(struct radeon_device *rdev) + cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; + cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; + +- if (rdev->flags & RADEON_IS_IGP) +- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & +- ~(THERM_INTH_MASK | THERM_INTL_MASK); +- else +- thermal_int = RREG32_SMC(CG_THERMAL_INT) & +- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); +- + /* enable CP interrupts on all rings */ + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { + DRM_DEBUG("cik_irq_set: sw int gfx\n"); +@@ -7010,14 +7016,6 @@ int cik_irq_set(struct radeon_device *rdev) + hpd6 |= DC_HPDx_INT_EN; + } + +- if (rdev->irq.dpm_thermal) { +- DRM_DEBUG("dpm thermal\n"); +- if (rdev->flags & RADEON_IS_IGP) +- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; +- else +- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; +- } +- + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + + WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); +@@ -7071,11 +7069,6 @@ int cik_irq_set(struct radeon_device *rdev) + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + +- if (rdev->flags & RADEON_IS_IGP) +- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); +- else +- WREG32_SMC(CG_THERMAL_INT, thermal_int); +- + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c +index 351db36..c7c7bc5 100644 +--- a/drivers/gpu/drm/radeon/kv_dpm.c ++++ b/drivers/gpu/drm/radeon/kv_dpm.c +@@ -1121,6 +1121,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) + } + } + ++static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) ++{ ++ u32 thermal_int; ++ ++ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); ++ if (enable) ++ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; ++ else ++ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); ++ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); ++ ++} ++ + int kv_dpm_enable(struct radeon_device *rdev) + { + struct kv_power_info *pi = kv_get_pi(rdev); +@@ -1232,8 +1245,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev) + DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + return ret; + } +- rdev->irq.dpm_thermal = true; +- radeon_irq_set(rdev); ++ kv_enable_thermal_int(rdev, true); + } + + /* powerdown unused blocks for now */ +@@ -1261,6 +1273,7 @@ void kv_dpm_disable(struct radeon_device *rdev) + kv_stop_dpm(rdev); + kv_enable_ulv(rdev, false); + kv_reset_am(rdev); ++ kv_enable_thermal_int(rdev, false); + + kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); + } +diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c 
+index bf6300c..f8c01b8 100644 +--- a/drivers/gpu/drm/radeon/ni.c ++++ b/drivers/gpu/drm/radeon/ni.c +@@ -1073,12 +1073,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) + + if ((rdev->config.cayman.max_backends_per_se == 1) && + (rdev->flags & RADEON_IS_IGP)) { +- if ((disabled_rb_mask & 3) == 1) { +- /* RB0 disabled, RB1 enabled */ +- tmp = 0x11111111; +- } else { ++ if ((disabled_rb_mask & 3) == 2) { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; ++ } else { ++ /* RB0 disabled, RB1 enabled */ ++ tmp = 0x11111111; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; +diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c +index 3334f91..e981082 100644 +--- a/drivers/gpu/drm/radeon/r600_dpm.c ++++ b/drivers/gpu/drm/radeon/r600_dpm.c +@@ -187,7 +187,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { +- vrefresh = radeon_crtc->hw_mode.vrefresh; ++ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); + break; + } + } +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index e2de749..2fa3cf6 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -3272,6 +3272,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev, + + args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; + args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; ++ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); + args.in.ulSCLKFreq = + cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); + +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 4b87bb1..a413f76 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -1066,6 +1066,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct + return; + } + ++ /* ++ * Ignore reports for absolute data if the data didn't change. This is ++ * not only an optimization but also fixes 'dead' key reports. Some ++ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID ++ * 0x31 and 0x32) report multiple keys, even though a localized keyboard ++ * can only have one of them physically available. The 'dead' keys ++ * report constant 0. As all map to the same keycode, they'd confuse ++ * the input layer. If we filter the 'dead' keys on the HID level, we ++ * skip the keycode translation and only forward real events. 
++ */ ++ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE | ++ HID_MAIN_ITEM_BUFFERED_BYTE)) && ++ (field->flags & HID_MAIN_ITEM_VARIABLE) && ++ usage->usage_index < field->maxusage && ++ value == field->value[usage->usage_index]) ++ return; ++ + /* report the usage code as scancode if the key status has changed */ + if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value) + input_event(input, EV_MSC, MSC_SCAN, usage->hid); +diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c +index 47dcb34..3a615f3 100644 +--- a/drivers/iio/adc/mcp3422.c ++++ b/drivers/iio/adc/mcp3422.c +@@ -57,20 +57,11 @@ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + } + +-/* LSB is in nV to eliminate floating point */ +-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625}; +- +-/* +- * scales calculated as: +- * rates_to_lsb[sample_rate] / (1 << pga); +- * pga is 1 for 0, 2 +- */ +- + static const int mcp3422_scales[4][4] = { +- { 1000000, 250000, 62500, 15625 }, +- { 500000 , 125000, 31250, 7812 }, +- { 250000 , 62500 , 15625, 3906 }, +- { 125000 , 31250 , 7812 , 1953 } }; ++ { 1000000, 500000, 250000, 125000 }, ++ { 250000 , 125000, 62500 , 31250 }, ++ { 62500 , 31250 , 15625 , 7812 }, ++ { 15625 , 7812 , 3906 , 1953 } }; + + /* Constant msleep times for data acquisitions */ + static const int mcp3422_read_times[4] = { +diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c +index 17aca4d..861ba3d 100644 +--- a/drivers/iio/dac/ad5686.c ++++ b/drivers/iio/dac/ad5686.c +@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi) + st = iio_priv(indio_dev); + spi_set_drvdata(spi, indio_dev); + +- st->reg = devm_regulator_get(&spi->dev, "vcc"); ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); + if (!IS_ERR(st->reg)) { + ret = regulator_enable(st->reg); + if (ret) +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c +index 7c582f7..70753bf 100644 +--- a/drivers/iio/imu/adis16400_core.c ++++ b/drivers/iio/imu/adis16400_core.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -447,7 +448,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, + mutex_unlock(&indio_dev->mlock); + if (ret) + return ret; +- val16 = ((val16 & 0xFFF) << 4) >> 4; ++ val16 = sign_extend32(val16, 11); + *val = val16; + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 56a4b7c..45d67e9 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx, + if (!optlen) + return -EINVAL; + ++ memset(&sa_path, 0, sizeof(sa_path)); ++ sa_path.vlan_id = 0xffff; ++ + ib_sa_unpack_path(path_data->path_rec, &sa_path); + ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); + if (ret) +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index 23467a2..2adc143 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -1964,20 +1964,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, + if (qp->real_qp == qp) { + ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); + if (ret) +- goto out; ++ goto release_qp; + ret = qp->device->modify_qp(qp, attr, + modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); + } else { + ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); + } + +- put_qp_read(qp); +- + if (ret) +- goto out; 
++ goto release_qp; + + ret = in_len; + ++release_qp: ++ put_qp_read(qp); ++ + out: + kfree(attr); + +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 11f0606..1a3d924 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -1161,8 +1161,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + struct mlx4_ib_qp *mqp = to_mqp(ibqp); + u64 reg_id; + struct mlx4_ib_steering *ib_steering = NULL; +- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? +- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; ++ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; + + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { +@@ -1175,8 +1174,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + !!(mqp->flags & + MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), + prot, ®_id); +- if (err) ++ if (err) { ++ pr_err("multicast attach op failed, err %d\n", err); + goto err_malloc; ++ } + + err = add_gid_entry(ibqp, gid); + if (err) +@@ -1224,8 +1225,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + struct net_device *ndev; + struct mlx4_ib_gid_entry *ge; + u64 reg_id = 0; +- enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? +- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; ++ enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; + + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h +index 1946101..675d3c7 100644 +--- a/drivers/infiniband/hw/qib/qib.h ++++ b/drivers/infiniband/hw/qib/qib.h +@@ -1080,12 +1080,6 @@ struct qib_devdata { + /* control high-level access to EEPROM */ + struct mutex eep_lock; + uint64_t traffic_wds; +- /* active time is kept in seconds, but logged in hours */ +- atomic_t active_time; +- /* Below are nominal shadow of EEPROM, new since last EEPROM update */ +- uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; +- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; +- uint16_t eep_hrs; + /* + * masks for which bits of errs, hwerrs that cause + * each of the counters to increment. +@@ -1307,8 +1301,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, + int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, + const void *buffer, int len); + void qib_get_eeprom_info(struct qib_devdata *); +-int qib_update_eeprom_log(struct qib_devdata *dd); +-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); ++#define qib_inc_eeprom_err(dd, eidx, incr) + void qib_dump_lookup_output_queue(struct qib_devdata *); + void qib_force_pio_avail_update(struct qib_devdata *); + void qib_clear_symerror_on_linkup(unsigned long opaque); +diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c +index 4d5d71a..e2280b0 100644 +--- a/drivers/infiniband/hw/qib/qib_eeprom.c ++++ b/drivers/infiniband/hw/qib/qib_eeprom.c +@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) + "Board SN %s did not pass functional test: %s\n", + dd->serial, ifp->if_comment); + +- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); +- /* +- * Power-on (actually "active") hours are kept as little-endian value +- * in EEPROM, but as seconds in a (possibly as small as 24-bit) +- * atomic_t while running. 
+- */ +- atomic_set(&dd->active_time, 0); +- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); +- + done: + vfree(buf); + + bail:; + } + +-/** +- * qib_update_eeprom_log - copy active-time and error counters to eeprom +- * @dd: the qlogic_ib device +- * +- * Although the time is kept as seconds in the qib_devdata struct, it is +- * rounded to hours for re-write, as we have only 16 bits in EEPROM. +- * First-cut code reads whole (expected) struct qib_flash, modifies, +- * re-writes. Future direction: read/write only what we need, assuming +- * that the EEPROM had to have been "good enough" for driver init, and +- * if not, we aren't making it worse. +- * +- */ +-int qib_update_eeprom_log(struct qib_devdata *dd) +-{ +- void *buf; +- struct qib_flash *ifp; +- int len, hi_water; +- uint32_t new_time, new_hrs; +- u8 csum; +- int ret, idx; +- unsigned long flags; +- +- /* first, check if we actually need to do anything. */ +- ret = 0; +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- if (dd->eep_st_new_errs[idx]) { +- ret = 1; +- break; +- } +- } +- new_time = atomic_read(&dd->active_time); +- +- if (ret == 0 && new_time < 3600) +- goto bail; +- +- /* +- * The quick-check above determined that there is something worthy +- * of logging, so get current contents and do a more detailed idea. +- * read full flash, not just currently used part, since it may have +- * been written with a newer definition +- */ +- len = sizeof(struct qib_flash); +- buf = vmalloc(len); +- ret = 1; +- if (!buf) { +- qib_dev_err(dd, +- "Couldn't allocate memory to read %u bytes from eeprom for logging\n", +- len); +- goto bail; +- } +- +- /* Grab semaphore and read current EEPROM. If we get an +- * error, let go, but if not, keep it until we finish write. +- */ +- ret = mutex_lock_interruptible(&dd->eep_lock); +- if (ret) { +- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); +- goto free_bail; +- } +- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); +- if (ret) { +- mutex_unlock(&dd->eep_lock); +- qib_dev_err(dd, "Unable read EEPROM for logging\n"); +- goto free_bail; +- } +- ifp = (struct qib_flash *)buf; +- +- csum = flash_csum(ifp, 0); +- if (csum != ifp->if_csum) { +- mutex_unlock(&dd->eep_lock); +- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", +- csum, ifp->if_csum); +- ret = 1; +- goto free_bail; +- } +- hi_water = 0; +- spin_lock_irqsave(&dd->eep_st_lock, flags); +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- int new_val = dd->eep_st_new_errs[idx]; +- if (new_val) { +- /* +- * If we have seen any errors, add to EEPROM values +- * We need to saturate at 0xFF (255) and we also +- * would need to adjust the checksum if we were +- * trying to minimize EEPROM traffic +- * Note that we add to actual current count in EEPROM, +- * in case it was altered while we were running. +- */ +- new_val += ifp->if_errcntp[idx]; +- if (new_val > 0xFF) +- new_val = 0xFF; +- if (ifp->if_errcntp[idx] != new_val) { +- ifp->if_errcntp[idx] = new_val; +- hi_water = offsetof(struct qib_flash, +- if_errcntp) + idx; +- } +- /* +- * update our shadow (used to minimize EEPROM +- * traffic), to match what we are about to write. +- */ +- dd->eep_st_errs[idx] = new_val; +- dd->eep_st_new_errs[idx] = 0; +- } +- } +- /* +- * Now update active-time. 
We would like to round to the nearest hour +- * but unless atomic_t are sure to be proper signed ints we cannot, +- * because we need to account for what we "transfer" to EEPROM and +- * if we log an hour at 31 minutes, then we would need to set +- * active_time to -29 to accurately count the _next_ hour. +- */ +- if (new_time >= 3600) { +- new_hrs = new_time / 3600; +- atomic_sub((new_hrs * 3600), &dd->active_time); +- new_hrs += dd->eep_hrs; +- if (new_hrs > 0xFFFF) +- new_hrs = 0xFFFF; +- dd->eep_hrs = new_hrs; +- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { +- ifp->if_powerhour[0] = new_hrs & 0xFF; +- hi_water = offsetof(struct qib_flash, if_powerhour); +- } +- if ((new_hrs >> 8) != ifp->if_powerhour[1]) { +- ifp->if_powerhour[1] = new_hrs >> 8; +- hi_water = offsetof(struct qib_flash, if_powerhour) + 1; +- } +- } +- /* +- * There is a tiny possibility that we could somehow fail to write +- * the EEPROM after updating our shadows, but problems from holding +- * the spinlock too long are a much bigger issue. +- */ +- spin_unlock_irqrestore(&dd->eep_st_lock, flags); +- if (hi_water) { +- /* we made some change to the data, uopdate cksum and write */ +- csum = flash_csum(ifp, 1); +- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); +- } +- mutex_unlock(&dd->eep_lock); +- if (ret) +- qib_dev_err(dd, "Failed updating EEPROM\n"); +- +-free_bail: +- vfree(buf); +-bail: +- return ret; +-} +- +-/** +- * qib_inc_eeprom_err - increment one of the four error counters +- * that are logged to EEPROM. +- * @dd: the qlogic_ib device +- * @eidx: 0..3, the counter to increment +- * @incr: how much to add +- * +- * Each counter is 8-bits, and saturates at 255 (0xFF). They +- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() +- * is called, but it can only be called in a context that allows sleep. +- * This function can be called even at interrupt level. 
+- */ +-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) +-{ +- uint new_val; +- unsigned long flags; +- +- spin_lock_irqsave(&dd->eep_st_lock, flags); +- new_val = dd->eep_st_new_errs[eidx] + incr; +- if (new_val > 255) +- new_val = 255; +- dd->eep_st_new_errs[eidx] = new_val; +- spin_unlock_irqrestore(&dd->eep_st_lock, flags); +-} +diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c +index 84e593d..295f631 100644 +--- a/drivers/infiniband/hw/qib/qib_iba6120.c ++++ b/drivers/infiniband/hw/qib/qib_iba6120.c +@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque) + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); + + qib_chk_6120_errormask(dd); +diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c +index 454c2e7..c86e71b 100644 +--- a/drivers/infiniband/hw/qib/qib_iba7220.c ++++ b/drivers/infiniband/hw/qib/qib_iba7220.c +@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque) + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); + done: + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); +diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c +index d1bd213..0f8d1f0 100644 +--- a/drivers/infiniband/hw/qib/qib_iba7322.c ++++ b/drivers/infiniband/hw/qib/qib_iba7322.c +@@ -5191,8 +5191,6 @@ static void qib_get_7322_faststats(unsigned long opaque) + spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); + traffic_wds -= ppd->dd->traffic_wds; + ppd->dd->traffic_wds += traffic_wds; +- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) +- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); + spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); + if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & + QIB_IB_QDR) && +diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c +index 76c3e17..8c9bb6c 100644 +--- a/drivers/infiniband/hw/qib/qib_init.c ++++ b/drivers/infiniband/hw/qib/qib_init.c +@@ -922,7 +922,6 @@ static void qib_shutdown_device(struct qib_devdata *dd) + } + } + +- qib_update_eeprom_log(dd); + } + + /** +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c +index 3c8e4e3..b9ccbda 100644 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c +@@ -611,28 +611,6 @@ bail: + return ret < 0 ? ret : count; + } + +-static ssize_t show_logged_errs(struct device *device, +- struct device_attribute *attr, char *buf) +-{ +- struct qib_ibdev *dev = +- container_of(device, struct qib_ibdev, ibdev.dev); +- struct qib_devdata *dd = dd_from_dev(dev); +- int idx, count; +- +- /* force consistency with actual EEPROM */ +- if (qib_update_eeprom_log(dd) != 0) +- return -ENXIO; +- +- count = 0; +- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { +- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", +- dd->eep_st_errs[idx], +- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); +- } +- +- return count; +-} +- + /* + * Dump tempsense regs. in decimal, to ease shell-scripts. 
+ */ +@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); + static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); + static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); + static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); +-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); + static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); + static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); + static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); +@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = { + &dev_attr_nfreectxts, + &dev_attr_serial, + &dev_attr_boardversion, +- &dev_attr_logged_errors, + &dev_attr_tempsense, + &dev_attr_localbus_info, + &dev_attr_chip_reset, +diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c +index 05f371d..d4b0a31 100644 +--- a/drivers/input/tablet/wacom_wac.c ++++ b/drivers/input/tablet/wacom_wac.c +@@ -700,6 +700,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) + input_report_key(input, BTN_7, (data[4] & 0x40)); /* Left */ + input_report_key(input, BTN_8, (data[4] & 0x80)); /* Down */ + input_report_key(input, BTN_0, (data[3] & 0x01)); /* Center */ ++ ++ if (data[4] | (data[3] & 0x01)) { ++ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); ++ } else { ++ input_report_abs(input, ABS_MISC, 0); ++ } + } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { + int i; + +diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c +index db404a0..d2a8d64 100644 +--- a/drivers/md/dm-io.c ++++ b/drivers/md/dm-io.c +@@ -292,6 +292,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, + unsigned short logical_block_size = queue_logical_block_size(q); + sector_t num_sectors; + ++ /* Reject unsupported discard requests */ ++ if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { ++ dec_count(io, region, -EOPNOTSUPP); ++ return; ++ } ++ + /* + * where->count may be zero if rw holds a flush and we need to + * send a zero-sized flush. +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 7dfdb5c..089d627 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context) + return; + } + ++ /* ++ * If the bio is discard, return an error, but do not ++ * degrade the array. 
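/*
 * The two device-mapper hunks here apply the same rule: a discard that
 * the underlying queue cannot honour should complete with -EOPNOTSUPP
 * rather than be treated as a write failure, which would needlessly
 * degrade a mirror. A condensed sketch of that guard, with the block
 * layer reduced to plain parameters (the REQ_DISCARD bit value is
 * illustrative only):
 */
#include <stdbool.h>

#define EOPNOTSUPP	95		/* matches Linux's errno value */
#define REQ_DISCARD	(1 << 7)	/* illustrative bit only */

static bool reject_unsupported_discard(int rw, bool queue_discard,
				       void (*endio)(int error))
{
	if ((rw & REQ_DISCARD) && !queue_discard) {
		endio(-EOPNOTSUPP);	/* report, do not fail the mirror */
		return true;
	}
	return false;	/* caller proceeds with normal submission */
}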
++ */ ++ if (bio->bi_rw & REQ_DISCARD) { ++ bio_endio(bio, -EOPNOTSUPP); ++ return; ++ } ++ + for (i = 0; i < ms->nr_mirrors; i++) + if (test_bit(i, &error)) + fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index ebddef5..c356a10 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -1440,8 +1440,6 @@ out: + full_bio->bi_private = pe->full_bio_private; + atomic_inc(&full_bio->bi_remaining); + } +- free_pending_exception(pe); +- + increment_pending_exceptions_done_count(); + + up_write(&s->lock); +@@ -1458,6 +1456,8 @@ out: + } + + retry_origin_bios(s, origin_bios); ++ ++ free_pending_exception(pe); + } + + static void commit_callback(void *context, int success) +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 65ee3a0..1582c3da 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2288,7 +2288,7 @@ int dm_setup_md_queue(struct mapped_device *md) + return 0; + } + +-static struct mapped_device *dm_find_md(dev_t dev) ++struct mapped_device *dm_get_md(dev_t dev) + { + struct mapped_device *md; + unsigned minor = MINOR(dev); +@@ -2299,12 +2299,15 @@ static struct mapped_device *dm_find_md(dev_t dev) + spin_lock(&_minor_lock); + + md = idr_find(&_minor_idr, minor); +- if (md && (md == MINOR_ALLOCED || +- (MINOR(disk_devt(dm_disk(md))) != minor) || +- dm_deleting_md(md) || +- test_bit(DMF_FREEING, &md->flags))) { +- md = NULL; +- goto out; ++ if (md) { ++ if ((md == MINOR_ALLOCED || ++ (MINOR(disk_devt(dm_disk(md))) != minor) || ++ dm_deleting_md(md) || ++ test_bit(DMF_FREEING, &md->flags))) { ++ md = NULL; ++ goto out; ++ } ++ dm_get(md); + } + + out: +@@ -2312,16 +2315,6 @@ out: + + return md; + } +- +-struct mapped_device *dm_get_md(dev_t dev) +-{ +- struct mapped_device *md = dm_find_md(dev); +- +- if (md) +- dm_get(md); +- +- return md; +-} + EXPORT_SYMBOL_GPL(dm_get_md); + + void *dm_get_mdptr(struct mapped_device *md) +diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c +index cdd31c2..b296538 100644 +--- a/drivers/misc/mei/init.c ++++ b/drivers/misc/mei/init.c +@@ -275,6 +275,8 @@ void mei_stop(struct mei_device *dev) + + dev->dev_state = MEI_DEV_POWER_DOWN; + mei_reset(dev); ++ /* move device to disabled state unconditionally */ ++ dev->dev_state = MEI_DEV_DISABLED; + + mutex_unlock(&dev->device_lock); + +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 07c942b..e8c21f9 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -637,12 +637,15 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, + } /* else everything is zero */ + } + ++/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ ++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) ++ + /* Get packet from user space buffer */ + static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, + const struct iovec *iv, unsigned long total_len, + size_t count, int noblock) + { +- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); ++ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); + struct sk_buff *skb; + struct macvlan_dev *vlan; + unsigned long len = total_len; +@@ -701,7 +704,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, + linear = vnet_hdr.hdr_len; + } + +- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, ++ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, + linear, noblock, &err); + if (!skb) + goto err; +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 76d96b9..1d56878 100644 +--- a/drivers/net/phy/phy.c ++++ 
b/drivers/net/phy/phy.c +@@ -194,6 +194,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features) + } + + /** ++ * phy_check_valid - check if there is a valid PHY setting which matches ++ * speed, duplex, and feature mask ++ * @speed: speed to match ++ * @duplex: duplex to match ++ * @features: A mask of the valid settings ++ * ++ * Description: Returns true if there is a valid setting, false otherwise. ++ */ ++static inline bool phy_check_valid(int speed, int duplex, u32 features) ++{ ++ unsigned int idx; ++ ++ idx = phy_find_valid(phy_find_setting(speed, duplex), features); ++ ++ return settings[idx].speed == speed && settings[idx].duplex == duplex && ++ (settings[idx].setting & features); ++} ++ ++/** + * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex + * @phydev: the target phy_device struct + * +@@ -955,7 +974,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) + int eee_lp, eee_cap, eee_adv; + u32 lp, cap, adv; + int status; +- unsigned int idx; + + /* Read phy status to properly get the right settings */ + status = phy_read_status(phydev); +@@ -987,8 +1005,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) + + adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); + lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); +- idx = phy_find_setting(phydev->speed, phydev->duplex); +- if (!(lp & adv & settings[idx].setting)) ++ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) + return -EPROTONOSUPPORT; + + if (clk_stop_enable) { +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 32efe83..c28e2da 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -42,9 +42,7 @@ + + static struct team_port *team_port_get_rcu(const struct net_device *dev) + { +- struct team_port *port = rcu_dereference(dev->rx_handler_data); +- +- return team_port_exists(dev) ? 
port : NULL; ++ return rcu_dereference(dev->rx_handler_data); + } + + static struct team_port *team_port_get_rtnl(const struct net_device *dev) +@@ -1725,11 +1723,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) + if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); +- rcu_read_lock(); +- list_for_each_entry_rcu(port, &team->port_list, list) ++ mutex_lock(&team->lock); ++ list_for_each_entry(port, &team->port_list, list) + if (team->ops.port_change_dev_addr) + team->ops.port_change_dev_addr(team, port); +- rcu_read_unlock(); ++ mutex_unlock(&team->lock); + return 0; + } + +diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c +index 3d18bb0..1bfe0fc 100644 +--- a/drivers/net/usb/plusb.c ++++ b/drivers/net/usb/plusb.c +@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = { + }, { + USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ + .driver_info = (unsigned long) &prolific_info, ++}, { ++ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB ++ * Host-to-Host Cable ++ */ ++ .driver_info = (unsigned long) &prolific_info, + }, + + { }, // END +diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c +index a3399c4..b9b651e 100644 +--- a/drivers/net/wireless/ath/ath5k/reset.c ++++ b/drivers/net/wireless/ath/ath5k/reset.c +@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) + regval = ioread32(reg); + iowrite32(regval | val, reg); + regval = ioread32(reg); +- usleep_range(100, 150); ++ udelay(100); /* NB: should be atomic */ + + /* Bring BB/MAC out of reset */ + iowrite32(regval & ~val, reg); +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 5642a9b..953bd0b 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -581,7 +581,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) + "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); + return NULL; + } +- shost->dma_boundary = pcidev->dma_mask; + shost->max_id = BE2_MAX_SESSIONS; + shost->max_channel = 0; + shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index df5e961..eb81c98 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -522,7 +522,7 @@ static ssize_t + sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) + { + sg_io_hdr_t *hp = &srp->header; +- int err = 0; ++ int err = 0, err2; + int len; + + if (count < SZ_SG_IO_HDR) { +@@ -551,8 +551,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) + goto err_out; + } + err_out: +- err = sg_finish_rem_req(srp); +- return (0 == err) ? count : err; ++ err2 = sg_finish_rem_req(srp); ++ return err ? : err2 ? 
: count; + } + + static ssize_t +diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c +index 1e9da40..5287810 100644 +--- a/drivers/staging/comedi/comedi_compat32.c ++++ b/drivers/staging/comedi/comedi_compat32.c +@@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg) + { + struct comedi_cmd __user *cmd; + struct comedi32_cmd_struct __user *cmd32; +- int rc; ++ int rc, err; + + cmd32 = compat_ptr(arg); + cmd = compat_alloc_user_space(sizeof(*cmd)); +@@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg) + if (rc) + return rc; + +- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); ++ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); ++ if (rc == -EAGAIN) { ++ /* Special case: copy cmd back to user. */ ++ err = put_compat_cmd(cmd32, cmd); ++ if (err) ++ rc = err; ++ } ++ ++ return rc; + } + + /* Handle 32-bit COMEDI_CMDTEST ioctl. */ +diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c +index 4fff173..3d1cb5b 100644 +--- a/drivers/staging/comedi/drivers/cb_pcidas64.c ++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c +@@ -441,6 +441,29 @@ static const struct comedi_lrange ai_ranges_64xx = { + } + }; + ++static const uint8_t ai_range_code_64xx[8] = { ++ 0x0, 0x1, 0x2, 0x3, /* bipolar 10, 5, 2,5, 1.25 */ ++ 0x8, 0x9, 0xa, 0xb /* unipolar 10, 5, 2.5, 1.25 */ ++}; ++ ++/* analog input ranges for 64-Mx boards */ ++static const struct comedi_lrange ai_ranges_64_mx = { ++ 7, { ++ BIP_RANGE(5), ++ BIP_RANGE(2.5), ++ BIP_RANGE(1.25), ++ BIP_RANGE(0.625), ++ UNI_RANGE(5), ++ UNI_RANGE(2.5), ++ UNI_RANGE(1.25) ++ } ++}; ++ ++static const uint8_t ai_range_code_64_mx[7] = { ++ 0x0, 0x1, 0x2, 0x3, /* bipolar 5, 2.5, 1.25, 0.625 */ ++ 0x9, 0xa, 0xb /* unipolar 5, 2.5, 1.25 */ ++}; ++ + /* analog input ranges for 60xx boards */ + static const struct comedi_lrange ai_ranges_60xx = { + 4, { +@@ -451,6 +474,10 @@ static const struct comedi_lrange ai_ranges_60xx = { + } + }; + ++static const uint8_t ai_range_code_60xx[4] = { ++ 0x0, 0x1, 0x4, 0x7 /* bipolar 10, 5, 0.5, 0.05 */ ++}; ++ + /* analog input ranges for 6030, etc boards */ + static const struct comedi_lrange ai_ranges_6030 = { + 14, { +@@ -471,6 +498,11 @@ static const struct comedi_lrange ai_ranges_6030 = { + } + }; + ++static const uint8_t ai_range_code_6030[14] = { ++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */ ++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */ ++}; ++ + /* analog input ranges for 6052, etc boards */ + static const struct comedi_lrange ai_ranges_6052 = { + 15, { +@@ -492,6 +524,11 @@ static const struct comedi_lrange ai_ranges_6052 = { + } + }; + ++static const uint8_t ai_range_code_6052[15] = { ++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, /* bipolar 10 ... 0.05 */ ++ 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf /* unipolar 10 ... 
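/*
 * Stepping back to the sg_new_read() return a few lines up: it uses
 * GCC's two-operand conditional, where (a ? : b) means (a ? a : b)
 * with a evaluated only once, to give the read error priority over the
 * request-teardown error before falling back to the byte count. A
 * standalone illustration of the chain:
 */
static long first_nonzero(long err, long err2, long count)
{
	return err ? : err2 ? : count;
}
/*
 * first_nonzero(-14, 0, 96) == -14   (read failed: -EFAULT wins)
 * first_nonzero(0, -5, 96)  == -5    (only teardown failed: -EIO)
 * first_nonzero(0, 0, 96)   == 96    (success: return the count)
 */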
0.1 */ ++}; ++ + /* analog input ranges for 4020 board */ + static const struct comedi_lrange ai_ranges_4020 = { + 2, { +@@ -595,6 +632,7 @@ struct pcidas64_board { + int ai_bits; /* analog input resolution */ + int ai_speed; /* fastest conversion period in ns */ + const struct comedi_lrange *ai_range_table; ++ const uint8_t *ai_range_code; + int ao_nchan; /* number of analog out channels */ + int ao_bits; /* analog output resolution */ + int ao_scan_speed; /* analog output scan speed */ +@@ -653,6 +691,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -668,6 +707,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -682,7 +722,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -697,7 +738,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -712,7 +754,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ao_range_table = &ao_ranges_64xx, + .ao_range_code = ao_range_code_64xx, + .ai_fifo = &ai_fifo_64xx, +@@ -727,6 +770,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_bits = 16, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -742,6 +786,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -756,6 +801,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -771,6 +817,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -786,6 +833,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 
10000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -801,6 +849,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -814,6 +863,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -825,6 +875,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6030, ++ .ai_range_code = ai_range_code_6030, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -837,6 +888,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 0, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, + .has_8255 = 0, + }, +@@ -850,6 +902,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -865,6 +918,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 100000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_60xx, ++ .ai_range_code = ai_range_code_60xx, + .ao_range_table = &range_bipolar10, + .ao_range_code = ao_range_code_60xx, + .ai_fifo = &ai_fifo_60xx, +@@ -880,6 +934,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -895,6 +950,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 3333, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -910,6 +966,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -925,6 +982,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 1000, + .layout = LAYOUT_60XX, + .ai_range_table = &ai_ranges_6052, ++ .ai_range_code = ai_range_code_6052, + .ao_range_table = &ao_ranges_6030, + .ao_range_code = ao_range_code_6030, + .ai_fifo = &ai_fifo_60xx, +@@ -959,6 +1017,7 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, + .ai_range_table = &ai_ranges_64xx, ++ .ai_range_code = ai_range_code_64xx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -970,7 +1029,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = 
&ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -982,7 +1042,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -994,7 +1055,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 0, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1006,7 +1068,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1018,7 +1081,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1030,7 +1094,8 @@ static const struct pcidas64_board pcidas64_boards[] = { + .ao_nchan = 2, + .ao_scan_speed = 10000, + .layout = LAYOUT_64XX, +- .ai_range_table = &ai_ranges_64xx, ++ .ai_range_table = &ai_ranges_64_mx, ++ .ai_range_code = ai_range_code_64_mx, + .ai_fifo = ai_fifo_64xx, + .has_8255 = 1, + }, +@@ -1127,45 +1192,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev, + unsigned int range_index) + { + const struct pcidas64_board *thisboard = comedi_board(dev); +- const struct comedi_krange *range = +- &thisboard->ai_range_table->range[range_index]; +- unsigned int bits = 0; + +- switch (range->max) { +- case 10000000: +- bits = 0x000; +- break; +- case 5000000: +- bits = 0x100; +- break; +- case 2000000: +- case 2500000: +- bits = 0x200; +- break; +- case 1000000: +- case 1250000: +- bits = 0x300; +- break; +- case 500000: +- bits = 0x400; +- break; +- case 200000: +- case 250000: +- bits = 0x500; +- break; +- case 100000: +- bits = 0x600; +- break; +- case 50000: +- bits = 0x700; +- break; +- default: +- comedi_error(dev, "bug! in ai_range_bits_6xxx"); +- break; +- } +- if (range->min == 0) +- bits += 0x900; +- return bits; ++ return thisboard->ai_range_code[range_index] << 8; + } + + static unsigned int hw_revision(const struct comedi_device *dev, +diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c +index 9ec1df9..be89260 100644 +--- a/drivers/staging/iio/adc/mxs-lradc.c ++++ b/drivers/staging/iio/adc/mxs-lradc.c +@@ -214,11 +214,17 @@ struct mxs_lradc { + unsigned long is_divided; + + /* +- * Touchscreen LRADC channels receives a private slot in the CTRL4 +- * register, the slot #7. Therefore only 7 slots instead of 8 in the +- * CTRL4 register can be mapped to LRADC channels when using the +- * touchscreen. +- * ++ * When the touchscreen is enabled, we give it two private virtual ++ * channels: #6 and #7. This means that only 6 virtual channels (instead ++ * of 8) will be available for buffered capture. 
++ */ ++#define TOUCHSCREEN_VCHANNEL1 7 ++#define TOUCHSCREEN_VCHANNEL2 6 ++#define BUFFER_VCHANS_LIMITED 0x3f ++#define BUFFER_VCHANS_ALL 0xff ++ u8 buffer_vchans; ++ ++ /* + * Furthermore, certain LRADC channels are shared between touchscreen + * and/or touch-buttons and generic LRADC block. Therefore when using + * either of these, these channels are not available for the regular +@@ -342,6 +348,9 @@ struct mxs_lradc { + #define LRADC_CTRL4 0x140 + #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) + #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) ++#define LRADC_CTRL4_LRADCSELECT(n, x) \ ++ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ ++ LRADC_CTRL4_LRADCSELECT_MASK(n)) + + #define LRADC_RESOLUTION 12 + #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) +@@ -423,6 +432,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc) + LRADC_STATUS_TOUCH_DETECT_RAW); + } + ++static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch, ++ unsigned ch) ++{ ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch), ++ LRADC_CTRL4); ++ mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4); ++} ++ + static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) + { + /* +@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) + LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), + LRADC_DELAY(3)); + +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | +- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | +- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1); + +- /* wake us again, when the complete conversion is done */ +- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1); + /* + * after changing the touchscreen plates setting + * the signals need some initial time to settle. Start the +@@ -508,12 +521,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, + LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), + LRADC_DELAY(3)); + +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | +- LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | +- LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1); + +- /* wake us again, when the conversions are done */ +- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1); + /* + * after changing the touchscreen plates setting + * the signals need some initial time to settle. 
Start the +@@ -578,36 +587,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc, + #define TS_CH_XM 4 + #define TS_CH_YM 5 + +-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc) +-{ +- u32 reg; +- int val; +- +- reg = readl(lradc->base + LRADC_CTRL1); +- +- /* only channels 3 to 5 are of interest here */ +- if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) { +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) | +- LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1); +- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP); +- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) { +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) | +- LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1); +- val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM); +- } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) { +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) | +- LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1); +- val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM); +- } else { +- return -EIO; +- } +- +- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); +- mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); +- +- return val; +-} +- + /* + * YP(open)--+-------------+ + * | |--+ +@@ -651,7 +630,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc) + mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); + + lradc->cur_plate = LRADC_SAMPLE_X; +- mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); ++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP); ++ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); + } + + /* +@@ -672,7 +652,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc) + mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); + + lradc->cur_plate = LRADC_SAMPLE_Y; +- mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); ++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM); ++ mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); + } + + /* +@@ -693,7 +674,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc) + mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); + + lradc->cur_plate = LRADC_SAMPLE_PRESSURE; +- mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); ++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM); ++ mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP); ++ mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, ++ TOUCHSCREEN_VCHANNEL1); + } + + static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) +@@ -706,6 +690,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) + mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); + } + ++static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc) ++{ ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, ++ LRADC_CTRL1); ++ mxs_lradc_reg_set(lradc, ++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); ++ /* ++ * start with the Y-pos, because it uses nearly the same plate ++ * settings like the touch detection ++ */ ++ mxs_lradc_prepare_y_pos(lradc); ++} ++ + static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) + { + input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); +@@ -723,10 +720,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc) + * start a dummy conversion to burn time to settle the signals + * note: we are not interested in the conversion's value + */ +- mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); +- mxs_lradc_reg_clear(lradc, 
LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); +- mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); +- mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | ++ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1)); ++ mxs_lradc_reg_clear(lradc, ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); ++ mxs_lradc_reg_wrt(lradc, ++ LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) | + LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ + LRADC_DELAY(2)); + } +@@ -758,59 +757,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid) + + /* if it is released, wait for the next touch via IRQ */ + lradc->cur_plate = LRADC_TOUCH; +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); ++ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); ++ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | ++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); + mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); + } + + /* touchscreen's state machine */ + static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) + { +- int val; +- + switch (lradc->cur_plate) { + case LRADC_TOUCH: +- /* +- * start with the Y-pos, because it uses nearly the same plate +- * settings like the touch detection +- */ +- if (mxs_lradc_check_touch_event(lradc)) { +- mxs_lradc_reg_clear(lradc, +- LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, +- LRADC_CTRL1); +- mxs_lradc_prepare_y_pos(lradc); +- } ++ if (mxs_lradc_check_touch_event(lradc)) ++ mxs_lradc_start_touch_event(lradc); + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, + LRADC_CTRL1); + return; + + case LRADC_SAMPLE_Y: +- val = mxs_lradc_read_ts_channel(lradc); +- if (val < 0) { +- mxs_lradc_enable_touch_detection(lradc); /* re-start */ +- return; +- } +- lradc->ts_y_pos = val; ++ lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc, ++ TOUCHSCREEN_VCHANNEL1); + mxs_lradc_prepare_x_pos(lradc); + return; + + case LRADC_SAMPLE_X: +- val = mxs_lradc_read_ts_channel(lradc); +- if (val < 0) { +- mxs_lradc_enable_touch_detection(lradc); /* re-start */ +- return; +- } +- lradc->ts_x_pos = val; ++ lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc, ++ TOUCHSCREEN_VCHANNEL1); + mxs_lradc_prepare_pressure(lradc); + return; + + case LRADC_SAMPLE_PRESSURE: +- lradc->ts_pressure = +- mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); ++ lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc, ++ TOUCHSCREEN_VCHANNEL2, ++ TOUCHSCREEN_VCHANNEL1); + mxs_lradc_complete_touch_event(lradc); + return; + + case LRADC_SAMPLE_VALID: +- val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */ + mxs_lradc_finish_touch_event(lradc, 1); + break; + } +@@ -842,9 +827,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val) + * used if doing raw sampling. 
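/*
 * The virtual-channel remapping introduced above is a masked
 * read-modify-write of a 4-bit selector field: mxs_lradc_map_channel()
 * clears the field with LRADC_CTRL4_LRADCSELECT_MASK and sets it with
 * LRADC_CTRL4_LRADCSELECT. A minimal model using a plain variable in
 * place of the CTRL4 register:
 */
#include <stdint.h>

static uint32_t map_channel(uint32_t ctrl4, unsigned int vch,
			    unsigned int ch)
{
	uint32_t mask = 0xfu << (vch * 4);	/* LRADCSELECT_MASK(vch) */

	ctrl4 &= ~mask;					/* reg_clear step */
	ctrl4 |= ((uint32_t)ch << (vch * 4)) & mask;	/* reg_set step */
	return ctrl4;
}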
+ */ + if (lradc->soc == IMX28_LRADC) +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, ++ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), + LRADC_CTRL1); +- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); ++ mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0); + + /* Enable / disable the divider per requirement */ + if (test_bit(chan, &lradc->is_divided)) +@@ -1091,9 +1076,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc) + { + /* stop all interrupts from firing */ + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | +- LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | +- LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), +- LRADC_CTRL1); ++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | ++ LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); + + /* Power-down touchscreen touch-detect circuitry. */ + mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); +@@ -1159,25 +1143,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data) + struct iio_dev *iio = data; + struct mxs_lradc *lradc = iio_priv(iio); + unsigned long reg = readl(lradc->base + LRADC_CTRL1); ++ uint32_t clr_irq = mxs_lradc_irq_mask(lradc); + const uint32_t ts_irq_mask = + LRADC_CTRL1_TOUCH_DETECT_IRQ | +- LRADC_CTRL1_LRADC_IRQ(2) | +- LRADC_CTRL1_LRADC_IRQ(3) | +- LRADC_CTRL1_LRADC_IRQ(4) | +- LRADC_CTRL1_LRADC_IRQ(5); ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2); + + if (!(reg & mxs_lradc_irq_mask(lradc))) + return IRQ_NONE; + +- if (lradc->use_touchscreen && (reg & ts_irq_mask)) ++ if (lradc->use_touchscreen && (reg & ts_irq_mask)) { + mxs_lradc_handle_touch(lradc); ++ /* Make sure we don't clear the next conversion's interrupt. */ ++ clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | ++ LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2)); ++ } + + if (iio_buffer_enabled(iio)) +- iio_trigger_poll(iio->trig, iio_get_time_ns()); +- else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) ++ if (iio_buffer_enabled(iio)) { ++ if (reg & lradc->buffer_vchans) ++ iio_trigger_poll(iio->trig, iio_get_time_ns()); ++ } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) { + complete(&lradc->completion); ++ } + +- mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1); ++ mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1); + + return IRQ_HANDLED; + } +@@ -1288,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) + } + + if (lradc->soc == IMX28_LRADC) +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, +- LRADC_CTRL1); +- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); ++ mxs_lradc_reg_clear(lradc, ++ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, ++ LRADC_CTRL1); ++ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); + + for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { + ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); +@@ -1323,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio) + mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | + LRADC_DELAY_KICK, LRADC_DELAY(0)); + +- mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); ++ mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); + if (lradc->soc == IMX28_LRADC) +- mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, +- LRADC_CTRL1); ++ mxs_lradc_reg_clear(lradc, ++ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, ++ LRADC_CTRL1); + + kfree(lradc->buffer); + mutex_unlock(&lradc->lock); +@@ -1352,7 
+1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio, + if (lradc->use_touchbutton) + rsvd_chans++; + if (lradc->use_touchscreen) +- rsvd_chans++; ++ rsvd_chans += 2; + + /* Test for attempts to map channels with special mode of operation. */ + if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) +@@ -1412,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = { + .channel = 8, + .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, + }, ++ /* Hidden channel to keep indexes */ ++ { ++ .type = IIO_TEMP, ++ .indexed = 1, ++ .scan_index = -1, ++ .channel = 9, ++ }, + MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ + MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ + MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ +@@ -1563,6 +1562,11 @@ static int mxs_lradc_probe(struct platform_device *pdev) + + touch_ret = mxs_lradc_probe_touchscreen(lradc, node); + ++ if (touch_ret == 0) ++ lradc->buffer_vchans = BUFFER_VCHANS_LIMITED; ++ else ++ lradc->buffer_vchans = BUFFER_VCHANS_ALL; ++ + /* Grab all IRQ sources */ + for (i = 0; i < of_cfg->irq_count; i++) { + lradc->irq[i] = platform_get_irq(pdev, i); +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 1205dbd..0fccdcf 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -1877,8 +1877,8 @@ static int core_scsi3_update_aptpl_buf( + } + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { +- pr_err("Unable to update renaming" +- " APTPL metadata\n"); ++ pr_err("Unable to update renaming APTPL metadata," ++ " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } +@@ -1895,8 +1895,8 @@ static int core_scsi3_update_aptpl_buf( + lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { +- pr_err("Unable to update renaming" +- " APTPL metadata\n"); ++ pr_err("Unable to update renaming APTPL metadata," ++ " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } +@@ -1959,7 +1959,7 @@ static int __core_scsi3_write_aptpl_to_file( + static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) + { + unsigned char *buf; +- int rc; ++ int rc, len = PR_APTPL_BUF_LEN; + + if (!aptpl) { + char *null_buf = "No Registrations or Reservations\n"; +@@ -1973,25 +1973,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b + + return 0; + } +- +- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL); ++retry: ++ buf = vzalloc(len); + if (!buf) + return TCM_OUT_OF_RESOURCES; + +- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN); ++ rc = core_scsi3_update_aptpl_buf(dev, buf, len); + if (rc < 0) { +- kfree(buf); +- return TCM_OUT_OF_RESOURCES; ++ vfree(buf); ++ len *= 2; ++ goto retry; + } + + rc = __core_scsi3_write_aptpl_to_file(dev, buf); + if (rc != 0) { + pr_err("SPC-3 PR: Could not update APTPL\n"); +- kfree(buf); ++ vfree(buf); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + dev->t10_pr.pr_aptpl_active = 1; +- kfree(buf); ++ vfree(buf); + pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); + return 0; + } +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index 5216acd..68511e8 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -266,6 +266,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) + static sense_reason_t + sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) + { ++ struct se_device *dev = 
cmd->se_dev; ++ sector_t end_lba = dev->transport->get_blocks(dev) + 1; + unsigned int sectors = sbc_get_write_same_sectors(cmd); + + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { +@@ -279,6 +281,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + sectors, cmd->se_dev->dev_attrib.max_write_same_len); + return TCM_INVALID_CDB_FIELD; + } ++ /* ++ * Sanity check for LBA wrap and request past end of device. ++ */ ++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || ++ ((cmd->t_task_lba + sectors) > end_lba)) { ++ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", ++ (unsigned long long)end_lba, cmd->t_task_lba, sectors); ++ return TCM_ADDRESS_OUT_OF_RANGE; ++ } ++ + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ + if (flags[0] & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); +@@ -911,7 +923,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + unsigned long long end_lba; + + end_lba = dev->transport->get_blocks(dev) + 1; +- if (cmd->t_task_lba + sectors > end_lba) { ++ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || ++ ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("cmd exceeds last lba %llu " + "(lba %llu, sectors %u)\n", + end_lba, cmd->t_task_lba, sectors); +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 25d0741..39988fa 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -996,8 +996,8 @@ EXPORT_SYMBOL(start_tty); + /* We limit tty time update visibility to every 8 seconds or so. */ + static void tty_update_time(struct timespec *time) + { +- unsigned long sec = get_seconds() & ~7; +- if ((long)(sec - time->tv_sec) > 0) ++ unsigned long sec = get_seconds(); ++ if (abs(sec - time->tv_sec) & ~7) + time->tv_sec = sec; + } + +diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c +index 6fd60fe..22da05d 100644 +--- a/drivers/tty/tty_ioctl.c ++++ b/drivers/tty/tty_ioctl.c +@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout) + #endif + if (!timeout) + timeout = MAX_SCHEDULE_TIMEOUT; ++ + if (wait_event_interruptible_timeout(tty->write_wait, +- !tty_chars_in_buffer(tty), timeout) >= 0) { +- if (tty->ops->wait_until_sent) +- tty->ops->wait_until_sent(tty, timeout); ++ !tty_chars_in_buffer(tty), timeout) < 0) { ++ return; + } ++ ++ if (timeout == MAX_SCHEDULE_TIMEOUT) ++ timeout = 0; ++ ++ if (tty->ops->wait_until_sent) ++ tty->ops->wait_until_sent(tty, timeout); + } + EXPORT_SYMBOL(tty_wait_until_sent); + +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 9ca7716..45b7b96 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb) + as->status = urb->status; + signr = as->signr; + if (signr) { ++ memset(&sinfo, 0, sizeof(sinfo)); + sinfo.si_signo = as->signr; + sinfo.si_errno = as->status; + sinfo.si_code = SI_ASYNCIO; +@@ -2227,6 +2228,7 @@ static void usbdev_remove(struct usb_device *udev) + wake_up_all(&ps->wait); + list_del_init(&ps->list); + if (ps->discsignr) { ++ memset(&sinfo, 0, sizeof(sinfo)); + sinfo.si_signo = ps->discsignr; + sinfo.si_errno = EPIPE; + sinfo.si_code = SI_ASYNCIO; +diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c +index 2a6841c..cfca302 100644 +--- a/drivers/usb/dwc3/dwc3-omap.c ++++ b/drivers/usb/dwc3/dwc3-omap.c +@@ -211,6 +211,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value) + omap->irq0_offset, value); + } + ++static void 
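/*
 * The range test added twice above relies on unsigned wraparound: if
 * lba + sectors overflows a 64-bit LBA it wraps and compares less than
 * the original lba, so both the wrap case and the past-end case are
 * caught with two comparisons. A self-contained sketch:
 */
#include <stdbool.h>
#include <stdint.h>

static bool write_same_in_range(uint64_t lba, uint32_t sectors,
				uint64_t end_lba)
{
	return (lba + sectors) >= lba &&	/* no 64-bit wrap */
	       (lba + sectors) <= end_lba;	/* not past the device */
}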
dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value) ++{ ++ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC + ++ omap->irqmisc_offset, value); ++} ++ ++static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value) ++{ ++ dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 - ++ omap->irq0_offset, value); ++} ++ + static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, + enum omap_dwc3_vbus_id_status status) + { +@@ -351,9 +363,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap) + + static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) + { ++ u32 reg; ++ + /* disable all IRQs */ +- dwc3_omap_write_irqmisc_set(omap, 0x00); +- dwc3_omap_write_irq0_set(omap, 0x00); ++ reg = USBOTGSS_IRQO_COREIRQ_ST; ++ dwc3_omap_write_irq0_clr(omap, reg); ++ ++ reg = (USBOTGSS_IRQMISC_OEVT | ++ USBOTGSS_IRQMISC_DRVVBUS_RISE | ++ USBOTGSS_IRQMISC_CHRGVBUS_RISE | ++ USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | ++ USBOTGSS_IRQMISC_IDPULLUP_RISE | ++ USBOTGSS_IRQMISC_DRVVBUS_FALL | ++ USBOTGSS_IRQMISC_CHRGVBUS_FALL | ++ USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | ++ USBOTGSS_IRQMISC_IDPULLUP_FALL); ++ ++ dwc3_omap_write_irqmisc_clr(omap, reg); + } + + static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index faa8b98..a95eee8 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -2133,7 +2133,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, + if (event_trb != ep_ring->dequeue) { + /* The event was for the status stage */ + if (event_trb == td->last_trb) { +- if (td->urb->actual_length != 0) { ++ if (td->urb_length_set) { + /* Don't overwrite a previously set error code + */ + if ((*status == -EINPROGRESS || *status == 0) && +@@ -2147,7 +2147,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, + td->urb->transfer_buffer_length; + } + } else { +- /* Maybe the event was for the data stage? */ ++ /* ++ * Maybe the event was for the data stage? If so, update ++ * already the actual_length of the URB and flag it as ++ * set, so that it is not overwritten in the event for ++ * the last TRB. 
++ */ ++ td->urb_length_set = true; + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 96e9e78..7225dd2 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1,3 +1,4 @@ ++ + /* + * xHCI host controller driver + * +@@ -88,9 +89,10 @@ struct xhci_cap_regs { + #define HCS_IST(p) (((p) >> 0) & 0xf) + /* bits 4:7, max number of Event Ring segments */ + #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) ++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ + /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ +-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ +-#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) ++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ ++#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) + + /* HCSPARAMS3 - hcs_params3 - bitmasks */ + /* bits 0:7, Max U1 to U0 latency for the roothub ports */ +@@ -1289,6 +1291,8 @@ struct xhci_td { + struct xhci_segment *start_seg; + union xhci_trb *first_trb; + union xhci_trb *last_trb; ++ /* actual_length of the URB has already been set */ ++ bool urb_length_set; + }; + + /* xHCI command default timeout value */ +diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c +index 9374bd2..6f91eb9 100644 +--- a/drivers/usb/serial/bus.c ++++ b/drivers/usb/serial/bus.c +@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev) + { + struct usb_serial_driver *driver; + struct usb_serial_port *port; ++ struct device *tty_dev; + int retval = 0; + int minor; + +@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev) + retval = device_create_file(dev, &dev_attr_port_number); + if (retval) { + if (driver->port_remove) +- retval = driver->port_remove(port); ++ driver->port_remove(port); + goto exit_with_autopm; + } + + minor = port->minor; +- tty_register_device(usb_serial_tty_driver, minor, dev); ++ tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev); ++ if (IS_ERR(tty_dev)) { ++ retval = PTR_ERR(tty_dev); ++ device_remove_file(dev, &dev_attr_port_number); ++ if (driver->port_remove) ++ driver->port_remove(port); ++ goto exit_with_autopm; ++ } ++ + dev_info(&port->serial->dev->dev, + "%s converter now attached to ttyUSB%d\n", + driver->description, minor); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index a2d0409..8d114b9 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ + { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ + { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ ++ { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ ++ { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ + { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index debcdef..9235005 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -812,6 
+812,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, ++ { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), +@@ -991,6 +993,23 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + /* GE Healthcare devices */ + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, ++ /* Active Research (Actisense) devices */ ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, ++ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, ++ { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index e52409c9..56b1b55 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -38,6 +38,9 @@ + + #define FTDI_LUMEL_PD12_PID 0x6002 + ++/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ ++#define CYBER_CORTEX_AV_PID 0x8698 ++ + /* + * Marvell OpenRD Base, Client + * http://www.open-rd.org +@@ -1438,3 +1441,23 @@ + */ + #define GE_HEALTHCARE_VID 0x1901 + #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 ++ ++/* ++ * Active Research (Actisense) devices ++ */ ++#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */ ++#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ ++#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ ++#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ ++#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ ++#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ ++#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ ++#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ ++#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ ++#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ ++#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */ ++#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */ ++#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */ ++#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ ++#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ ++#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ +diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c +index b63ce02..d6a1979 100644 +--- a/drivers/usb/serial/generic.c ++++ b/drivers/usb/serial/generic.c +@@ -258,7 +258,8 @@ void 
usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) + * character or at least one jiffy. + */ + period = max_t(unsigned long, (10 * HZ / bps), 1); +- period = min_t(unsigned long, period, timeout); ++ if (timeout) ++ period = min_t(unsigned long, period, timeout); + + dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", + __func__, jiffies_to_msecs(timeout), +@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) + schedule_timeout_interruptible(period); + if (signal_pending(current)) + break; +- if (time_after(jiffies, expire)) ++ if (timeout && time_after(jiffies, expire)) + break; + } + } +diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c +index ab1d690..460a406 100644 +--- a/drivers/usb/serial/mxuport.c ++++ b/drivers/usb/serial/mxuport.c +@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port) + } + + /* Initial port termios */ +- mxuport_set_termios(tty, port, NULL); ++ if (tty) ++ mxuport_set_termios(tty, port, NULL); + + /* + * TODO: use RQ_VENDOR_GET_MSR, once we know what it +diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c +index 3182c0e..e3399dc 100644 +--- a/fs/autofs4/dev-ioctl.c ++++ b/fs/autofs4/dev-ioctl.c +@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) + */ + static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) + { +- struct autofs_dev_ioctl tmp; ++ struct autofs_dev_ioctl tmp, *res; + + if (copy_from_user(&tmp, in, sizeof(tmp))) + return ERR_PTR(-EFAULT); +@@ -103,7 +103,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i + if (tmp.size < sizeof(tmp)) + return ERR_PTR(-EINVAL); + +- return memdup_user(in, tmp.size); ++ res = memdup_user(in, tmp.size); ++ if (!IS_ERR(res)) ++ res->size = tmp.size; ++ ++ return res; + } + + static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 279b06e..0a841dd 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1774,22 +1774,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, + mutex_unlock(&inode->i_mutex); + + /* +- * we want to make sure fsync finds this change +- * but we haven't joined a transaction running right now. +- * +- * Later on, someone is sure to update the inode and get the +- * real transid recorded. +- * +- * We set last_trans now to the fs_info generation + 1, +- * this will either be one more than the running transaction +- * or the generation used for the next transaction if there isn't +- * one running right now. +- * + * We also have to set last_sub_trans to the current log transid, + * otherwise subsequent syncs to a file that's been synced in this + * transaction will appear to have already occurred. 
+ */ +- BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; + BTRFS_I(inode)->last_sub_trans = root->log_transid; + if (num_written > 0) { + err = generic_write_sync(file, pos, num_written); +@@ -1892,25 +1880,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + atomic_inc(&root->log_batch); + + /* +- * check the transaction that last modified this inode +- * and see if its already been committed +- */ +- if (!BTRFS_I(inode)->last_trans) { +- mutex_unlock(&inode->i_mutex); +- goto out; +- } +- +- /* +- * if the last transaction that changed this file was before +- * the current transaction, we can bail out now without any +- * syncing ++ * If the last transaction that changed this file was before the current ++ * transaction and we have the full sync flag set in our inode, we can ++ * bail out now without any syncing. ++ * ++ * Note that we can't bail out if the full sync flag isn't set. This is ++ * because when the full sync flag is set we start all ordered extents ++ * and wait for them to fully complete - when they complete they update ++ * the inode's last_trans field through: ++ * ++ * btrfs_finish_ordered_io() -> ++ * btrfs_update_inode_fallback() -> ++ * btrfs_update_inode() -> ++ * btrfs_set_inode_last_trans() ++ * ++ * So we are sure that last_trans is up to date and can do this check to ++ * bail out safely. For the fast path, when the full sync flag is not ++ * set in our inode, we cannot do it because we start only our ordered ++ * extents and don't wait for them to complete (that is when ++ * btrfs_finish_ordered_io runs), so here at this point their last_trans ++ * value might be less than or equal to fs_info->last_trans_committed, ++ * and setting a speculative last_trans for an inode when a buffered ++ * write is made (such as fs_info->generation + 1 for example) would not ++ * be reliable since after setting the value and before fsync is called ++ * any number of transactions can start and commit (transaction kthread ++ * commits the current transaction periodically), and a transaction ++ * commit does not start nor wait for ordered extents to complete. 
+ */ + smp_mb(); + if (btrfs_inode_in_log(inode, root->fs_info->generation) || +- BTRFS_I(inode)->last_trans <= +- root->fs_info->last_trans_committed) { +- BTRFS_I(inode)->last_trans = 0; +- ++ (full_sync && BTRFS_I(inode)->last_trans <= ++ root->fs_info->last_trans_committed)) { + /* + * We've had everything committed since the last time we were + * modified so clear this flag in case it was set for whatever +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index d68a725..653cdd8 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -6870,7 +6870,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, + ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && + em->block_start != EXTENT_MAP_HOLE)) { + int type; +- int ret; + u64 block_start, orig_start, orig_block_len, ram_bytes; + + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index aeb57b98..a7f32bf 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -979,7 +979,7 @@ again: + base = btrfs_item_ptr_offset(leaf, path->slots[0]); + + while (cur_offset < item_size) { +- extref = (struct btrfs_inode_extref *)base + cur_offset; ++ extref = (struct btrfs_inode_extref *)(base + cur_offset); + + victim_name_len = btrfs_inode_extref_name_len(leaf, extref); + +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 1576195..1ff8fe5 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -245,10 +245,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) + return 0; + } + ++static void debugfs_evict_inode(struct inode *inode) ++{ ++ truncate_inode_pages(&inode->i_data, 0); ++ clear_inode(inode); ++ if (S_ISLNK(inode->i_mode)) ++ kfree(inode->i_private); ++} ++ + static const struct super_operations debugfs_super_operations = { + .statfs = simple_statfs, + .remount_fs = debugfs_remount, + .show_options = debugfs_show_options, ++ .evict_inode = debugfs_evict_inode, + }; + + static int debug_fill_super(struct super_block *sb, void *data, int silent) +@@ -465,23 +474,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) + int ret = 0; + + if (debugfs_positive(dentry)) { +- if (dentry->d_inode) { +- dget(dentry); +- switch (dentry->d_inode->i_mode & S_IFMT) { +- case S_IFDIR: +- ret = simple_rmdir(parent->d_inode, dentry); +- break; +- case S_IFLNK: +- kfree(dentry->d_inode->i_private); +- /* fall through */ +- default: +- simple_unlink(parent->d_inode, dentry); +- break; +- } +- if (!ret) +- d_delete(dentry); +- dput(dentry); +- } ++ dget(dentry); ++ if (S_ISDIR(dentry->d_inode->i_mode)) ++ ret = simple_rmdir(parent->d_inode, dentry); ++ else ++ simple_unlink(parent->d_inode, dentry); ++ if (!ret) ++ d_delete(dentry); ++ dput(dentry); + } + return ret; + } +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 3ed1be9..2ea3537 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -161,8 +161,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, + &delegation->flags); + NFS_I(inode)->delegation_state = delegation->type; + spin_unlock(&delegation->lock); +- put_rpccred(oldcred); + rcu_read_unlock(); ++ put_rpccred(oldcred); + trace_nfs4_reclaim_delegation(inode, res->delegation_type); + } else { + /* We appear to have raced with a delegation return. 
*/ +diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c +index b2e3ff3..ecdbae1 100644 +--- a/fs/nilfs2/btree.c ++++ b/fs/nilfs2/btree.c +@@ -31,6 +31,8 @@ + #include "alloc.h" + #include "dat.h" + ++static void __nilfs_btree_init(struct nilfs_bmap *bmap); ++ + static struct nilfs_btree_path *nilfs_btree_alloc_path(void) + { + struct nilfs_btree_path *path; +@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, + return ret; + } + ++/** ++ * nilfs_btree_root_broken - verify consistency of btree root node ++ * @node: btree root node to be examined ++ * @ino: inode number ++ * ++ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. ++ */ ++static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, ++ unsigned long ino) ++{ ++ int level, flags, nchildren; ++ int ret = 0; ++ ++ level = nilfs_btree_node_get_level(node); ++ flags = nilfs_btree_node_get_flags(node); ++ nchildren = nilfs_btree_node_get_nchildren(node); ++ ++ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || ++ level > NILFS_BTREE_LEVEL_MAX || ++ nchildren < 0 || ++ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { ++ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", ++ ino, level, flags, nchildren); ++ ret = 1; ++ } ++ return ret; ++} ++ + int nilfs_btree_broken_node_block(struct buffer_head *bh) + { + int ret; +@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, + + /* convert and insert */ + dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; +- nilfs_btree_init(btree); ++ __nilfs_btree_init(btree); + if (nreq != NULL) { + nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); + nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); +@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { + .bop_gather_data = NULL, + }; + +-int nilfs_btree_init(struct nilfs_bmap *bmap) ++static void __nilfs_btree_init(struct nilfs_bmap *bmap) + { + bmap->b_ops = &nilfs_btree_ops; + bmap->b_nchildren_per_block = + NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); +- return 0; ++} ++ ++int nilfs_btree_init(struct nilfs_bmap *bmap) ++{ ++ int ret = 0; ++ ++ __nilfs_btree_init(bmap); ++ ++ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), ++ bmap->b_inode->i_ino)) ++ ret = -EIO; ++ return ret; + } + + void nilfs_btree_init_gc(struct nilfs_bmap *bmap) +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index b7f268e..2e2d9d5 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -19,7 +19,6 @@ + #include <linux/mount.h> + #include <linux/init.h> + #include <linux/idr.h> +-#include <linux/namei.h> + #include <linux/bitops.h> + #include <linux/spinlock.h> + #include <linux/completion.h> +@@ -162,17 +161,6 @@ void proc_free_inum(unsigned int inum) + spin_unlock_irqrestore(&proc_inum_lock, flags); + } + +-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) +-{ +- nd_set_link(nd, __PDE_DATA(dentry->d_inode)); +- return NULL; +-} +- +-static const struct inode_operations proc_link_inode_operations = { +- .readlink = generic_readlink, +- .follow_link = proc_follow_link, +-}; +- + /* + * Don't create negative dentries here, return -ENOENT by hand + * instead. 
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 124fc43..2f2815f 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -23,6 +23,7 @@ + #include <linux/slab.h> + #include <linux/mount.h> + #include <linux/magic.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -401,6 +402,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = { + }; + #endif + ++static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) ++{ ++ struct proc_dir_entry *pde = PDE(dentry->d_inode); ++ if (unlikely(!use_pde(pde))) ++ return ERR_PTR(-EINVAL); ++ nd_set_link(nd, pde->data); ++ return pde; ++} ++ ++static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p) ++{ ++ unuse_pde(p); ++} ++ ++const struct inode_operations proc_link_inode_operations = { ++ .readlink = generic_readlink, ++ .follow_link = proc_follow_link, ++ .put_link = proc_put_link, ++}; ++ + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { + struct inode *inode = new_inode_pseudo(sb); +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index 651d09a..8b8ca1d 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -202,6 +202,7 @@ struct pde_opener { + int closing; + struct completion *c; + }; ++extern const struct inode_operations proc_link_inode_operations; + + extern const struct inode_operations proc_pid_link_inode_operations; + +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 1772fad..34932540 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -407,7 +407,7 @@ struct t10_reservation { + /* Activate Persistence across Target Power Loss enabled + * for SCSI device */ + int pr_aptpl_active; +-#define PR_APTPL_BUF_LEN 8192 ++#define PR_APTPL_BUF_LEN 262144 + u32 pr_generation; + spinlock_t registration_lock; + spinlock_t aptpl_reg_lock; +diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h +index aece134..4ad10ba 100644 +--- a/include/trace/events/kmem.h ++++ b/include/trace/events/kmem.h +@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag, + + TP_PROTO(struct page *page, + int alloc_order, int fallback_order, +- int alloc_migratetype, int fallback_migratetype, int new_migratetype), ++ int alloc_migratetype, int fallback_migratetype), + + TP_ARGS(page, + alloc_order, fallback_order, +- alloc_migratetype, fallback_migratetype, new_migratetype), ++ alloc_migratetype, fallback_migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) +@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag, + __entry->fallback_order = fallback_order; + __entry->alloc_migratetype = alloc_migratetype; + __entry->fallback_migratetype = fallback_migratetype; +- __entry->change_ownership = (new_migratetype == alloc_migratetype); ++ __entry->change_ownership = (alloc_migratetype == ++ get_pageblock_migratetype(page)); + ), + + TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", +diff --git a/mm/compaction.c b/mm/compaction.c +index 4229fc2..a522208 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -937,7 +937,7 @@ static int compact_finished(struct zone *zone, + return COMPACT_PARTIAL; + + /* Job done if allocation would set block type */ +- if (cc->order >= pageblock_order && area->nr_free) ++ if (order >= pageblock_order && area->nr_free) + return COMPACT_PARTIAL; + } + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 472259b..c3e8660 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2488,9 +2488,10 @@ 
again: + goto unlock; + + /* +- * HWPoisoned hugepage is already unmapped and dropped reference ++ * Migrating hugepage or HWPoisoned hugepage is already ++ * unmapped and its refcount is dropped, so just clear pte here. + */ +- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { ++ if (unlikely(!pte_present(pte))) { + huge_pte_clear(mm, address, ptep); + goto unlock; + } +@@ -3163,7 +3164,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + spin_unlock(ptl); + continue; + } +- if (!huge_pte_none(huge_ptep_get(ptep))) { ++ pte = huge_ptep_get(ptep); ++ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { ++ spin_unlock(ptl); ++ continue; ++ } ++ if (unlikely(is_hugetlb_entry_migration(pte))) { ++ swp_entry_t entry = pte_to_swp_entry(pte); ++ ++ if (is_write_migration_entry(entry)) { ++ pte_t newpte; ++ ++ make_migration_entry_read(&entry); ++ newpte = swp_entry_to_pte(entry); ++ set_huge_pte_at(mm, address, ptep, newpte); ++ pages++; ++ } ++ spin_unlock(ptl); ++ continue; ++ } ++ if (!huge_pte_none(pte)) { + pte = huge_ptep_get_and_clear(mm, address, ptep); + pte = pte_mkhuge(huge_pte_modify(pte, newprot)); + pte = arch_make_huge_pte(pte, vma, NULL, 0); +diff --git a/mm/memory.c b/mm/memory.c +index 7f30bea..102af09 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -4024,7 +4024,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + if (follow_phys(vma, addr, write, &prot, &phys_addr)) + return -EINVAL; + +- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); ++ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); + if (write) + memcpy_toio(maddr + offset, buf, len); + else +diff --git a/mm/mmap.c b/mm/mmap.c +index 085bcd8..d4c97ba 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -129,7 +129,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed); + */ + int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + { +- unsigned long free, allowed, reserve; ++ long free, allowed, reserve; + + vm_acct_memory(pages); + +@@ -193,7 +193,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + */ + if (mm) { + reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); +- allowed -= min(mm->total_vm / 32, reserve); ++ allowed -= min_t(long, mm->total_vm / 32, reserve); + } + + if (percpu_counter_read_positive(&vm_committed_as) < allowed) +diff --git a/mm/nommu.c b/mm/nommu.c +index 3ee4f74..76b3f90 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -1905,7 +1905,7 @@ EXPORT_SYMBOL(unmap_mapping_range); + */ + int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + { +- unsigned long free, allowed, reserve; ++ long free, allowed, reserve; + + vm_acct_memory(pages); + +@@ -1969,7 +1969,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) + */ + if (mm) { + reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); +- allowed -= min(mm->total_vm / 32, reserve); ++ allowed -= min_t(long, mm->total_vm / 32, reserve); + } + + if (percpu_counter_read_positive(&vm_committed_as) < allowed) +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index ea41913..0479732 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1081,8 +1081,8 @@ static void change_pageblock_range(struct page *pageblock_page, + * nor move CMA pages to different free lists. We don't want unmovable pages + * to be allocated from MIGRATE_CMA areas. + * +- * Returns the new migratetype of the pageblock (or the same old migratetype +- * if it was unchanged). 
++ * Returns the allocation migratetype if free pages were stolen, or the ++ * fallback migratetype if it was decided not to steal. + */ + static int try_to_steal_freepages(struct zone *zone, struct page *page, + int start_type, int fallback_type) +@@ -1113,12 +1113,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page, + + /* Claim the whole block if over half of it is free */ + if (pages >= (1 << (pageblock_order-1)) || +- page_group_by_mobility_disabled) { +- ++ page_group_by_mobility_disabled) + set_pageblock_migratetype(page, start_type); +- return start_type; +- } + ++ return start_type; + } + + return fallback_type; +@@ -1170,7 +1168,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) + set_freepage_migratetype(page, new_type); + + trace_mm_page_alloc_extfrag(page, order, current_order, +- start_migratetype, migratetype, new_type); ++ start_migratetype, migratetype); + + return page; + } +diff --git a/net/compat.c b/net/compat.c +index cbc1a2a..275af79 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -738,24 +738,18 @@ static unsigned char nas[21] = { + + asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + } + + asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, + unsigned int vlen, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, + flags | MSG_CMSG_COMPAT); + } + + asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) + { +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; + return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + } + +@@ -778,9 +772,6 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, + int datagrams; + struct timespec ktspec; + +- if (flags & MSG_CMSG_COMPAT) +- return -EINVAL; +- + if (timeout == NULL) + return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, + flags | MSG_CMSG_COMPAT, NULL); +diff --git a/net/core/dev.c b/net/core/dev.c +index 4ed77d7..f6d8d7f 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -940,7 +940,7 @@ bool dev_valid_name(const char *name) + return false; + + while (*name) { +- if (*name == '/' || isspace(*name)) ++ if (*name == '/' || *name == ':' || isspace(*name)) + return false; + name++; + } +diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c +index 9d3d9e7..372ac66 100644 +--- a/net/core/gen_stats.c ++++ b/net/core/gen_stats.c +@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) + return 0; + + nla_put_failure: ++ kfree(d->xstats); ++ d->xstats = NULL; ++ d->xstats_len = 0; + spin_unlock_bh(d->lock); + return -1; + } +@@ -217,7 +220,9 @@ int + gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) + { + if (d->compat_xstats) { +- d->xstats = st; ++ d->xstats = kmemdup(st, len, GFP_ATOMIC); ++ if (!d->xstats) ++ goto err_out; + d->xstats_len = len; + } + +@@ -225,6 +230,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) + return gnet_stats_copy(d, TCA_STATS_APP, st, len); + + return 0; ++ ++err_out: ++ d->xstats_len = 0; ++ spin_unlock_bh(d->lock); ++ return -1; + } + EXPORT_SYMBOL(gnet_stats_copy_app); + +@@ -257,6 +267,9 @@ gnet_stats_finish_copy(struct gnet_dump *d) + return -1; + } + ++ kfree(d->xstats); 
++ d->xstats = NULL; ++ d->xstats_len = 0; + spin_unlock_bh(d->lock); + return 0; + } +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index fdac61c..ca68d32 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -2812,25 +2812,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, + skb->dev = odev; + skb->pkt_type = PACKET_HOST; + ++ pktgen_finalize_skb(pkt_dev, skb, datalen); ++ + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V4_CSUM) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = 0; +- udp4_hwcsum(skb, udph->source, udph->dest); ++ udp4_hwcsum(skb, iph->saddr, iph->daddr); + } else { +- __wsum csum = udp_csum(skb); ++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0); + + /* add protocol-dependent pseudo-header */ +- udph->check = csum_tcpudp_magic(udph->source, udph->dest, ++ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + datalen + 8, IPPROTO_UDP, csum); + + if (udph->check == 0) + udph->check = CSUM_MANGLED_0; + } + +- pktgen_finalize_skb(pkt_dev, skb, datalen); +- + #ifdef CONFIG_XFRM + if (!process_ipsec(pkt_dev, skb, protocol)) + return NULL; +@@ -2946,6 +2946,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + skb->dev = odev; + skb->pkt_type = PACKET_HOST; + ++ pktgen_finalize_skb(pkt_dev, skb, datalen); ++ + if (!(pkt_dev->flags & F_UDPCSUM)) { + skb->ip_summed = CHECKSUM_NONE; + } else if (odev->features & NETIF_F_V6_CSUM) { +@@ -2954,7 +2956,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + skb->csum_offset = offsetof(struct udphdr, check); + udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); + } else { +- __wsum csum = udp_csum(skb); ++ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0); + + /* add protocol-dependent pseudo-header */ + udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum); +@@ -2963,8 +2965,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, + udph->check = CSUM_MANGLED_0; + } + +- pktgen_finalize_skb(pkt_dev, skb, datalen); +- + return skb; + } + +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index a6613ff..8aadd6a 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1264,14 +1264,10 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { + }; + + static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { +- [IFLA_VF_MAC] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_mac) }, +- [IFLA_VF_VLAN] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_vlan) }, +- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_tx_rate) }, +- [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, +- .len = sizeof(struct ifla_vf_spoofchk) }, ++ [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, ++ [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, ++ [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) }, ++ [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) }, + }; + + static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { +@@ -2034,8 +2030,16 @@ replay: + } + } + err = rtnl_configure_link(dev, ifm); +- if (err < 0) +- unregister_netdevice(dev); ++ if (err < 0) { ++ if (ops->newlink) { ++ LIST_HEAD(list_kill); ++ ++ ops->dellink(dev, &list_kill); ++ unregister_netdevice_many(&list_kill); ++ } else { ++ unregister_netdevice(dev); ++ } ++ } + out: + put_net(dest_net); + return err; +diff --git 
a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index c10a3ce..9ff497d 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -679,27 +679,30 @@ EXPORT_SYMBOL(ip_defrag); + struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) + { + struct iphdr iph; ++ int netoff; + u32 len; + + if (skb->protocol != htons(ETH_P_IP)) + return skb; + +- if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) ++ netoff = skb_network_offset(skb); ++ ++ if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) + return skb; + + if (iph.ihl < 5 || iph.version != 4) + return skb; + + len = ntohs(iph.tot_len); +- if (skb->len < len || len < (iph.ihl * 4)) ++ if (skb->len < netoff + len || len < (iph.ihl * 4)) + return skb; + + if (ip_is_fragment(&iph)) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (skb) { +- if (!pskb_may_pull(skb, iph.ihl*4)) ++ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) + return skb; +- if (pskb_trim_rcsum(skb, len)) ++ if (pskb_trim_rcsum(skb, netoff + len)) + return skb; + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + if (ip_defrag(skb, user)) +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index dd637fc..05686c4 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -843,7 +843,8 @@ static int __ip_append_data(struct sock *sk, + cork->length += length; + if (((length > mtu) || (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && +- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { ++ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && ++ (sk->sk_type == SOCK_DGRAM)) { + err = ip_ufo_append_data(sk, queue, getfrag, from, length, + hh_len, fragheaderlen, transhdrlen, + maxfraglen, flags); +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 04ce671..b94002a 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -259,6 +259,10 @@ int ping_init_sock(struct sock *sk) + kgid_t low, high; + int ret = 0; + ++#if IS_ENABLED(CONFIG_IPV6) ++ if (sk->sk_family == AF_INET6) ++ inet6_sk(sk)->ipv6only = 1; ++#endif + inet_get_ping_group_range_net(net, &low, &high); + if (gid_lte(low, group) && gid_lte(group, high)) + return 0; +@@ -305,6 +309,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + if (addr_len < sizeof(*addr)) + return -EINVAL; + ++ if (addr->sin_family != AF_INET && ++ !(addr->sin_family == AF_UNSPEC && ++ addr->sin_addr.s_addr == htonl(INADDR_ANY))) ++ return -EAFNOSUPPORT; ++ + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", + sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); + +@@ -330,7 +339,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + return -EINVAL; + + if (addr->sin6_family != AF_INET6) +- return -EINVAL; ++ return -EAFNOSUPPORT; + + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", + sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); +@@ -716,7 +725,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m + if (msg->msg_namelen < sizeof(*usin)) + return -EINVAL; + if (usin->sin_family != AF_INET) +- return -EINVAL; ++ return -EAFNOSUPPORT; + daddr = usin->sin_addr.s_addr; + /* no remote port */ + } else { +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 12f7ef0..d7907ec 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1294,7 +1294,8 @@ emsgsize: + if (((length > mtu) || + (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && +- (rt->dst.dev->features & NETIF_F_UFO)) { ++ (rt->dst.dev->features & NETIF_F_UFO) && ++ 
(sk->sk_type == SOCK_DGRAM)) { + err = ip6_ufo_append_data(sk, getfrag, from, length, + hh_len, fragheaderlen, + transhdrlen, mtu, flags, rt); +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c +index bda7429..4611995 100644 +--- a/net/ipv6/ping.c ++++ b/net/ipv6/ping.c +@@ -103,9 +103,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + + if (msg->msg_name) { + DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); +- if (msg->msg_namelen < sizeof(struct sockaddr_in6) || +- u->sin6_family != AF_INET6) { ++ if (msg->msg_namelen < sizeof(*u)) + return -EINVAL; ++ if (u->sin6_family != AF_INET6) { ++ return -EAFNOSUPPORT; + } + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != u->sin6_scope_id) { +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 6f1b850..3809ca2 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) + u32 *p = NULL; + + if (!(rt->dst.flags & DST_HOST)) +- return NULL; ++ return dst_cow_metrics_generic(dst, old); + + peer = rt6_get_peer_create(rt); + if (peer) { +diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c +index 2ba8b97..fdcb968 100644 +--- a/net/irda/ircomm/ircomm_tty.c ++++ b/net/irda/ircomm/ircomm_tty.c +@@ -818,7 +818,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) + orig_jiffies = jiffies; + + /* Set poll time to 200 ms */ +- poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); ++ poll_time = msecs_to_jiffies(200); ++ if (timeout) ++ poll_time = min_t(unsigned long, timeout, poll_time); + + spin_lock_irqsave(&self->spinlock, flags); + while (self->tx_skb && self->tx_skb->len) { +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index e5a7ac2..dca076f 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -562,6 +562,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) + if (tx->sdata->control_port_no_encrypt) + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; ++ info->flags |= IEEE80211_TX_CTL_USE_MINRATE; + } + + return TX_CONTINUE; +diff --git a/net/sched/ematch.c b/net/sched/ematch.c +index 3a633de..a2abc44 100644 +--- a/net/sched/ematch.c ++++ b/net/sched/ematch.c +@@ -227,6 +227,7 @@ static int tcf_em_validate(struct tcf_proto *tp, + * to replay the request. + */ + module_put(em->ops->owner); ++ em->ops = NULL; + err = -EAGAIN; + } + #endif +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index ae333c1..0adc66c 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -920,7 +920,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait, + poll_wait(filp, &queue_wait, wait); + + /* always allow write */ +- mask = POLL_OUT | POLLWRNORM; ++ mask = POLLOUT | POLLWRNORM; + + if (!rp) + return mask; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 566b0f6..ee24057 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1404,6 +1404,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) + if (! 
snd_pcm_playback_empty(substream)) { + snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); + snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); ++ } else { ++ runtime->status->state = SNDRV_PCM_STATE_SETUP; + } + break; + case SNDRV_PCM_STATE_RUNNING: +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 103e85a..2f3059b 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -3984,7 +3984,7 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, + /* Panther Point */ + { PCI_DEVICE(0x8086, 0x1e20), +- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, + /* Lynx Point */ + { PCI_DEVICE(0x8086, 0x8c20), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 12f28d7..231b264 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -85,6 +85,7 @@ enum { + STAC_ALIENWARE_M17X, + STAC_92HD89XX_HP_FRONT_JACK, + STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, ++ STAC_92HD73XX_ASUS_MOBO, + STAC_92HD73XX_MODELS + }; + +@@ -1935,7 +1936,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { + [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { + .type = HDA_FIXUP_PINS, + .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, +- } ++ }, ++ [STAC_92HD73XX_ASUS_MOBO] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ /* enable 5.1 and SPDIF out */ ++ { 0x0c, 0x01014411 }, ++ { 0x0d, 0x01014410 }, ++ { 0x0e, 0x01014412 }, ++ { 0x22, 0x014b1180 }, ++ { } ++ } ++ }, + }; + + static const struct hda_model_fixup stac92hd73xx_models[] = { +@@ -1947,6 +1959,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = { + { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, + { .id = STAC_DELL_EQ, .name = "dell-eq" }, + { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, ++ { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" }, + {} + }; + +@@ -1999,6 +2012,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { + "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10", ++ STAC_92HD73XX_ASUS_MOBO), + {} /* terminator */ + }; + +diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c +index 07b8b7b..81f6a75 100644 +--- a/sound/soc/omap/omap-pcm.c ++++ b/sound/soc/omap/omap-pcm.c +@@ -200,7 +200,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) + struct snd_pcm *pcm = rtd->pcm; + int ret; + +- ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64)); ++ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + diff --git a/projects/imx6/patches/linux/linux-900-hide_tsc_error.patch b/projects/imx6/patches/linux/linux-900-hide_tsc_error.patch deleted file mode 100644 index 61b73b69b0..0000000000 --- a/projects/imx6/patches/linux/linux-900-hide_tsc_error.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -uNr linux-3.6.4-orig/arch/x86/kernel/tsc.c linux-3.6.4-new/arch/x86/kernel/tsc.c ---- linux-3.6.4-orig/arch/x86/kernel/tsc.c 2012-11-03 14:19:55.000000000 +0100 -+++ linux-3.6.4-new/arch/x86/kernel/tsc.c 2012-11-03 14:23:05.000000000 +0100 -@@ -374,7 +374,7 @@ - goto success; - } - } -- pr_err("Fast TSC calibration failed\n"); -+ pr_info("Fast TSC calibration failed\n"); - return 0; - - success: