Subject: Re: [3.19.y-ckt stable] Linux 3.19.8-ckt9

diff --git a/Makefile b/Makefile
index 34abe24..5666ae7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 8
-EXTRAVERSION = -ckt8
+EXTRAVERSION = -ckt9
NAME = Sedated Swine

# *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c1785ee..8e78ed5 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -50,6 +50,14 @@ AS += -EL
LD += -EL
endif

+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
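
For reference, GCC PR 65932 covers the miscompilation this flag works around. Below is a hypothetical userspace sketch of the affected pattern (not the bugzilla reproducer): a struct with a signed sub-int member passed by value, which IPA-SRA may rewrite to pass the scalarized member directly and, with the bug, lose the sign extension.

	#include <stdio.h>

	/* A correct compiler prints -1; a miscompiled build of this
	 * pattern could print 255 because the scalarized copy of 'v'
	 * is widened without proper sign extension. */
	struct sample {
		signed char v;
		int pad;
	};

	static int __attribute__((noinline)) get(struct sample s)
	{
		return s.v;		/* must sign-extend */
	}

	int main(void)
	{
		struct sample s = { .v = -1, .pad = 0 };

		printf("%d\n", get(s));
		return 0;
	}
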
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index c792391..cbcbc8a 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -204,7 +204,7 @@

tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};

diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 159720d..ec23e86 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@

i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};

diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 07db2f8..0e59152 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -260,15 +260,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;

- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);

return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);

return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 8aa6f1b..a0d42f9 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -354,12 +354,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;

-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
*/
cpsr &= ~PSR_IT_MASK;
#endif
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3ec9687..6391fa6 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -428,7 +428,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the first
* time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
+ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index f6f1481..1fb756e 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -523,8 +523,7 @@ ARM_BE8(rev r6, r6 )

mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb

mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -537,6 +536,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF

1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 9ec6dfe..0512ed4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1439,8 +1439,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;

ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
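
The point of the split above is that vm_pgoff is an unsigned long (32 bits on ARM), so shifting it before widening to phys_addr_t truncates physical addresses beyond 4 GiB on LPAE systems. A userspace sketch of the truncation:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* Model vm_pgoff as the 32-bit unsigned long it is on ARM. */
		uint32_t pgoff = 0x100000;	/* page index of physical 4 GiB */

		uint64_t truncated = (uint64_t)(pgoff << PAGE_SHIFT);	/* shift done in 32 bits */
		uint64_t widened   = (uint64_t)pgoff << PAGE_SHIFT;	/* widen, then shift */

		/* prints truncated=0 widened=0x100000000 */
		printf("truncated=%#llx widened=%#llx\n",
		       (unsigned long long)truncated, (unsigned long long)widened);
		return 0;
	}
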
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index b61c049..39cdeb0 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,13 +20,6 @@

#include "common.h"

-#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
-#define intc_of_init NULL
-#endif
-#ifndef CONFIG_ARCH_OMAP4
-#define gic_of_init NULL
-#endif
-
static const struct of_device_id omap_dt_match_table[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "ti,omap-infra", },
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5fee0bd..67f5ba5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -331,6 +331,22 @@ config ARM64_ERRATUM_845719

If unsure, say Y.

+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
endmenu


diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 0666888..661928f 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -32,6 +32,10 @@ endif

CHECKFLAGS += -D__aarch64__

+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE += -mcmodel=large
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 98af7da..f985fc5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -545,6 +545,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif

+ /* EL2 debug */
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
+
/* Stage-2 translation */
msr vttbr_el2, xzr

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 9b6f71d..4223b0a 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR);
break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
+#endif
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 6ae9340..7ab6dec 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -213,14 +213,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)

/*
* VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
*/
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
- int err = 0;
+ int i, err = 0;

/*
* Save the hardware registers to the fpsimd_state structure.
@@ -236,10 +254,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
/*
* Now copy the FP registers. Since the registers are packed,
* we can copy the prefix we want (V0-V15) as it is.
- * FIXME: Won't work if big endian.
*/
- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
- sizeof(frame->ufp.fpregs));
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }

/* Create an AArch32 fpscr from the fpsr and the fpcr. */
fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -264,7 +287,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
- int err = 0;
+ int i, err = 0;

__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
@@ -274,12 +297,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;

- /*
- * Copy the FP registers into the start of the fpsimd_state.
- * FIXME: Won't work if big endian.
- */
- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
- sizeof(frame->ufp.fpregs));
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }

/* Extract the fpsr and the fpcr from the fpscr */
__get_user_error(fpscr, &frame->ufp.fpscr, err);
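
A userspace sketch of the union trick (assuming a GCC/Clang 64-bit target with __int128 and anonymous structs): the hi/lo ordering must follow host byte order so that .lo and .hi name the two architectural 64-bit halves of a Q register image on either endianness.

	#include <stdio.h>
	#include <stdint.h>

	union vreg {
		unsigned __int128 raw;
		struct {
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			uint64_t hi;		/* big-endian: high half first */
			uint64_t lo;
	#else
			uint64_t lo;		/* little-endian: low half first */
			uint64_t hi;
	#endif
		};
	};

	int main(void)
	{
		union vreg v;

		v.raw = ((unsigned __int128)0x1111222233334444ULL << 64) |
			0x5555666677778888ULL;
		/* either endianness: lo=5555666677778888 hi=1111222233334444 */
		printf("lo=%016llx hi=%016llx\n",
		       (unsigned long long)v.lo, (unsigned long long)v.hi);
		return 0;
	}
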
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index c3ca89c..0a2ff0f 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -843,8 +843,6 @@
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
- bic x3, x3, #1 // Clear Enable
- msr cntv_ctl_el0, x3

isb

@@ -852,6 +850,9 @@
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
+ // Disable the virtual timer
+ msr cntv_ctl_el0, xzr
+
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
@@ -946,13 +947,15 @@ ENTRY(__kvm_vcpu_run)
// Guest context
add x2, x0, #VCPU_CONTEXT

+ // We must restore the 32-bit state before the sysregs, thanks
+ // to Cortex-A57 erratum #852523.
+ restore_guest_32bit_state
bl __restore_sysregs
bl __restore_fpsimd

skip_debug_state x3, 1f
bl __restore_debug
1:
- restore_guest_32bit_state
restore_guest_regs

// That's it, no more messing around.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8a5bc1c..981e607 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
endif
ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
+else
+BOOTCFLAGS += -mlittle-endian
+BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif

BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 8668651..79cd39f 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
BUG_ON(index >= 4096);

vpn = hpt_vpn(ea, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
hpte_slot_array = get_hpte_slot_array(pmdp);
if (psize == MMU_PAGE_4K) {
/*
@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
valid = hpte_valid(hpte_slot_array, index);
if (valid) {
/* update the hpte bits */
+ hash = hpt_hash(vpn, shift, ssize);
hidx = hpte_hash_index(hpte_slot_array, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
if (!valid) {
unsigned long hpte_group;

+ hash = hpt_hash(vpn, shift, ssize);
/* insert new entry */
pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
new_pmd |= _PAGE_HASHPTE;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 3948b8a..d35ec84 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

if (WARN_ON(!phb))
return;
@@ -106,10 +107,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
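
The same reordering recurs in the fsl_msi, mpic_pasemi, mpic_u3msi and ppc4xx teardown paths below: once irq_dispose_mapping() tears down the virq, virq_to_hw() on it no longer returns the real hardware number, so it has to be captured first. A toy model of the ordering bug (not kernel code):

	#include <assert.h>
	#include <stdio.h>

	/* Toy virq -> hwirq table; dispose invalidates the entry. */
	static int map[8];

	static int virq_to_hw(int virq)           { return map[virq]; }
	static void irq_dispose_mapping(int virq) { map[virq] = -1; }

	int main(void)
	{
		int virq = 3;

		map[virq] = 42;			/* hwirq 42 behind virq 3 */

		int hwirq = virq_to_hw(virq);	/* capture before disposal */
		irq_dispose_mapping(virq);
		assert(hwirq == 42);		/* frees the right bitmap bit */

		assert(virq_to_hw(virq) == -1);	/* the old ordering read this */
		printf("freed hwirq %d\n", hwirq);
		return 0;
	}
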
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 4bbb4b8..fd9fa2e 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index a3f660e..9e1da94 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -65,6 +65,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -72,10 +73,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index b2cef18..13a34b2 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6e2e6aa..02a137d 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;

dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}

diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 34d5fa7..f564740 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;

+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
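
These helpers matter because s390 is big-endian: a 32-bit copy out of a 64-bit sigset word grabs the high half, so the old memcpy-based code handed the compat frame the wrong mask words. A userspace sketch of the conversion (assuming a 64-bit unsigned long, as on s390x):

	#include <stdio.h>
	#include <stdint.h>

	static void sigset_to_sigset32(const uint64_t *set64, uint32_t *set32)
	{
		set32[0] = (uint32_t)set64[0];		/* signals 1..32 */
		set32[1] = (uint32_t)(set64[0] >> 32);	/* signals 33..64 */
	}

	static void sigset32_to_sigset(const uint32_t *set32, uint64_t *set64)
	{
		set64[0] = (uint64_t)set32[0] | ((uint64_t)set32[1] << 32);
	}

	int main(void)
	{
		uint64_t set = 0x123456789abcdef0ULL, back = 0;
		uint32_t words[2];

		sigset_to_sigset32(&set, words);
		sigset32_to_sigset(words, &back);
		printf("round-trip ok: %d\n", set == back);	/* 1 */
		return 0;
	}
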
@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -407,7 +424,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;

/* Create struct sigcontext32 on the signal stack */
- memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sigset_to_sigset32(set->sig, sc.oldmask);
sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
@@ -468,6 +485,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -515,11 +533,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();

/* Create ucontext on the signal stack. */
+ sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;

diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c8aa65d..9e49356 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -275,6 +275,7 @@
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 29b5b18..c803cda 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -366,6 +366,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);

if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 138e7af..9072010 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1441,7 +1441,18 @@ END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
INTR_FRAME
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1493,9 +1504,12 @@ ENTRY(nmi)
* we don't want to enable interrupts, because then we'll end
* up in an awkward situation in which IRQs are on but NMIs
* are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
*/

- SWAPGS
+ SWAPGS_UNSAFE_STACK
cld
movq %rsp, %rdx
movq PER_CPU_VAR(kernel_stack), %rsp
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 548d25f..8d12f05 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>

-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5054497..21187eb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>

unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1004,15 +1005,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;

- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bde6bd1..ba2ba0f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5918,6 +5918,8 @@ static __init int hardware_setup(void)
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);

+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 57d5915..145832a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2469,6 +2469,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
+ case MSR_K8_TSEG_ADDR:
+ case MSR_K8_TSEG_MASK:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K7_EVNTSEL0:
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index f1ff39a..54d946a 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp)
* allocate new zcomp and initialize it. return compressing
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
*/
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
struct zcomp *comp;
struct zcomp_backend *backend;
+ int error;

backend = find_backend(compress);
if (!backend)
@@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)

comp->backend = backend;
if (max_strm > 1)
- zcomp_strm_multi_create(comp, max_strm);
+ error = zcomp_strm_multi_create(comp, max_strm);
else
- zcomp_strm_single_create(comp);
- if (!comp->stream) {
+ error = zcomp_strm_single_create(comp);
+ if (error) {
kfree(comp);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
return comp;
}
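
Callers keep the usual ERR_PTR idiom and now see the real cause instead of a blanket -ENOMEM. A self-contained sketch of that idiom, with toy stand-ins for the kernel's include/linux/err.h helpers:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Toy versions of the kernel's ERR_PTR helpers. */
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

	static void *create(int want_error)
	{
		if (want_error)
			return ERR_PTR(-EINVAL);	/* propagate the real cause */
		return malloc(16);
	}

	int main(void)
	{
		void *c = create(1);

		if (IS_ERR(c)) {	/* caller sees -EINVAL, not -ENOMEM */
			printf("create failed: %ld\n", PTR_ERR(c));
			return 1;
		}
		free(c);
		return 0;
	}
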
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index a4bd90f..d96b152 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id)
{
u8 *base = (u8 *)bdb;
int index = 0;
- u16 total, current_size;
+ u32 total, current_size;
u8 current_id;

/* skip to first section */
@@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id)
current_size = *((u16 *)(base + index));
index += 2;

+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+ current_size = *((const u32 *)(base + index + 1));
+
if (index + current_size > total)
return NULL;

@@ -794,6 +798,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
return;
}

+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 3) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+ return;
+ }
+
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");

block_size = get_blocksize(sequence);
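
A toy walk of the section layout handled above, assuming a little-endian host (the VBT is little-endian) and the BDB_MIPI_SEQUENCE id value from i915's intel_bios.h: each section is [u8 id][u16 size][payload], and for a v3+ MIPI sequence block the payload starts with a version byte followed by the authoritative u32 size.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define BDB_MIPI_SEQUENCE 53	/* id per i915's intel_bios.h */

	static uint32_t section_size(const uint8_t *base, unsigned index)
	{
		uint8_t id = base[index];
		uint16_t sz16;
		uint32_t size;

		memcpy(&sz16, base + index + 1, 2);	/* unaligned-safe read */
		size = sz16;

		/* version byte, then the separate u32 size */
		if (id == BDB_MIPI_SEQUENCE && base[index + 3] >= 3)
			memcpy(&size, base + index + 4, 4);
		return size;
	}

	int main(void)
	{
		uint8_t blob[16] = { BDB_MIPI_SEQUENCE, 0xff, 0xff,	/* bogus u16 size */
				     3,					/* version 3 */
				     0x08, 0x00, 0x00, 0x00 };		/* real size: 8 */

		printf("size=%u\n", section_size(blob, 0));	/* 8 */
		return 0;
	}
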
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 3224879..42b3dbb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
- int connected;
+ bool connected = false;

/* The first monitor is always connected */
- connected = (output->index == 0) ||
- (qdev->client_monitors_config &&
- qdev->client_monitors_config->count > output->index &&
- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+ if (!qdev->client_monitors_config) {
+ if (output->index == 0)
+ connected = true;
+ } else
+ connected = qdev->client_monitors_config->count > output->index &&
+ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);

DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d26a6da..d8896ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -636,7 +636,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf);
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle);
@@ -650,7 +651,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out);
+ uint32_t id, struct vmw_dma_buffer **out,
+ struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2711b09..01b8423 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -887,7 +887,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;

- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
@@ -948,7 +949,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;

- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f6..e189898 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}

- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15..c5b4c47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -356,7 +356,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}

*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret;
}

@@ -483,7 +483,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf)
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
@@ -517,6 +518,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}

*p_dma_buf = &user_bo->dma;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
*handle = user_bo->prime.base.hash.key;

out_no_base_object:
@@ -633,6 +638,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
int ret;

if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -645,7 +651,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,

switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+ &buffer_base);
if (unlikely(ret != 0))
return ret;

@@ -653,6 +660,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
+ ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -694,7 +702,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
return ret;

ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &dma_buf);
+ req->size, false, &handle, &dma_buf,
+ NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;

@@ -723,7 +732,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_dma_buffer **out)
+ uint32_t handle, struct vmw_dma_buffer **out,
+ struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;
@@ -745,7 +755,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
- ttm_base_object_unref(&base);
+ if (p_base)
+ *p_base = base;
+ else
+ ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;

return 0;
@@ -1006,7 +1019,7 @@ int vmw_dumb_create(struct drm_file *file_priv,

ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
- &dma_buf);
+ &dma_buf, NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;

@@ -1034,7 +1047,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
struct vmw_dma_buffer *out_buf;
int ret;

- ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a..d2751ad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -470,7 +470,7 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,

if (arg->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
- &buffer);
+ &buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
"creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3..17a4107 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -43,6 +43,7 @@ struct vmw_user_surface {
struct vmw_surface srf;
uint32_t size;
struct drm_master *master;
+ struct ttm_base_object *backup_base;
};

/**
@@ -652,6 +653,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;

*p_base = NULL;
+ if (user_srf->backup_base)
+ ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}

@@ -846,7 +849,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
res->backup_size,
true,
&backup_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -1309,7 +1313,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,

if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);
} else if (req->drm_surface_flags &
drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1317,7 +1322,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);

if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 0773930..ff80b83 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -353,6 +353,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]

/* NCT6776 specific data */

+/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
+#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
+#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
+
static const s8 NCT6776_ALARM_BITS[] = {
0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
@@ -3506,8 +3510,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3578,8 +3582,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3655,8 +3659,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b7350d5..46563f1 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2918,9 +2918,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
- int ret;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret = 0;

switch (state) {
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ isert_put_cmd(isert_cmd, true);
+ break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
ret = isert_put_nopin(cmd, conn, false);
break;
@@ -3287,6 +3294,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
wait_for_completion(&isert_conn->conn_wait_comp_err);
}

+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicited dataout
+ * @conn: iscsi connection
+ *
+ * We might still have commands that are waiting for unsolicited
+ * dataout messages. We must put the extra reference on those
+ * before blocking in target_wait_for_session_cmds()
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd, *tmp;
+ static LIST_HEAD(drop_cmd_list);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+ if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+ (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+ (cmd->write_data_done < cmd->se_cmd.data_length))
+ list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ if (cmd->i_state != ISTATE_REMOVE) {
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+ isert_info("conn %p dropping cmd %p\n", conn, cmd);
+ isert_put_cmd(isert_cmd, true);
+ }
+ }
+}
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
@@ -3305,8 +3347,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);

- isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
+ isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
isert_wait4logout(isert_conn);

queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index a2e8c3f..c2c578f 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
{
struct irq_domain *domain = d->domain;
struct irq_domain_chip_generic *dgc = domain->gc;
- struct irq_chip_generic *gc = dgc->gc[0];
+ struct irq_chip_generic *bgc = dgc->gc[0];
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

- /* Disable interrupt on AIC5 */
- irq_gc_lock(gc);
+ /*
+ * Disable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
+ irq_gc_lock(bgc);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
gc->mask_cache &= ~d->mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(bgc);
}

static void aic5_unmask(struct irq_data *d)
{
struct irq_domain *domain = d->domain;
struct irq_domain_chip_generic *dgc = domain->gc;
- struct irq_chip_generic *gc = dgc->gc[0];
+ struct irq_chip_generic *bgc = dgc->gc[0];
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

- /* Enable interrupt on AIC5 */
- irq_gc_lock(gc);
+ /*
+ * Enable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
+ irq_gc_lock(bgc);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IECR);
gc->mask_cache |= d->mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(bgc);
}

static int aic5_retrigger(struct irq_data *d)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index eee4fd6..cc55691 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -987,8 +987,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;

- pci_dev_get(dev);
-
if (cxl_verbose)
dump_cxl_config_space(dev);

diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c..f2aa89b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -501,8 +501,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete >= budget || txcomplete > 0)
return rxcomplete;

- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);

netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fab4757..f8a081a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1452,6 +1452,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
unsigned char *data;
+ dma_addr_t phys_addr;
u32 rx_status;
int rx_bytes, err;

@@ -1459,6 +1460,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
+ phys_addr = rx_desc->buf_phys_addr;

if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1507,7 +1509,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
if (!skb)
goto err_drop_frame;

- dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
+ dma_unmap_single(dev->dev.parent, phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

rcvd_pkts++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f16627..37cdc34 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1248,8 +1248,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
memcpy(rss_context->rss_key, priv->rss_key,
MLX4_EN_RSS_KEY_SIZE);
- netdev_rss_key_fill(rss_context->rss_key,
- MLX4_EN_RSS_KEY_SIZE);
} else {
en_err(priv, "Unknown RSS hash function requested\n");
err = -EINVAL;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 4d050ee..84e835e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1054,10 +1054,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return 0;

case TUNSETSNDBUF:
- if (get_user(u, up))
+ if (get_user(s, sp))
return -EFAULT;

- q->sk.sk_sndbuf = u;
+ q->sk.sk_sndbuf = s;
return 0;

case TUNGETVNETHDRSZ:
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index e7ed251..7a59893 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -779,7 +779,7 @@ int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval, pm;
+ int retval, pm, mpn;

clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -810,6 +810,8 @@ int usbnet_stop (struct net_device *net)

usbnet_purge_paused_rxq(dev);

+ mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
/* deferred work (task, timer, softirq) must also stop.
* can't flush_scheduled_work() until we drop rtnl (later),
* else workers could deadlock; so make workers a NOP.
@@ -820,8 +822,7 @@ int usbnet_stop (struct net_device *net)
if (!pm)
usb_autopm_put_interface(dev->intf);

- if (info->manage_power &&
- !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+ if (info->manage_power && mpn)
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6c83846..d1494f7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2180,10 +2180,6 @@ static void vxlan_setup(struct net_device *dev)

eth_hw_addr_random(dev);
ether_setup(dev);
- if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
- dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
- else
- dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
@@ -2504,8 +2500,12 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,

dev->needed_headroom = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- } else if (use_ipv6)
+ } else if (use_ipv6) {
vxlan->flags |= VXLAN_F_IPV6;
+ dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
+ } else {
+ dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
+ }

if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
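
For reference, with the headroom constants as defined in drivers/net/vxlan.c of this era (outer IP header + UDP + VXLAN header + inner Ethernet), the values being assigned work out as follows:

	#include <stdio.h>

	#define ETH_HLEN	14
	#define VXLAN_HEADROOM	(20 + 8 + 8 + 14)	/* IPv4: 50 bytes */
	#define VXLAN6_HEADROOM	(40 + 8 + 8 + 14)	/* IPv6: 70 bytes */

	int main(void)
	{
		/* Without a lower device, fall back to a plain Ethernet header. */
		printf("ipv4: %d bytes\n", ETH_HLEN + VXLAN_HEADROOM);	/* 64 */
		printf("ipv6: %d bytes\n", ETH_HLEN + VXLAN6_HEADROOM);	/* 84 */
		return 0;
	}
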
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 7f249b9..4ca9945 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -355,7 +355,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;

if (!tdev)
@@ -369,7 +370,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;

if (!tdev)
@@ -386,22 +388,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
.release = pci_vpd_pci22_release,
};

-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
- int ret = 0;
-
- if (!tdev)
- return -ENODEV;
- if (!tdev->vpd || !tdev->multifunction ||
- dev->class != tdev->class || dev->vendor != tdev->vendor ||
- dev->device != tdev->device)
- ret = -ENODEV;
-
- pci_dev_put(tdev);
- return ret;
-}
-
int pci_vpd_pci22_init(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd;
@@ -410,12 +396,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
return -ENODEV;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
- int ret = pci_vpd_f0_dev_check(dev);

- if (ret)
- return ret;
- }
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
return -ENOMEM;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 57ab10a..33fa6bd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1915,11 +1915,27 @@ static void quirk_netmos(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);

+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions. The functions are
+ * expected to be identical devices.
+ */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
- if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+ struct pci_dev *f0;
+
+ if (!PCI_FUNC(dev->devfn))
return;
- dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ if (!f0)
+ return;
+
+ if (f0->vpd && dev->class == f0->class &&
+ dev->vendor == f0->vendor && dev->device == f0->device)
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 0ab2b37..2de5af9 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_BIOS_QUERY 0x9
+#define HPWMI_FEATURE_QUERY 0xb
#define HPWMI_HOTKEY_QUERY 0xc
-#define HPWMI_FEATURE_QUERY 0xd
+#define HPWMI_FEATURE2_QUERY 0xd
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a

@@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void)
return (state & 0x4) ? 1 : 0;
}

-static int __init hp_wmi_bios_2009_later(void)
+static int __init hp_wmi_bios_2008_later(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
sizeof(state), sizeof(state));
- if (ret)
- return ret;
+ if (!ret)
+ return 1;

- return (state & 0x10) ? 1 : 0;
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
}

-static int hp_wmi_enable_hotkeys(void)
+static int __init hp_wmi_bios_2009_later(void)
{
- int ret;
- int query = 0x6e;
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (!ret)
+ return 1;

- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+}

+static int __init hp_wmi_enable_hotkeys(void)
+{
+ int value = 0x6e;
+ int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+ sizeof(value), 0);
if (ret)
return -EINVAL;
return 0;
@@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);

- if (hp_wmi_bios_2009_later() == 4)
+ if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
hp_wmi_enable_hotkeys();

status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 23822e7..1941a72 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -710,6 +710,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;

+ /* Ignore RX timeout interrupt if it is disabled */
+ if (!(sccr1_reg & SSCR1_TINTE))
+ mask &= ~SSSR_TINT;
+
if (!(status & mask))
return IRQ_NONE;

diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2..be6155c 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@ struct xtfpga_spi {
static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
unsigned addr, u32 val)
{
- iowrite32(val, spi->regs + addr);
+ __raw_writel(val, spi->regs + addr);
}

static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
unsigned addr)
{
- return ioread32(spi->regs + addr);
+ return __raw_readl(spi->regs + addr);
}

static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index bfa47d5..2ebe805 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1492,8 +1492,7 @@ static struct class spi_master_class = {
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5c85435..e0bec45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -579,7 +579,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;

- spidev->speed_hz = spidev->spi->max_speed_hz;
+ if (spidev->spi)
+ spidev->speed_hz = spidev->spi->max_speed_hz;

/* ... after we unbound from the underlying device? */
spin_lock_irq(&spidev->spi_lock);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 296d347..9ac97fa 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1174,13 +1174,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
mutex_unlock(&client->lock);
goto end;
}
- mutex_unlock(&client->lock);

handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
goto end;
+ }

- mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 7c14565..bd986ce 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -638,6 +638,44 @@ __acquires(hwep->lock)
return 0;
}

+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+ struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ if (ep == NULL || hwep->ep.desc == NULL)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(hwep->lock, flags);
+
+ if (value && hwep->dir == TX && check_transfer &&
+ !list_empty(&hwep->qh.queue) &&
+ !usb_endpoint_xfer_control(hwep->ep.desc)) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return -EAGAIN;
+ }
+
+ direction = hwep->dir;
+ do {
+ retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+ if (!value)
+ hwep->wedge = 0;
+
+ if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+ hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+ } while (hwep->dir != direction);
+
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return retval;
+}
+
/**
* _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
* @gadget: gadget
@@ -1033,7 +1071,7 @@ __acquires(ci->lock)
num += ci->hw_ep_max / 2;

spin_unlock(&ci->lock);
- err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+ err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
spin_lock(&ci->lock);
if (!err)
isr_setup_status_phase(ci);
@@ -1092,8 +1130,8 @@ delegate:

if (err < 0) {
spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
- dev_err(ci->dev, "error: ep_set_halt\n");
+ if (_ep_set_halt(&hwep->ep, 1, false))
+ dev_err(ci->dev, "error: _ep_set_halt\n");
spin_lock(&ci->lock);
}
}
@@ -1124,9 +1162,9 @@ __acquires(ci->lock)
err = isr_setup_status_phase(ci);
if (err < 0) {
spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
+ if (_ep_set_halt(&hwep->ep, 1, false))
dev_err(ci->dev,
- "error: ep_set_halt\n");
+ "error: _ep_set_halt\n");
spin_lock(&ci->lock);
}
}
@@ -1369,41 +1407,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
*/
static int ep_set_halt(struct usb_ep *ep, int value)
{
- struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
- int direction, retval = 0;
- unsigned long flags;
-
- if (ep == NULL || hwep->ep.desc == NULL)
- return -EINVAL;
-
- if (usb_endpoint_xfer_isoc(hwep->ep.desc))
- return -EOPNOTSUPP;
-
- spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
- /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
- if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
- !list_empty(&hwep->qh.queue)) {
- spin_unlock_irqrestore(hwep->lock, flags);
- return -EAGAIN;
- }
-#endif
-
- direction = hwep->dir;
- do {
- retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
- if (!value)
- hwep->wedge = 0;
-
- if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
- hwep->dir = (hwep->dir == TX) ? RX : TX;
-
- } while (hwep->dir != direction);
-
- spin_unlock_irqrestore(hwep->lock, flags);
- return retval;
+ return _ep_set_halt(ep, value, true);
}

/**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b..b9ddf0c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
- desc->bmAttributes > 2) {
+ USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}

if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ max_tx = (desc->bMaxBurst + 1) *
+ (USB_SS_MULT(desc->bmAttributes)) *
usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = usb_endpoint_maxp(&ep->desc) *
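USB_SS_MULT() (from include/uapi/linux/usb/ch9.h) decodes the low two bits of the companion descriptor's bmAttributes into the 1-based Mult value, so the old test "bmAttributes > 2" and the old factor "bmAttributes + 1" were both wrong whenever the reserved upper bits were set. A runnable worked example (descriptor values invented for illustration):

#include <stdio.h>

#define USB_SS_MULT(p)	(1 + ((p) & 0x3))	/* as in ch9.h */

int main(void)
{
	unsigned int bmAttributes = 0x02;	/* example: Mult field = 2 */
	unsigned int bMaxBurst = 3;		/* example: bursts of 4 packets */
	unsigned int maxp = 1024;		/* example wMaxPacketSize */

	/* max_tx for an isoc endpoint, as computed after the fix above */
	unsigned int max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;

	printf("mult=%u max_tx=%u\n", USB_SS_MULT(bmAttributes), max_tx);
	/* prints: mult=3 max_tx=12288 */
	return 0;
}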
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d44c904..4d45551 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -833,9 +833,8 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
- init_timer(&ep->stop_cmd_timer);
- ep->stop_cmd_timer.data = (unsigned long) ep;
- ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+ setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
+ (unsigned long)ep);
ep->xhci = xhci;
}

@@ -1502,10 +1501,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
- * xHCI 1.0 specification indicates that the Average TRB Length should
- * be set to 8 for control endpoints.
+ * The xHCI 1.0 and 1.1 specifications indicate that the Average TRB
+ * Length should be set to 8 for control endpoints.
*/
- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
@@ -1796,8 +1795,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;

- if (timer_pending(&xhci->cmd_timer))
- del_timer_sync(&xhci->cmd_timer);
+ del_timer_sync(&xhci->cmd_timer);

/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2325,6 +2323,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)

INIT_LIST_HEAD(&xhci->cmd_list);

+ /* init command timeout timer */
+ setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+ (unsigned long)xhci);
+
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
@@ -2509,11 +2511,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);

- /* init command timeout timer */
- init_timer(&xhci->cmd_timer);
- xhci->cmd_timer.data = (unsigned long) xhci;
- xhci->cmd_timer.function = xhci_handle_command_timeout;
-
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
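Both xhci timer hunks above are the same mechanical conversion: setup_timer() (the 3.19-era API, before timer_setup()) folds the three init_timer() steps into one call, and del_timer_sync() is already safe on a timer that was never pending, so the timer_pending() guard could go. A minimal sketch with a hypothetical callback:

#include <linux/timer.h>

static void demo_watchdog(unsigned long data)
{
	/* handle the timeout; 'data' carries the context pointer */
}

static void demo_init(struct timer_list *t, void *ctx)
{
	/* open-coded sequence the patch removes: */
	init_timer(t);
	t->data = (unsigned long)ctx;
	t->function = demo_watchdog;

	/* equivalent single call: */
	setup_timer(t, demo_watchdog, (unsigned long)ctx);
}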
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 95c340c..06c247e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
+ /* we are about to kill xhci, give it one more chance */
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
+ udelay(1000);
+ ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+ if (ret == 0)
+ return 0;
+
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3385,8 +3394,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (start_cycle == 0)
field |= 0x1;

- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
- if (xhci->hci_version == 0x100) {
+ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version >= 0x100) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f2194a0..0612a5b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~XHCI_STATE_HALTED;
+ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
return ret;
}

@@ -686,8 +687,11 @@ void xhci_stop(struct usb_hcd *hcd)
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);

+ mutex_lock(&xhci->mutex);
+
if (!usb_hcd_is_primary_hcd(hcd)) {
xhci_only_stop_hcd(xhci->shared_hcd);
+ mutex_unlock(&xhci->mutex);
return;
}

@@ -726,6 +730,7 @@ void xhci_stop(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
readl(&xhci->op_regs->status));
+ mutex_unlock(&xhci->mutex);
}

/*
@@ -3794,6 +3799,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,

mutex_lock(&xhci->mutex);

+ if (xhci->xhc_state) /* dying or halted */
+ goto out;
+
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 463feb8..17d04d9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
+#define ZTE_PRODUCT_ZM8620_X 0x0396
+#define ZTE_PRODUCT_ME3620_MBIM 0x0426
+#define ZTE_PRODUCT_ME3620_X 0x1432
+#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_AC2726 0xfff1
#define ZTE_PRODUCT_MG880 0xfffd
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
@@ -552,6 +556,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3),
};

+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+ .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -1599,6 +1615,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 790dbae..b5e5070 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2794,7 +2794,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
bio_end_io_t end_io_func,
int mirror_num,
unsigned long prev_bio_flags,
- unsigned long bio_flags)
+ unsigned long bio_flags,
+ bool force_bio_submit)
{
int ret = 0;
struct bio *bio;
@@ -2812,6 +2813,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
contig = bio_end_sector(bio) == sector;

if (prev_bio_flags != bio_flags || !contig ||
+ force_bio_submit ||
merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
bio_add_page(bio, page, page_size, offset) < page_size) {
ret = submit_one_bio(rw, bio, mirror_num,
@@ -2903,7 +2905,8 @@ static int __do_readpage(struct extent_io_tree *tree,
get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw)
+ unsigned long *bio_flags, int rw,
+ u64 *prev_em_start)
{
struct inode *inode = page->mapping->host;
u64 start = page_offset(page);
@@ -2951,6 +2954,7 @@ static int __do_readpage(struct extent_io_tree *tree,
}
while (cur <= end) {
unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+ bool force_bio_submit = false;

if (cur >= last_byte) {
char *userpage;
@@ -3001,6 +3005,49 @@ static int __do_readpage(struct extent_io_tree *tree,
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
+
+ /*
+ * If we have a file range that points to a compressed extent
+	 * and it's followed by a consecutive file range that points
+ * to the same compressed extent (possibly with a different
+ * offset and/or length, so it either points to the whole extent
+ * or only part of it), we must make sure we do not submit a
+ * single bio to populate the pages for the 2 ranges because
+ * this makes the compressed extent read zero out the pages
+ * belonging to the 2nd range. Imagine the following scenario:
+ *
+ * File layout
+ * [0 - 8K] [8K - 24K]
+ * | |
+ * | |
+ * points to extent X, points to extent X,
+ * offset 4K, length of 8K offset 0, length 16K
+ *
+ * [extent X, compressed length = 4K uncompressed length = 16K]
+ *
+ * If the bio to read the compressed extent covers both ranges,
+ * it will decompress extent X into the pages belonging to the
+ * first range and then it will stop, zeroing out the remaining
+ * pages that belong to the other range that points to extent X.
+ * So here we make sure we submit 2 bios, one for the first
+	 * range and another one for the second range. Both will target
+ * the same physical extent from disk, but we can't currently
+ * make the compressed bio endio callback populate the pages
+ * for both ranges because each compressed bio is tightly
+ * coupled with a single extent map, and each range can have
+ * an extent map with a different offset value relative to the
+ * uncompressed data of our extent and different lengths. This
+ * is a corner case so we prioritize correctness over
+ * non-optimal behavior (submitting 2 bios for the same extent).
+ */
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+ prev_em_start && *prev_em_start != (u64)-1 &&
+ *prev_em_start != em->orig_start)
+ force_bio_submit = true;
+
+ if (prev_em_start)
+ *prev_em_start = em->orig_start;
+
free_extent_map(em);
em = NULL;

@@ -3050,7 +3097,8 @@ static int __do_readpage(struct extent_io_tree *tree,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
- this_bio_flag);
+ this_bio_flag,
+ force_bio_submit);
if (!ret) {
nr++;
*bio_flags = this_bio_flag;
@@ -3077,7 +3125,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw)
+ unsigned long *bio_flags, int rw,
+ u64 *prev_em_start)
{
struct inode *inode;
struct btrfs_ordered_extent *ordered;
@@ -3097,7 +3146,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,

for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], get_extent, em_cached, bio,
- mirror_num, bio_flags, rw);
+ mirror_num, bio_flags, rw, prev_em_start);
page_cache_release(pages[index]);
}
}
@@ -3107,7 +3156,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
int nr_pages, get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw)
+ unsigned long *bio_flags, int rw,
+ u64 *prev_em_start)
{
u64 start = 0;
u64 end = 0;
@@ -3128,7 +3178,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
index - first_index, start,
end, get_extent, em_cached,
bio, mirror_num, bio_flags,
- rw);
+ rw, prev_em_start);
start = page_start;
end = start + PAGE_CACHE_SIZE - 1;
first_index = index;
@@ -3139,7 +3189,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
end, get_extent, em_cached, bio,
- mirror_num, bio_flags, rw);
+ mirror_num, bio_flags, rw,
+ prev_em_start);
}

static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -3165,7 +3216,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
}

ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
- bio_flags, rw);
+ bio_flags, rw, NULL);
return ret;
}

@@ -3191,7 +3242,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
int ret;

ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
- &bio_flags, READ);
+ &bio_flags, READ, NULL);
if (bio)
ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
return ret;
@@ -3444,7 +3495,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
sector, iosize, pg_offset,
bdev, &epd->bio, max_nr,
end_bio_extent_writepage,
- 0, 0, 0);
+ 0, 0, 0, false);
if (ret)
SetPageError(page);
}
@@ -3746,7 +3797,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
ret = submit_extent_page(rw, tree, p, offset >> 9,
PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
-1, end_bio_extent_buffer_writepage,
- 0, epd->bio_flags, bio_flags);
+ 0, epd->bio_flags, bio_flags, false);
epd->bio_flags = bio_flags;
if (ret) {
set_btree_ioerr(p);
@@ -4150,6 +4201,7 @@ int extent_readpages(struct extent_io_tree *tree,
struct page *page;
struct extent_map *em_cached = NULL;
int nr = 0;
+ u64 prev_em_start = (u64)-1;

for (page_idx = 0; page_idx < nr_pages; page_idx++) {
page = list_entry(pages->prev, struct page, lru);
@@ -4166,12 +4218,12 @@ int extent_readpages(struct extent_io_tree *tree,
if (nr < ARRAY_SIZE(pagepool))
continue;
__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, READ);
+ &bio, 0, &bio_flags, READ, &prev_em_start);
nr = 0;
}
if (nr)
__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, READ);
+ &bio, 0, &bio_flags, READ, &prev_em_start);

if (em_cached)
free_extent_map(em_cached);
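The whole btrfs change reduces to one extra predicate threaded down from extent_readpages() through the readpage helpers; a distilled rendering of when a new bio is forced (names as in the patch, helper hypothetical):

static bool demo_force_bio_submit(const struct extent_map *em,
				  const u64 *prev_em_start)
{
	/* Split the bio whenever the previous page came from a different
	 * compressed extent mapping: orig_start identifies the mapping,
	 * and (u64)-1 marks "no previous page seen yet".
	 */
	return test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
	       prev_em_start && *prev_em_start != (u64)-1 &&
	       *prev_em_start != em->orig_start;
}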
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 35911f0..fbcd590 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4854,7 +4854,8 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (!special_file(inode->i_mode))
+ btrfs_wait_ordered_range(inode, 0, (u64)-1);

btrfs_free_io_failure_record(inode, 0, (u64)-1);

diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4ac7445..da7fbfa 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
return 0;
}

+/* The server has provided av pairs/target info in the type 2 challenge
+ * packet, which we have plucked and stored within the smb session.
+ * We parse that blob here to find the server-given timestamp to use
+ * as part of ntlmv2 authentication (falling back to the local current
+ * time on failure).
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+ unsigned int attrsize;
+ unsigned int type;
+ unsigned int onesize = sizeof(struct ntlmssp2_name);
+ unsigned char *blobptr;
+ unsigned char *blobend;
+ struct ntlmssp2_name *attrptr;
+
+ if (!ses->auth_key.len || !ses->auth_key.response)
+ return 0;
+
+ blobptr = ses->auth_key.response;
+ blobend = blobptr + ses->auth_key.len;
+
+ while (blobptr + onesize < blobend) {
+ attrptr = (struct ntlmssp2_name *) blobptr;
+ type = le16_to_cpu(attrptr->type);
+ if (type == NTLMSSP_AV_EOL)
+ break;
+ blobptr += 2; /* advance attr type */
+ attrsize = le16_to_cpu(attrptr->length);
+ blobptr += 2; /* advance attr size */
+ if (blobptr + attrsize > blobend)
+ break;
+ if (type == NTLMSSP_AV_TIMESTAMP) {
+ if (attrsize == sizeof(u64))
+ return *((__le64 *)blobptr);
+ }
+ blobptr += attrsize; /* advance attr value */
+ }
+
+ return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
const struct nls_table *nls_cp)
{
@@ -637,6 +679,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
struct ntlmv2_resp *ntlmv2;
char ntlmv2_hash[16];
unsigned char *tiblob = NULL; /* target info blob */
+ __le64 rsp_timestamp;

if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
if (!ses->domainName) {
@@ -655,6 +698,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
}
}

+ /* Must be within 5 minutes of the server (or in range +/-2h
+ * in case of Mac OS X), so simply carry over server timestamp
+ * (as Windows 7 does)
+ */
+ rsp_timestamp = find_timestamp(ses);
+
baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
tilen = ses->auth_key.len;
tiblob = ses->auth_key.response;
@@ -671,8 +720,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
ntlmv2->blob_signature = cpu_to_le32(0x00000101);
ntlmv2->reserved = 0;
- /* Must be within 5 minutes of the server */
- ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ ntlmv2->time = rsp_timestamp;
+
get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
ntlmv2->reserved2 = 0;
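find_timestamp() above walks the NTLMSSP AV-pair blob, a simple TLV sequence: a 16-bit type, a 16-bit length, then the value bytes, terminated by type 0 (MsvAvEOL); MsvAvTimestamp is type 7. A stand-alone sketch of the same walk (little-endian host assumed, names hypothetical):

#include <stdint.h>
#include <string.h>

#define AV_EOL		0
#define AV_TIMESTAMP	7

static int demo_find_ts(const uint8_t *blob, size_t len, uint64_t *ts)
{
	const uint8_t *p = blob, *end = blob + len;

	while (p + 4 <= end) {
		uint16_t type, alen;

		memcpy(&type, p, 2);		/* attribute type */
		memcpy(&alen, p + 2, 2);	/* attribute length */
		p += 4;
		if (type == AV_EOL || p + alen > end)
			break;
		if (type == AV_TIMESTAMP && alen == sizeof(*ts)) {
			memcpy(ts, p, sizeof(*ts));
			return 0;
		}
		p += alen;			/* skip to next attribute */
	}
	return -1;	/* not found: caller falls back to local time */
}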

diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 8b7898b..64a9bca 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
goto out_drop_write;
}

+ if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
+ goto out_fput;
+ }
+
if ((!src_file.file->private_data) || (!dst_file->private_data)) {
rc = -EBADF;
cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index eab05e1..29be80b 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server)
break;
default:
server->echoes = true;
- server->oplocks = true;
+ if (enable_oplocks) {
+ server->oplocks = true;
+ server->oplock_credits = 1;
+ } else
+ server->oplocks = false;
+
server->echo_credits = 1;
- server->oplock_credits = 1;
}
server->credits -= server->echo_credits + server->oplock_credits;
return 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3417340..355cc45 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -46,6 +46,7 @@
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
+#include "cifs_spnego.h"

/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -427,19 +428,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
cifs_dbg(FYI, "missing security blob on negprot\n");

rc = cifs_enable_signing(server, ses->sign);
-#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
if (rc)
goto neg_exit;
- if (blob_length)
+ if (blob_length) {
rc = decode_negTokenInit(security_blob, blob_length, server);
- if (rc == 1)
- rc = 0;
- else if (rc == 0) {
- rc = -EIO;
- goto neg_exit;
+ if (rc == 1)
+ rc = 0;
+ else if (rc == 0)
+ rc = -EIO;
}
-#endif
-
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -533,7 +530,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
struct TCP_Server_Info *server = ses->server;
u16 blob_length = 0;
- char *security_blob;
+ struct key *spnego_key = NULL;
+ char *security_blob = NULL;
char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */

@@ -561,7 +559,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
ses->ntlmssp->sesskey_per_smbsess = true;

/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
- ses->sectype = RawNTLMSSP;
+ if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+ ses->sectype = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
if (phase == NtLmChallenge)
@@ -590,7 +589,48 @@ ssetup_ntlmssp_authenticate:
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
- if (phase == NtLmNegotiate) {
+
+ if (ses->sectype == Kerberos) {
+#ifdef CONFIG_CIFS_UPCALL
+ struct cifs_spnego_msg *msg;
+
+ spnego_key = cifs_get_spnego_key(ses);
+ if (IS_ERR(spnego_key)) {
+ rc = PTR_ERR(spnego_key);
+ spnego_key = NULL;
+ goto ssetup_exit;
+ }
+
+ msg = spnego_key->payload.data;
+ /*
+ * check version field to make sure that cifs.upcall is
+ * sending us a response in an expected form
+ */
+ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+ cifs_dbg(VFS,
+ "bad cifs.upcall version. Expected %d got %d",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ rc = -EKEYREJECTED;
+ goto ssetup_exit;
+ }
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS,
+ "Kerberos can't allocate (%u bytes) memory",
+ msg->sesskey_len);
+ rc = -ENOMEM;
+ goto ssetup_exit;
+ }
+ ses->auth_key.len = msg->sesskey_len;
+ blob_length = msg->secblob_len;
+ iov[1].iov_base = msg->data + msg->sesskey_len;
+ iov[1].iov_len = blob_length;
+#else
+ rc = -EOPNOTSUPP;
+ goto ssetup_exit;
+#endif /* CONFIG_CIFS_UPCALL */
+ } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
GFP_KERNEL);
if (ntlmssp_blob == NULL) {
@@ -613,6 +653,8 @@ ssetup_ntlmssp_authenticate:
/* with raw NTLMSSP we don't encapsulate in SPNEGO */
security_blob = ntlmssp_blob;
}
+ iov[1].iov_base = security_blob;
+ iov[1].iov_len = blob_length;
} else if (phase == NtLmAuthenticate) {
req->hdr.SessionId = ses->Suid;
ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
@@ -640,6 +682,8 @@ ssetup_ntlmssp_authenticate:
} else {
security_blob = ntlmssp_blob;
}
+ iov[1].iov_base = security_blob;
+ iov[1].iov_len = blob_length;
} else {
cifs_dbg(VFS, "illegal ntlmssp phase\n");
rc = -EIO;
@@ -651,8 +695,6 @@ ssetup_ntlmssp_authenticate:
cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
1 /* pad */ - 4 /* rfc1001 len */);
req->SecurityBufferLength = cpu_to_le16(blob_length);
- iov[1].iov_base = security_blob;
- iov[1].iov_len = blob_length;

inc_rfc1001_len(req, blob_length - 1 /* pad */);

@@ -663,6 +705,7 @@ ssetup_ntlmssp_authenticate:

kfree(security_blob);
rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
+ ses->Suid = rsp->hdr.SessionId;
if (resp_buftype != CIFS_NO_BUFFER &&
rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
if (phase != NtLmNegotiate) {
@@ -680,7 +723,6 @@ ssetup_ntlmssp_authenticate:
/* NTLMSSP Negotiate sent now processing challenge (response) */
phase = NtLmChallenge; /* process ntlmssp challenge */
rc = 0; /* MORE_PROCESSING is not an error here but expected */
- ses->Suid = rsp->hdr.SessionId;
rc = decode_ntlmssp_challenge(rsp->Buffer,
le16_to_cpu(rsp->SecurityBufferLength), ses);
}
@@ -737,6 +779,10 @@ keygen_exit:
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
}
+ if (spnego_key) {
+ key_invalidate(spnego_key);
+ key_put(spnego_key);
+ }
kfree(ses->ntlmssp);

return rc;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 227c53d..9544693 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -491,7 +491,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
* for it without upsetting the slab allocator.
*/
if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
- sizeof(struct page) > PAGE_SIZE)
+ sizeof(struct page *) > PAGE_SIZE)
return 0;

return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
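The one-character nfs fix matters because this test estimates the size of the struct page * array a later allocation will need, and struct page is much larger than a pointer, so the old arithmetic refused to coalesce requests roughly eight times too early on 64-bit. A runnable illustration of the magnitude (the struct size is representative, not guaranteed):

#include <stdio.h>

struct fake_page { unsigned long words[8]; };	/* ~64 bytes, like struct page */

int main(void)
{
	size_t page_size = 4096;

	printf("entries per page if sized by struct:  %zu\n",
	       page_size / sizeof(struct fake_page));	/* 64 on LP64 */
	printf("entries per page if sized by pointer: %zu\n",
	       page_size / sizeof(struct fake_page *));	/* 512 on LP64 */
	return 0;
}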
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fdf4b41..482cfd3 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
int found, ret;
int set_maybe;
int dispatch_assert = 0;
+ int dispatched = 0;

if (!dlm_grab(dlm))
return DLM_MASTER_RESP_NO;
@@ -1658,15 +1659,18 @@ send_response:
mlog(ML_ERROR, "failed to dispatch assert master work\n");
response = DLM_MASTER_RESP_ERROR;
dlm_lockres_put(res);
- } else
+ } else {
+ dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
+ }
spin_unlock(&res->spinlock);
} else {
if (res)
dlm_lockres_put(res);
}

- dlm_put(dlm);
+ if (!dispatched)
+ dlm_put(dlm);
return response;
}

@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,


/* queue up work for dlm_assert_master_worker */
- dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
item->u.am.lockres = res; /* already have a ref */
/* can optionally ignore node numbers higher than this node */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index cecd875..f4b4c78 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1691,6 +1691,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
unsigned int hash;
int master = DLM_LOCK_RES_OWNER_UNKNOWN;
u32 flags = DLM_ASSERT_MASTER_REQUERY;
+ int dispatched = 0;

if (!dlm_grab(dlm)) {
/* since the domain has gone away on this
@@ -1716,8 +1717,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
dlm_put(dlm);
/* sender will take care of this and retry */
return ret;
- } else
+ } else {
+ dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
+ }
spin_unlock(&res->spinlock);
} else {
/* put.. incase we are not the master */
@@ -1727,7 +1730,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
}
spin_unlock(&dlm->spinlock);

- dlm_put(dlm);
+ if (!dispatched)
+ dlm_put(dlm);
return master;
}

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 48b14d6..2ef0f5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2408,13 +2408,20 @@ unsigned long nr_running(void)

/*
* Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible for using it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
*/
bool single_task_running(void)
{
- if (cpu_rq(smp_processor_id())->nr_running == 1)
- return true;
- else
- return false;
+ return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
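One way to honour that caveat, sketched with a hypothetical caller: disable preemption around the check so the answer at least refers to the CPU the caller is actually on, and treat the result as a hint:

#include <linux/preempt.h>
#include <linux/sched.h>

static bool demo_should_keep_polling(void)
{
	bool alone;

	preempt_disable();		/* pin to this CPU for the check */
	alone = single_task_running();
	preempt_enable();

	return alone;			/* may already be stale: a hint only */
}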

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6a93185..6e80711 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1431,7 +1431,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
negative = (tick_error < 0);

/* Sort out the magnitude of the correction */
- tick_error = abs(tick_error);
+ tick_error = abs64(tick_error);
for (adj = 0; tick_error > interval; adj++)
tick_error >>= 1;
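On 32-bit kernels of this era abs() falls back to int width for a 64-bit argument, so tick_error was truncated before the sign was stripped; abs64() keeps all 64 bits. A runnable userspace sketch of the truncation (the 32-bit helper mimics the old macro's behaviour):

#include <stdio.h>
#include <stdint.h>

static int32_t abs32(int32_t v) { return v < 0 ? -v : v; }	/* old abs() path */
static int64_t my_abs64(int64_t v) { return v < 0 ? -v : v; }	/* what abs64() does */

int main(void)
{
	int64_t tick_error = -0x100000001LL;	/* large negative error */

	printf("truncated: %d\n", abs32((int32_t)tick_error));		/* 1: wrong */
	printf("abs64:     %lld\n", (long long)my_abs64(tick_error));	/* 4294967297 */
	return 0;
}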

diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 185c341..aeedc3a 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -621,15 +621,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
{
int idx = 0;
struct fib_rule *rule;
+ int err = 0;

rcu_read_lock();
list_for_each_entry_rcu(rule, &ops->rules_list, list) {
if (idx < cb->args[1])
goto skip;

- if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWRULE,
- NLM_F_MULTI, ops) < 0)
+ err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWRULE,
+ NLM_F_MULTI, ops);
+ if (err)
break;
skip:
idx++;
@@ -638,7 +640,7 @@ skip:
cb->args[1] = idx;
rules_ops_put(ops);

- return skb->len;
+ return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -654,7 +656,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
if (ops == NULL)
return -EAFNOSUPPORT;

- return dump_rules(skb, cb, ops);
+ dump_rules(skb, cb, ops);
+
+ return skb->len;
}

rcu_read_lock();
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e625be5..e10efd2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2834,6 +2834,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
+ skb_mstamp_get(&skb->skb_mstamp);
/* Send it off. */
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7226697..c7a69e8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -552,7 +552,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)

if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == mrt->mfc6_cache_array)
+ else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}

diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 43c926c..58ebbf5 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);

void nf_log_unregister(struct nf_logger *logger)
{
+ const struct nf_logger *log;
int i;

mutex_lock(&nf_log_mutex);
- for (i = 0; i < NFPROTO_NUMPROTO; i++)
- RCU_INIT_POINTER(loggers[i][logger->type], NULL);
+ for (i = 0; i < NFPROTO_NUMPROTO; i++) {
+ log = nft_log_dereference(loggers[i][logger->type]);
+ if (log == logger)
+ RCU_INIT_POINTER(loggers[i][logger->type], NULL);
+ }
mutex_unlock(&nf_log_mutex);
+ synchronize_rcu();
}
EXPORT_SYMBOL(nf_log_unregister);
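Two things changed in nf_log_unregister(): a slot is cleared only if it still points at the logger being removed (so unregistering one logger no longer wipes out another registered for the same type), and synchronize_rcu() now waits out in-flight readers before the caller may free the logger. A generic sketch of that unregister pattern (single-slot registry and types hypothetical):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct demo_ops { void (*fn)(void); };

static DEFINE_MUTEX(demo_mutex);
static struct demo_ops __rcu *demo_slot;

static void demo_unregister(struct demo_ops *ops)
{
	mutex_lock(&demo_mutex);
	/* clear the slot only if it is still ours */
	if (rcu_dereference_protected(demo_slot,
				      lockdep_is_held(&demo_mutex)) == ops)
		RCU_INIT_POINTER(demo_slot, NULL);
	mutex_unlock(&demo_mutex);

	synchronize_rcu();	/* drain rcu_read_lock() readers */
	/* now the caller may free 'ops' */
}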

diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index b636486..ccf3d18 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -555,6 +555,13 @@ struct nft_xt {

static struct nft_expr_type nft_match_type;

+static bool nft_match_cmp(const struct xt_match *match,
+ const char *name, u32 rev, u32 family)
+{
+ return strcmp(match->name, name) == 0 && match->revision == rev &&
+ (match->family == NFPROTO_UNSPEC || match->family == family);
+}
+
static const struct nft_expr_ops *
nft_match_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
@@ -562,7 +569,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
struct nft_xt *nft_match;
struct xt_match *match;
char *mt_name;
- __u32 rev, family;
+ u32 rev, family;

if (tb[NFTA_MATCH_NAME] == NULL ||
tb[NFTA_MATCH_REV] == NULL ||
@@ -577,8 +584,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
list_for_each_entry(nft_match, &nft_match_list, head) {
struct xt_match *match = nft_match->ops.data;

- if (strcmp(match->name, mt_name) == 0 &&
- match->revision == rev && match->family == family) {
+ if (nft_match_cmp(match, mt_name, rev, family)) {
if (!try_module_get(match->me))
return ERR_PTR(-ENOENT);

@@ -629,6 +635,13 @@ static LIST_HEAD(nft_target_list);

static struct nft_expr_type nft_target_type;

+static bool nft_target_cmp(const struct xt_target *tg,
+ const char *name, u32 rev, u32 family)
+{
+ return strcmp(tg->name, name) == 0 && tg->revision == rev &&
+ (tg->family == NFPROTO_UNSPEC || tg->family == family);
+}
+
static const struct nft_expr_ops *
nft_target_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
@@ -636,7 +649,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
struct nft_xt *nft_target;
struct xt_target *target;
char *tg_name;
- __u32 rev, family;
+ u32 rev, family;

if (tb[NFTA_TARGET_NAME] == NULL ||
tb[NFTA_TARGET_REV] == NULL ||
@@ -651,8 +664,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
list_for_each_entry(nft_target, &nft_target_list, head) {
struct xt_target *target = nft_target->ops.data;

- if (strcmp(target->name, tg_name) == 0 &&
- target->revision == rev && target->family == family) {
+ if (nft_target_cmp(target, tg_name, rev, family)) {
if (!try_module_get(target->me))
return ERR_PTR(-ENOENT);

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8b57d6a..4e37b2a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -134,6 +134,24 @@ static inline u32 netlink_group_mask(u32 group)
return group ? 1 << (group - 1) : 0;
}

+static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ unsigned int len = skb_end_offset(skb);
+ struct sk_buff *new;
+
+ new = alloc_skb(len, gfp_mask);
+ if (new == NULL)
+ return NULL;
+
+ NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
+ NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
+ NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
+
+ memcpy(skb_put(new, len), skb->data, len);
+ return new;
+}
+
int netlink_add_tap(struct netlink_tap *nt)
{
if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -215,7 +233,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
int ret = -ENOMEM;

dev_hold(dev);
- nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+ nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
+ else
+ nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
@@ -287,11 +309,6 @@ static void netlink_rcv_wake(struct sock *sk)
}

#ifdef CONFIG_NETLINK_MMAP
-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
-{
- return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
-}
-
static bool netlink_rx_is_mmaped(struct sock *sk)
{
return nlk_sk(sk)->rx_ring.pg_vec != NULL;
@@ -843,7 +860,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
}

#else /* CONFIG_NETLINK_MMAP */
-#define netlink_skb_is_mmaped(skb) false
#define netlink_rx_is_mmaped(sk) false
#define netlink_tx_is_mmaped(sk) false
#define netlink_mmap sock_no_mmap
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index f1c31b3..5a678b9 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -58,6 +58,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
return container_of(sk, struct netlink_sock, sk);
}

+static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NETLINK_MMAP
+ return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+#else
+ return false;
+#endif /* CONFIG_NETLINK_MMAP */
+}
+
struct netlink_table {
struct rhashtable hash;
struct hlist_head mc_list;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 58a6ef5..34d2643 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -884,7 +884,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (error)
goto err_kfree_flow;

- ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
+ ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, true, &mask);

/* Validate actions. */
error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
@@ -993,7 +993,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
struct sw_flow_key masked_key;
int error;

- ovs_flow_mask_key(&masked_key, key, mask);
+ ovs_flow_mask_key(&masked_key, key, true, mask);
error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
if (error) {
OVS_NLERR(log,
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 5899bf1..514c6233 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -56,20 +56,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
- const struct sw_flow_mask *mask)
+ bool full, const struct sw_flow_mask *mask)
{
- const long *m = (const long *)((const u8 *)&mask->key +
- mask->range.start);
- const long *s = (const long *)((const u8 *)src +
- mask->range.start);
- long *d = (long *)((u8 *)dst + mask->range.start);
+ int start = full ? 0 : mask->range.start;
+ int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+ const long *m = (const long *)((const u8 *)&mask->key + start);
+ const long *s = (const long *)((const u8 *)src + start);
+ long *d = (long *)((u8 *)dst + start);
int i;

- /* The memory outside of the 'mask->range' are not set since
- * further operations on 'dst' only uses contents within
- * 'mask->range'.
+ /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+ * if 'full' is false the memory outside of the 'mask->range' is left
+ * uninitialized. This can be used as an optimization when further
+ * operations on 'dst' only use contents within 'mask->range'.
*/
- for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+ for (i = 0; i < len; i += sizeof(long))
*d++ = *s++ & *m++;
}

@@ -421,7 +422,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
u32 hash;
struct sw_flow_key masked_key;

- ovs_flow_mask_key(&masked_key, unmasked, mask);
+ ovs_flow_mask_key(&masked_key, unmasked, true, mask);
hash = flow_hash(&masked_key, key_start, key_end);
head = find_bucket(ti, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 309fa64..9f25cf4 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -82,5 +82,5 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
const struct sw_flow_match *match);

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
- const struct sw_flow_mask *mask);
+ bool full, const struct sw_flow_mask *mask);
#endif /* flow_table.h */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fef2acd..85e6f03 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -702,7 +702,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
* outstanding data and rely on the retransmission limit be reached
* to shutdown the association.
*/
- if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t->asoc->overall_error_count = 0;

/* Clear the hb_sent flag to signal that we had a good
diff --git a/security/keys/gc.c b/security/keys/gc.c
index c795237..addf060 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
kdebug("- %u", key->serial);
key_check(key);

+ /* Throw away the key data if the key is instantiated */
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+ !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+ key->type->destroy)
+ key->type->destroy(key);
+
security_key_free(key);

/* deal with the user's key tracking and quota */
@@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);

- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
key_user_put(key->user);

kfree(key->description);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 0c7aea4..19bbe5e6 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -439,6 +439,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,

kenter("");

+ if (ctx->index_key.type == &key_type_keyring)
+ return ERR_PTR(-EPERM);
+
user = key_user_lookup(current_fsuid());
if (!user)
return ERR_PTR(-ENOMEM);
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a..e040621 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
Drivers that are implemented on ASoC can be found in
"ALSA for SoC audio support" section.

+config SND_PXA2XX_LIB
+ tristate
+ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+ select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+ bool
+
if SND_ARM

config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
tristate
select SND_PCM

-config SND_PXA2XX_LIB
- tristate
- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
- bool
-
config SND_PXA2XX_AC97
tristate "AC97 driver for the Intel PXA2xx chip"
depends on ARCH_PXA
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 2434b6d..e1f501b 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
config SND_PXA2XX_SOC
tristate "SoC Audio for the Intel PXA2xx chip"
depends on ARCH_PXA
- select SND_ARM
select SND_PXA2XX_LIB
help
Say Y or M if you want to add support for codecs attached to
@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
config SND_PXA2XX_SOC_AC97
tristate
select AC97_BUS
- select SND_ARM
select SND_PXA2XX_LIB_AC97
select SND_SOC_AC97_BUS

diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 1f60546..9e4b04e 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
.reset = pxa2xx_ac97_cold_reset,
};

-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
.addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
.filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
};

-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
.addr = __PREG(PCDR),
.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b20e40c..a09a8268 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1438,7 +1438,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
if (ph->needs_swap)
nr = bswap_32(nr);

- ph->env.nr_cpus_online = nr;
+ ph->env.nr_cpus_avail = nr;

ret = readn(fd, &nr, sizeof(nr));
if (ret != sizeof(nr))
@@ -1447,7 +1447,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
if (ph->needs_swap)
nr = bswap_32(nr);

- ph->env.nr_cpus_avail = nr;
+ ph->env.nr_cpus_online = nr;
return 0;
}

diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 148b239..1280096 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -768,40 +768,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
return KVM_MMIO_BUS;
}

-static int
-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
+ enum kvm_bus bus_idx,
+ struct kvm_ioeventfd *args)
{
- enum kvm_bus bus_idx;
- struct _ioeventfd *p;
- struct eventfd_ctx *eventfd;
- int ret;
-
- bus_idx = ioeventfd_bus_from_flags(args->flags);
- /* must be natural-word sized, or 0 to ignore length */
- switch (args->len) {
- case 0:
- case 1:
- case 2:
- case 4:
- case 8:
- break;
- default:
- return -EINVAL;
- }
-
- /* check for range overflow */
- if (args->addr + args->len < args->addr)
- return -EINVAL;

- /* check for extra flags that we don't understand */
- if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
- return -EINVAL;
-
- /* ioeventfd with no length can't be combined with DATAMATCH */
- if (!args->len &&
- args->flags & (KVM_IOEVENTFD_FLAG_PIO |
- KVM_IOEVENTFD_FLAG_DATAMATCH))
- return -EINVAL;
+ struct eventfd_ctx *eventfd;
+ struct _ioeventfd *p;
+ int ret;

eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
@@ -840,16 +814,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (ret < 0)
goto unlock_fail;

- /* When length is ignored, MMIO is also put on a separate bus, for
- * faster lookups.
- */
- if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
- ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
- p->addr, 0, &p->dev);
- if (ret < 0)
- goto register_fail;
- }
-
kvm->buses[bus_idx]->ioeventfd_count++;
list_add_tail(&p->list, &kvm->ioeventfds);

@@ -857,8 +821,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

return 0;

-register_fail:
- kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
mutex_unlock(&kvm->slots_lock);

@@ -870,14 +832,13 @@ fail:
}

static int
-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_ioeventfd *args)
{
- enum kvm_bus bus_idx;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;

- bus_idx = ioeventfd_bus_from_flags(args->flags);
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
@@ -898,10 +859,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
continue;

kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- if (!p->length) {
- kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
- &p->dev);
- }
kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
@@ -915,6 +872,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
return ret;
}

+static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+ enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
+ int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+
+ if (!args->len && bus_idx == KVM_MMIO_BUS)
+ kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+
+ return ret;
+}
+
+static int
+kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+ enum kvm_bus bus_idx;
+ int ret;
+
+ bus_idx = ioeventfd_bus_from_flags(args->flags);
+ /* must be natural-word sized, or 0 to ignore length */
+ switch (args->len) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /* check for extra flags that we don't understand */
+ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+ return -EINVAL;
+
+ /* ioeventfd with no length can't be combined with DATAMATCH */
+ if (!args->len &&
+ args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+ KVM_IOEVENTFD_FLAG_DATAMATCH))
+ return -EINVAL;
+
+ ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
+ if (ret)
+ goto fail;
+
+ /* When length is ignored, MMIO is also put on a separate bus, for
+ * faster lookups.
+ */
+ if (!args->len && bus_idx == KVM_MMIO_BUS) {
+ ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
+ if (ret < 0)
+ goto fast_fail;
+ }
+
+ return 0;
+
+fast_fail:
+ kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
+fail:
+ return ret;
+}
+
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0edccc8f..ce5ab83 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2871,10 +2871,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
const struct kvm_io_range *r2)
{
- if (r1->addr < r2->addr)
+ gpa_t addr1 = r1->addr;
+ gpa_t addr2 = r2->addr;
+
+ if (addr1 < addr2)
return -1;
- if (r1->addr + r1->len > r2->addr + r2->len)
+
+ /* If r2->len == 0, match the exact address. If r2->len != 0,
+ * accept any overlapping write. Any order is acceptable for
+ * overlapping ranges, because kvm_io_bus_get_first_dev ensures
+ * we process all of them.
+ */
+ if (r2->len) {
+ addr1 += r1->len;
+ addr2 += r2->len;
+ }
+
+ if (addr1 > addr2)
return 1;
+
return 0;
}
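After this change, a zero-length search key (r2->len == 0) matches only an exact address, while a ranged key accepts any overlap; this is what lets wildcard ioeventfds registered on KVM_FAST_MMIO_BUS be found for ordinary MMIO writes. A runnable distillation (struct simplified, values invented):

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t addr, len; };

/* same logic as the patched kvm_io_bus_cmp() */
static int cmp(const struct range *r1, const struct range *r2)
{
	uint64_t a1 = r1->addr, a2 = r2->addr;

	if (a1 < a2)
		return -1;
	if (r2->len) {		/* ranged search: accept any overlap */
		a1 += r1->len;
		a2 += r2->len;
	}
	if (a1 > a2)
		return 1;
	return 0;		/* match */
}

int main(void)
{
	struct range dev    = { 0x1000, 0 };		/* zero-length registration */
	struct range exact  = { 0x1000, 0 };		/* exact-address lookup */
	struct range ranged = { 0x0ff8, 0x10 };		/* overlapping write */

	printf("exact:  %d\n", cmp(&dev, &exact));	/* 0: matches */
	printf("ranged: %d\n", cmp(&dev, &ranged));	/* 0: overlap matches too */
	return 0;
}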

