Subject: Linux 3.2.73
From: Ben Hutchings
Date: 2015-11-17

I'm announcing the release of the 3.2.73 kernel.

All users of the 3.2 kernel series should upgrade.

The updated 3.2.y git tree can be found at:
        https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-3.2.y
and can be browsed at the normal kernel.org git web browser:
        https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git

The diff from 3.2.72 is attached to this message.

Ben.

------------

 Makefile                                     |  2 +-
 arch/m68k/include/asm/linkage.h              | 30 ++++++++++++++++
 arch/mips/mm/dma-default.c                   |  2 +-
 arch/powerpc/kernel/rtas.c                   |  3 ++
 arch/x86/include/asm/kvm_host.h              |  1 +
 arch/x86/kernel/process_64.c                 | 52 ++++++++++++++++++++++------
 arch/x86/kvm/mmu.c                           | 45 ++++++++++++++++++++++++
 arch/x86/kvm/svm.c                           |  8 +++++
 arch/x86/kvm/trace.h                         |  1 +
 arch/x86/kvm/vmx.c                           |  5 ++-
 arch/x86/xen/setup.c                         |  2 +-
 crypto/ablkcipher.c                          |  2 +-
 crypto/ahash.c                               |  3 +-
 crypto/algapi.c                              |  2 +-
 crypto/api.c                                 |  6 ++--
 drivers/base/regmap/regmap-debugfs.c         |  5 ++-
 drivers/block/xen-blkfront.c                 |  3 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c        |  5 +--
 drivers/infiniband/core/cm.c                 | 10 +++++-
 drivers/iommu/intel-iommu.c                  | 19 ++++++----
 drivers/md/md.c                              |  1 +
 drivers/md/persistent-data/dm-btree-remove.c | 17 +++++----
 drivers/md/persistent-data/dm-btree.c        |  2 +-
 drivers/md/raid0.c                           | 46 ++++++++++++++----------
 drivers/md/raid1.c                           | 38 ++++++++++++++++++--
 drivers/md/raid1.h                           |  5 +++
 drivers/md/raid10.c                          | 42 +++++++++++++++++++---
 drivers/md/raid10.h                          |  6 ++++
 drivers/mtd/ubi/io.c                         |  5 +++
 drivers/mtd/ubi/vtbl.c                       |  1 +
 drivers/mtd/ubi/wl.c                         |  1 +
 drivers/net/ppp/pppoe.c                      |  3 +-
 drivers/net/usb/asix.c                       | 16 ++-------
 drivers/net/wireless/ath/ath9k/init.c        |  1 +
 drivers/net/wireless/iwlwifi/iwl-agn.c       |  2 +-
 drivers/scsi/3w-9xxx.c                       | 28 +++++++++++----
 drivers/scsi/mvsas/mv_sas.c                  |  2 ++
 drivers/staging/iio/accel/sca3000_ring.c     |  2 +-
 drivers/tty/n_tty.c                          |  6 ++--
 drivers/tty/tty_io.c                         | 31 ++++++++++++++---
 drivers/usb/core/quirks.c                    | 13 +++++++
 drivers/usb/host/xhci-pci.c                  |  5 +++
 drivers/usb/host/xhci-ring.c                 | 30 +++++++++++++---
 fs/cifs/inode.c                              | 39 +--------------------
 fs/nfs/nfs4state.c                           |  2 +-
 include/linux/sched.h                        |  2 +-
 include/linux/skbuff.h                       |  6 +++-
 include/sound/wm8904.h                       |  2 +-
 kernel/irq/proc.c                            | 19 ++++++++--
 kernel/module.c                              |  8 +++--
 kernel/sched.c                               | 14 ++++----
 kernel/time/clocksource.c                    |  2 +-
 mm/filemap.c                                 |  9 ++---
 mm/hugetlb.c                                 |  8 +++++
 net/core/datagram.c                          |  6 +++-
 net/core/ethtool.c                           |  2 +-
 net/core/skbuff.c                            |  9 ++---
 net/ipv4/tcp_input.c                         |  2 +-
 net/ipv4/udp.c                               |  2 +-
 net/ipv6/raw.c                               |  2 +-
 net/ipv6/udp.c                               |  3 +-
 net/rxrpc/ar-recvmsg.c                       |  3 +-
 security/keys/gc.c                           | 10 +++---
 sound/pci/hda/patch_conexant.c               |  1 +
 sound/synth/emux/emux_oss.c                  |  3 +-
 65 files changed, 487 insertions(+), 176 deletions(-)

Andreas Schwab (1):
      m68k: Define asmlinkage_protect

Ben Hutchings (3):
      Revert "KVM: MMU: fix validation of mmio page fault"
      genirq: Fix race in register_irq_proc()
      Linux 3.2.73

Cathy Avery (1):
      xen-blkfront: check for null drvdata in blkback_changed (XenbusStateClosing)

Charles Keepax (2):
      ASoC: wm8904: Correct number of EQ registers
      asix: Do full reset during ax88772_bind

Christian Zander (1):
      iommu/vt-d: fix range computation when making room for large pages

Christoph Hellwig (1):
      3w-9xxx: don't unmap bounce buffered commands

Dan Carpenter (1):
      iio: accel: sca3000: memory corruption in sca3000_read_first_n_hw_rb()

David Henningsson (1):
      ALSA: hda - Fix inverted internal mic on Lenovo G50-80

David Howells (2):
      KEYS: Fix race between key destruction and finding a keyring by name
      KEYS: Fix crash when attempt to garbage collect an uninstantiated keyring

Denis Turischev (1):
      xhci: Switch Intel Lynx Point LP ports to EHCI on shutdown.

Doron Tsur (1):
      IB/cm: Fix rb-tree duplicate free and use-after-free

Dāvis Mosāns (1):
      mvsas: Fix NULL pointer dereference in mvs_slot_task_free

Eric Northup (1):
      KVM: x86: work around infinite loop in microcode when #AC is delivered

Felix Fietkau (1):
      ath9k: declare required extra tx headroom

Guillaume Nault (2):
      ppp: don't override sk->sk_state in pppoe_flush_dev()
      ppp: fix pppoe_dev deletion condition in pppoe_release()

Herbert Xu (1):
      crypto: api - Only abort operations on fatal signal

Ilia Mirkin (1):
      drm/nouveau/gem: return only valid domain when there's only one

James Hogan (1):
      MIPS: dma-default: Fix 32-bit fall back to GFP_DMA

Jan Kara (1):
      mm: make sendfile(2) killable

Jann Horn (1):
      drivers/tty: require read access for controlling terminal

Joe Perches (1):
      ethtool: Use kcalloc instead of kmalloc for ethtool_get_strings

Joe Thornber (1):
      dm btree remove: fix a bug when rebalancing nodes after removal

Johannes Berg (1):
      iwlwifi: dvm: fix D3 firmware PN programming

John Stultz (1):
      clocksource: Fix abs() usage w/ 64bit values

Kosuke Tatsukawa (1):
      tty: fix stall caused by missing memory barrier in drivers/tty/n_tty.c

Laura Abbott (1):
      xhci: Add spurious wakeup quirk for LynxPoint-LP controllers

Malcolm Crossley (1):
      x86/xen: Do not clip xen_e820_map to xen_e820_map_entries when sanitizing map

Mark Brown (2):
      regmap: debugfs: Ensure we don't underflow when printing access masks
      regmap: debugfs: Don't bother actually printing when calculating max length

Mathias Nyman (2):
      xhci: don't finish a TD if we get a short transfer event mid TD
      xhci: handle no ping response error properly

Mel Gorman (1):
      mm: hugetlbfs: skip shared VMAs when unmapping private pages to satisfy a fault

Michel Stam (1):
      asix: Don't reset PHY on if_up for ASIX 88772

Mike Snitzer (1):
      dm btree: fix leak of bufio-backed block in btree_split_beneath error path

NeilBrown (6):
      md/raid0: update queue parameter in a safer location.
      md/raid0: apply base queue limits *before* disk_stack_limits
      md/raid1: ensure device failure recorded before write request returns.
      md/raid1: don't clear bitmap bit when bad-block-list write fails.
      md/raid10: ensure device failure recorded before write request returns.
      md/raid10: don't clear bitmap bit when bad-block-list write fails.

Olga Kornievskaia (1):
      Failing to send a CLOSE if file is opened WRONLY and server reboots on a 4.x mount

Peter Zijlstra (2):
      module: Fix locking in symbol_put_addr()
      sched/core: Fix TASK_DEAD race in finish_task_switch()

Pravin B Shelar (2):
      skbuff: Fix skb checksum flag on skb pull
      skbuff: Fix skb checksum partial check.

Richard Guy Briggs (1):
      sched: declare pid_alive as inline

Richard Weinberger (1):
      UBI: Validate data_size

Russell King (1):
      crypto: ahash - ensure statesize is non-zero

Sabrina Dubroca (1):
      net: add length argument to skb_copy_and_csum_datagram_iovec

Steve French (1):
      Do not fall back to SMBWriteX in set_file_size error cases

Takashi Iwai (1):
      ALSA: synth: Fix conflicting OSS device registration on AWE32

Thomas Gleixner (1):
      x86/process: Add proper bound checks in 64bit get_wchan()

Vasant Hegde (1):
      powerpc/rtas: Validate rtas.entry before calling enter_rtas()

Vincent Palatin (1):
      usb: Add device quirk for Logitech PTZ cameras

Yao-Wen Mao (1):
      USB: Add reset-resume quirk for two Plantronics usb headphones.

shengyong (1):
      UBI: return ENOSPC if no enough space available

--
Ben Hutchings
It's easier to fight for one's principles than to live up to them.

--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 72
+SUBLEVEL = 73
EXTRAVERSION =
NAME = Saber-toothed Squirrel

--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"

+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
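
As a point of reference, the SYSCALL_DEFINEx() wrappers are where
asmlinkage_protect() ends up being used. A simplified sketch of such a
wrapper body (sys_example() and do_example() are hypothetical names,
not part of this patch):

asmlinkage long sys_example(int fd, unsigned long arg)
{
	long ret = do_example(fd, arg);		/* the real work */

	/*
	 * Tell gcc the caller-owned argument slots are still live, so
	 * it won't reuse them as scratch space or turn this into a
	 * tail call.
	 */
	asmlinkage_protect(2, ret, fd, arg);
	return ret;
}

The empty asm with the "m" inputs costs nothing at run time; it only
constrains what gcc may do with the caller's argument slots.
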
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -67,7 +67,7 @@
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -992,6 +992,9 @@
if (!capable(CAP_SYS_ADMIN))
return -EPERM;

+ if (!rtas.entry)
+ return -EINVAL;
+
if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
return -EFAULT;

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -83,6 +83,7 @@
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
+#define AC_VECTOR 17
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -555,27 +555,59 @@
current_thread_info()->status |= TS_COMPAT;
}

+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long stack;
- u64 fp, ip;
+ unsigned long start, bottom, top, sp, fp, ip;
int count = 0;

if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
return 0;
- fp = *(u64 *)(p->thread.sp);
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The tasks stack pointer points at the location where the
+ * framepointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = ACCESS_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
+ return 0;
+
+ fp = ACCESS_ONCE(*(unsigned long *)sp);
do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
+ if (fp < bottom || fp > top)
return 0;
- ip = *(u64 *)(fp+8);
+ ip = ACCESS_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip))
return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
+ fp = ACCESS_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
return 0;
}
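
The "... IP FP ... IP FP" layout described in the comment above can be
demonstrated from user space as well. A minimal sketch, assuming x86-64
and a build with frame pointers (e.g. -O0 or -fno-omit-frame-pointer);
unlike get_wchan() it trusts the current stack instead of
bounds-checking another task's:

#include <stdint.h>
#include <stdio.h>

__attribute__((noinline)) static void walk_frames(void)
{
	uintptr_t fp = (uintptr_t)__builtin_frame_address(0);

	for (int count = 0; fp && count < 16; count++) {
		/* [fp + 8] holds the return IP, [fp] the caller's FP */
		uintptr_t ip = *(uintptr_t *)(fp + sizeof(uintptr_t));
		uintptr_t next = *(uintptr_t *)fp;

		if (!ip)
			break;
		printf("frame %d: return address %#lx\n",
		       count, (unsigned long)ip);
		if (next <= fp)	/* frames must move up the stack */
			break;
		fp = next;
	}
}

int main(void)
{
	walk_frames();
	return 0;
}
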

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -326,6 +326,12 @@
{
return ACCESS_ONCE(*sptep);
}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ /* It is valid if the spte is zapped. */
+ return spte == 0ull;
+}
#else
union split_spte {
struct {
@@ -430,6 +436,23 @@

return spte.spte;
}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ union split_spte sspte = (union split_spte)spte;
+ u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+ /* It is valid if the spte is zapped. */
+ if (spte == 0ull)
+ return true;
+
+ /* It is valid if the spte is being zapped. */
+ if (sspte.spte_low == 0ull &&
+ (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+ return true;
+
+ return false;
+}
#endif

static bool spte_has_volatile_bits(u64 spte)
@@ -2872,6 +2895,21 @@
return vcpu_match_mmio_gva(vcpu, addr);
}

+
+/*
+ * On direct hosts, the last spte is only allows two states
+ * for mmio page fault:
+ * - It is the mmio spte
+ * - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+ return __check_direct_spte_mmio_pf(spte);
+}
+
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -2913,6 +2951,13 @@
}

/*
+ * It's ok if the gva is remapped by other cpus on shadow guest,
+ * it's a BUG if the gfn is not a mmio page.
+ */
+ if (direct && !check_direct_spte_mmio_pf(spte))
+ return -1;
+
+ /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1014,6 +1014,7 @@
set_exception_intercept(svm, PF_VECTOR);
set_exception_intercept(svm, UD_VECTOR);
set_exception_intercept(svm, MC_VECTOR);
+ set_exception_intercept(svm, AC_VECTOR);

set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
@@ -1689,6 +1690,12 @@
return 1;
}

+static int ac_interception(struct vcpu_svm *svm)
+{
+ kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
+ return 1;
+}
+
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3188,6 +3195,7 @@
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
+ [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -244,6 +244,7 @@
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
+ { SVM_EXIT_EXCP_BASE + AC_VECTOR, "AC excp" }, \
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
{ SVM_EXIT_INTR, "interrupt" }, \
{ SVM_EXIT_NMI, "nmi" }, \
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1171,7 +1171,7 @@
u32 eb;

eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
- (1u << NM_VECTOR) | (1u << DB_VECTOR);
+ (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
if ((vcpu->guest_debug &
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
@@ -4266,6 +4266,9 @@

ex_no = intr_info & INTR_INFO_VECTOR_MASK;
switch (ex_no) {
+ case AC_VECTOR:
+ kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+ return 1;
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
if (!(vcpu->guest_debug &
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -273,7 +273,7 @@
xen_ignore_unusable(map, memmap.nr_entries);

/* Make sure the Xen-supplied memory map is well-ordered. */
- sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+ sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

max_pages = xen_get_max_pages();
if (max_pages > max_pfn)
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -700,7 +700,7 @@
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -462,7 +462,8 @@
struct crypto_alg *base = &alg->halg.base;

if (alg->halg.digestsize > PAGE_SIZE / 8 ||
- alg->halg.statesize > PAGE_SIZE / 8)
+ alg->halg.statesize > PAGE_SIZE / 8 ||
+ alg->halg.statesize == 0)
return -EINVAL;

base->cra_type = &crypto_ahash_type;
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -342,7 +342,7 @@
crypto_alg_tested(larval->alg.cra_driver_name, 0);
}

- err = wait_for_completion_interruptible(&larval->completion);
+ err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);

out:
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -178,7 +178,7 @@
struct crypto_larval *larval = (void *)alg;
long timeout;

- timeout = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_killable_timeout(
&larval->completion, 60 * HZ);

alg = larval->adult;
@@ -441,7 +441,7 @@
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
@@ -558,7 +558,7 @@
err:
if (err != -EAGAIN)
break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -23,8 +23,7 @@
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}

static int regmap_open_file(struct inode *inode, struct file *file)
@@ -140,7 +139,7 @@
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;

/* Format the register */
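
The regmap_calc_reg_len() change relies on standard snprintf()
semantics: with a NULL buffer and size 0, nothing is written but the
return value is still the length the formatted output would need. A
minimal user-space illustration:

#include <stdio.h>

int main(void)
{
	unsigned int max_reg = 0x3fff;
	/* no buffer needed: snprintf() reports the would-be length */
	int len = snprintf(NULL, 0, "%x", max_reg);

	printf("register addresses need %d hex digits\n", len); /* 4 */
	return 0;
}

That avoids both the scratch buffer and the strlen() pass of the old
version.
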
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1292,7 +1292,8 @@
break;
/* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
- blkfront_closing(info);
+ if (info)
+ blkfront_closing(info);
break;
}
}
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -172,11 +172,12 @@
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;

- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ if (is_power_of_2(nvbo->valid_domains))
+ rep->domain = nvbo->valid_domains;
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
rep->offset = nvbo->bo.offset;
if (fpriv->vm) {
vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -856,6 +856,11 @@
case IB_CM_SIDR_REQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
+ spin_lock_irq(&cm.lock);
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+ rb_erase(&cm_id_priv->sidr_id_node,
+ &cm.remote_sidr_table);
+ spin_unlock_irq(&cm.lock);
break;
case IB_CM_REQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
@@ -3092,7 +3097,10 @@
spin_unlock_irqrestore(&cm_id_priv->lock, flags);

spin_lock_irqsave(&cm.lock, flags);
- rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+ }
spin_unlock_irqrestore(&cm.lock, flags);
return 0;

--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1798,13 +1798,20 @@
return -ENOMEM;
/* It is large page*/
if (largepage_lvl > 1) {
+ unsigned long nr_superpages, end_pfn, lvl_pages;
+
pteval |= DMA_PTE_LARGE_PAGE;
- /* Ensure that old small page tables are removed to make room
- for superpage, if they exist. */
- dma_pte_clear_range(domain, iov_pfn,
- iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
- dma_pte_free_pagetable(domain, iov_pfn,
- iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+ nr_superpages = sg_res / lvl_pages;
+ end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+
+ /*
+ * Ensure that old small page tables are
+ * removed to make room for superpage(s).
+ */
+ dma_pte_clear_range(domain, iov_pfn, end_pfn);
+ dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
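
A worked example of the corrected range computation, with assumed
numbers (2 MiB superpages, so lvl_pages = 512, and an sg_res covering
1024 small pages):

#include <stdio.h>

int main(void)
{
	unsigned long iov_pfn = 0x1000, sg_res = 1024, lvl_pages = 512;
	unsigned long nr_superpages = sg_res / lvl_pages;	/* 2 */
	unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

	/* old code stopped at iov_pfn + lvl_pages - 1 = 0x11ff */
	printf("clear 0x%lx..0x%lx\n", iov_pfn, end_pfn); /* 0x1000..0x13ff */
	return 0;
}

The old code always cleared a single superpage's worth, leaving stale
small-page tables in place behind the second and later superpages.
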
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7895,6 +7895,7 @@
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -301,11 +301,16 @@
{
int s;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned target = (nr_left + nr_center + nr_right) / 3;
- BUG_ON(target > max_entries);
+ unsigned total = nr_left + nr_center + nr_right;
+ unsigned target_right = total / 3;
+ unsigned remainder = (target_right * 3) != total;
+ unsigned target_left = target_right + remainder;
+
+ BUG_ON(target_left > max_entries);
+ BUG_ON(target_right > max_entries);

if (nr_left < nr_right) {
- s = nr_left - target;
+ s = nr_left - target_left;

if (s < 0 && nr_center < -s) {
/* not enough in central node */
@@ -316,10 +321,10 @@
} else
shift(left, center, s);

- shift(center, right, target - nr_right);
+ shift(center, right, target_right - nr_right);

} else {
- s = target - nr_right;
+ s = target_right - nr_right;
if (s > 0 && nr_center < s) {
/* not enough in central node */
shift(center, right, nr_center);
@@ -329,7 +334,7 @@
} else
shift(center, right, s);

- shift(left, center, nr_left - target);
+ shift(left, center, nr_left - target_left);
}

*key_ptr(parent, c->index) = center->keys[0];
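
The separate left/right targets matter whenever the total is not
divisible by three. A quick check of the arithmetic with a hypothetical
total of 8 entries:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned total = 8;	/* nr_left + nr_center + nr_right */
	unsigned target_right = total / 3;			/* 2 */
	unsigned remainder = (target_right * 3) != total;	/* 1 */
	unsigned target_left = target_right + remainder;	/* 3 */
	unsigned target_center = total - target_left - target_right;

	/* every entry stays accounted for across the three nodes */
	assert(target_left + target_center + target_right == total);
	printf("split %u as %u/%u/%u\n", total,
	       target_left, target_center, target_right);	/* 3/3/2 */
	return 0;
}
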
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -503,7 +503,7 @@

r = new_block(s->info, &right);
if (r < 0) {
- /* FIXME: put left */
+ unlock_block(s->info, left);
return r;
}

--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -88,6 +88,7 @@
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ unsigned short blksize = 512;

if (!conf)
return -ENOMEM;
@@ -102,6 +103,9 @@
sector_div(sectors, mddev->chunk_sectors);
rdev1->sectors = sectors * mddev->chunk_sectors;

+ blksize = max(blksize, queue_logical_block_size(
+ rdev1->bdev->bd_disk->queue));
+
list_for_each_entry(rdev2, &mddev->disks, same_set) {
pr_debug("md/raid0:%s: comparing %s(%llu)"
" with %s(%llu)\n",
@@ -138,6 +142,18 @@
}
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
+ /*
+ * now since we have the hard sector sizes, we can make sure
+ * chunk size is a multiple of that sector size
+ */
+ if ((mddev->chunk_sectors << 9) % blksize) {
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9, blksize);
+ err = -EINVAL;
+ goto abort;
+ }
+
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
@@ -186,8 +202,6 @@
}
dev[j] = rdev1;

- disk_stack_limits(mddev->gendisk, rdev1->bdev,
- rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_segments to 1, lying within
* a single page.
@@ -263,21 +277,6 @@
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;

- /*
- * now since we have the hard sector sizes, we can make sure
- * chunk size is a multiple of that sector size
- */
- if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
- mdname(mddev),
- mddev->chunk_sectors << 9);
- goto abort;
- }
-
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;

@@ -340,6 +339,7 @@
{
struct r0conf *conf;
int ret;
+ struct md_rdev *rdev;

if (mddev->chunk_sectors == 0) {
printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
@@ -348,7 +348,6 @@
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -359,6 +358,17 @@
}
conf = mddev->private;

+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ }
+
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1240,6 +1240,7 @@
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid1:%s: Disk failure on %s, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
@@ -1949,6 +1950,7 @@
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
+ bool fail = false;
for (m = 0; m < conf->raid_disks ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
@@ -1961,6 +1963,7 @@
* narrow down and record precise write
* errors.
*/
+ fail = true;
if (!narrow_write_error(r1_bio, m)) {
md_error(conf->mddev,
conf->mirrors[m].rdev);
@@ -1970,9 +1973,16 @@
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
- if (test_bit(R1BIO_WriteError, &r1_bio->state))
- close_write(r1_bio);
- raid_end_bio_io(r1_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2075,6 +2085,27 @@

md_check_recovery(mddev);

+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r1_bio = list_first_entry(&conf->bio_end_io_list,
+ struct r1bio, retry_list);
+ list_del(&r1_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {

@@ -2473,6 +2504,7 @@
conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);

spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -43,6 +43,11 @@
* block, or anything else.
*/
struct list_head retry_list;
+ /* A separate list of r1bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;

/* queue pending writes to be submitted on unplug */
struct bio_list pending_bio_list;
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1280,6 +1280,7 @@
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
printk(KERN_ALERT
"md/raid10:%s: Disk failure on %s, disabling device.\n"
"md/raid10:%s: Operation continuing on %d devices.\n",
@@ -2215,6 +2216,7 @@
}
put_buf(r10_bio);
} else {
+ bool fail = false;
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
struct bio *bio = r10_bio->devs[m].bio;
@@ -2227,6 +2229,7 @@
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL &&
!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ fail = true;
if (!narrow_write_error(r10_bio, m)) {
md_error(conf->mddev, rdev);
set_bit(R10BIO_Degraded,
@@ -2235,10 +2238,17 @@
rdev_dec_pending(rdev, conf->mddev);
}
}
- if (test_bit(R10BIO_WriteError,
- &r10_bio->state))
- close_write(r10_bio);
- raid_end_bio_io(r10_bio);
+ if (fail) {
+ spin_lock_irq(&conf->device_lock);
+ list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
}
}

@@ -2252,6 +2262,29 @@

md_check_recovery(mddev);

+ if (!list_empty_careful(&conf->bio_end_io_list) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ LIST_HEAD(tmp);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+ list_add(&tmp, &conf->bio_end_io_list);
+ list_del_init(&conf->bio_end_io_list);
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ while (!list_empty(&tmp)) {
+ r10_bio = list_first_entry(&conf->bio_end_io_list,
+ struct r10bio, retry_list);
+ list_del(&r10_bio->retry_list);
+ if (mddev->degraded)
+ set_bit(R10BIO_Degraded, &r10_bio->state);
+
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
+ }
+
blk_start_plug(&plug);
for (;;) {

@@ -2860,6 +2893,7 @@

spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
+ INIT_LIST_HEAD(&conf->bio_end_io_list);

spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -40,6 +40,12 @@
sector_t chunk_mask;

struct list_head retry_list;
+ /* A separate list of r1bio which just need raid_end_bio_io called.
+ * This mustn't happen for writes which had any errors if the superblock
+ * needs to be written.
+ */
+ struct list_head bio_end_io_list;
+
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
int pending_count;
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -943,6 +943,11 @@
goto bad;
}

+ if (data_size > ubi->leb_size) {
+ ubi_err("bad data_size");
+ goto bad;
+ }
+
if (vol_type == UBI_VID_STATIC) {
/*
* Although from high-level point of view static volumes may
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -656,6 +656,7 @@
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1512,6 +1512,7 @@
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= WL_RESERVED_PEBS;
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -319,7 +319,6 @@
if (po->pppoe_dev == dev &&
sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
po->pppoe_dev = NULL;
dev_put(dev);
@@ -576,7 +575,7 @@

po = pppox_sk(sk);

- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ if (po->pppoe_dev) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1132,19 +1132,7 @@
return ret;
}

- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+ ax88772_reset(dev);

/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
@@ -1555,7 +1543,7 @@
.unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
- .reset = ax88772_reset,
+ .reset = ax88772_link_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -708,6 +708,7 @@
hw->max_rate_tries = 10;
hw->sta_data_size = sizeof(struct ath_node);
hw->vif_data_size = sizeof(struct ath_vif);
+ hw->extra_tx_headroom = 4;

hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2001,7 +2001,7 @@
u8 *pn = seq.ccmp.pn;

ieee80211_get_key_rx_seq(key, i, &seq);
- aes_sc->pn = cpu_to_le64(
+ aes_sc[i].pn = cpu_to_le64(
(u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -225,6 +225,17 @@
.llseek = noop_llseek,
};

+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers. Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+ return scsi_sg_count(cmd) != 1 ||
+ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
@@ -1351,7 +1362,8 @@
}

/* Now complete the io */
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1594,7 +1606,8 @@
struct scsi_cmnd *cmd = tw_dev->srb[i];

cmd->result = (DID_RESET << 16);
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
}
}
@@ -1777,12 +1790,14 @@
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
twa_free_request_id(tw_dev, request_id);
break;
case 1:
SCpnt->result = (DID_ERROR << 16);
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
done(SCpnt);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1843,8 +1858,7 @@
/* Map sglist from scsi layer to cmd packet */

if (scsi_sg_count(srb)) {
- if ((scsi_sg_count(srb) == 1) &&
- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (!twa_command_mapped(srb)) {
if (srb->sc_data_direction == DMA_TO_DEVICE ||
srb->sc_data_direction == DMA_BIDIRECTIONAL)
scsi_sg_copy_to_buffer(srb,
@@ -1917,7 +1931,7 @@
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];

- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ if (!twa_command_mapped(cmd) &&
(cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
if (scsi_sg_count(cmd) == 1) {
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -991,6 +991,8 @@
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
struct mvs_slot_info *slot, u32 slot_idx)
{
+ if (!slot)
+ return;
if (!slot->task)
return;
if (!sas_protocol_ata(task->task_proto))
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -120,7 +120,7 @@
if (ret)
goto error_ret;

- for (i = 0; i < num_read; i++)
+ for (i = 0; i < num_read / sizeof(u16); i++)
*(((u16 *)rx) + i) = be16_to_cpup((u16 *)rx + i);

if (copy_to_user(buf, rx, num_read))
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1298,8 +1298,7 @@
tty->canon_data++;
spin_unlock_irqrestore(&tty->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- if (waitqueue_active(&tty->read_wait))
- wake_up_interruptible(&tty->read_wait);
+ wake_up_interruptible(&tty->read_wait);
return;
}
}
@@ -1422,8 +1421,7 @@
if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- if (waitqueue_active(&tty->read_wait))
- wake_up_interruptible(&tty->read_wait);
+ wake_up_interruptible(&tty->read_wait);
}

/*
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1985,8 +1985,24 @@
if (!noctty &&
current->signal->leader &&
!current->signal->tty &&
- tty->session == NULL)
- __proc_set_tty(current, tty);
+ tty->session == NULL) {
+ /*
+ * Don't let a process that only has write access to the tty
+ * obtain the privileges associated with having a tty as
+ * controlling terminal (being able to reopen it with full
+ * access through /dev/tty, being able to perform pushback).
+ * Many distributions set the group of all ttys to "tty" and
+ * grant write-only access to all terminals for setgid tty
+ * binaries, which should not imply full privileges on all ttys.
+ *
+ * This could theoretically break old code that performs open()
+ * on a write-only file descriptor. In that case, it might be
+ * necessary to also permit this if
+ * inode_permission(inode, MAY_READ) == 0.
+ */
+ if (filp->f_mode & FMODE_READ)
+ __proc_set_tty(current, tty);
+ }
spin_unlock_irq(&current->sighand->siglock);
tty_unlock();
mutex_unlock(&tty_mutex);
@@ -2266,7 +2282,7 @@
* Takes ->siglock() when updating signal->tty
*/

-static int tiocsctty(struct tty_struct *tty, int arg)
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
{
int ret = 0;
if (current->signal->leader && (task_session(current) == tty->session))
@@ -2299,6 +2315,13 @@
goto unlock;
}
}
+
+ /* See the comment in tty_open(). */
+ if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
proc_set_tty(current, tty);
unlock:
mutex_unlock(&tty_mutex);
@@ -2653,7 +2676,7 @@
no_tty();
return 0;
case TIOCSCTTY:
- return tiocsctty(tty, arg);
+ return tiocsctty(tty, file, arg);
case TIOCGPGRP:
return tiocgpgrp(tty, real_tty, p);
case TIOCSPGRP:
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -53,6 +53,13 @@
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },

+ /* Logitech ConferenceCam CC3000e */
+ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+ /* Logitech PTZ Pro Camera */
+ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },

@@ -77,6 +84,12 @@
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },

+ /* Plantronic Audio 655 DSP */
+ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Plantronic Audio 648 USB */
+ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Artisman Watchdog Dongle */
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -123,6 +123,11 @@
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2187,6 +2187,10 @@
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
+ if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
+ return finish_td(xhci, td, event_trb, event, ep,
+ status, false);
+
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
@@ -2238,6 +2242,12 @@
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code == COMP_SHORT_TX) {
+ xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
+ td->urb_length_set = true;
+ return 0;
+ }
}

return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2268,6 +2278,7 @@
u32 trb_comp_code;
int ret = 0;
int td_num = 0;
+ bool handling_skipped_tds = false;

slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
@@ -2381,6 +2392,10 @@
ep->skip = true;
xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
goto cleanup;
+ case COMP_PING_ERR:
+ ep->skip = true;
+ xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
+ goto cleanup;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
@@ -2512,13 +2527,18 @@
ep, &status);

cleanup:
+
+
+ handling_skipped_tds = ep->skip &&
+ trb_comp_code != COMP_MISSED_INT &&
+ trb_comp_code != COMP_PING_ERR;
+
/*
- * Do not update event ring dequeue pointer if ep->skip is set.
- * Will roll back to continue process missed tds.
+ * Do not update event ring dequeue pointer if we're in a loop
+ * processing missed tds.
*/
- if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+ if (!handling_skipped_tds)
inc_deq(xhci, xhci->event_ring, true);
- }

if (ret) {
urb = td->urb;
@@ -2553,7 +2573,7 @@
* Process them as short transfer until reach the td pointed by
* the event.
*/
- } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+ } while (handling_skipped_tds);

return 0;
}
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1873,7 +1873,6 @@
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
struct cifs_tcon *pTcon = NULL;
- struct cifs_io_parms io_parms;

/*
* To avoid spurious oplock breaks from server, in the case of
@@ -1893,18 +1892,6 @@
npid, false);
cifsFileInfo_put(open_file);
cFYI(1, "SetFSize for attrs rc = %d", rc);
- if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
- unsigned int bytes_written;
-
- io_parms.netfid = nfid;
- io_parms.pid = npid;
- io_parms.tcon = pTcon;
- io_parms.offset = 0;
- io_parms.length = attrs->ia_size;
- rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
- NULL, NULL, 1);
- cFYI(1, "Wrt seteof rc %d", rc);
- }
} else
rc = -EINVAL;

@@ -1925,31 +1912,7 @@
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, "SetEOF by path (setattrs) rc = %d", rc);
- if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
- __u16 netfid;
- int oplock = 0;

- rc = SMBLegacyOpen(xid, pTcon, full_path,
- FILE_OPEN, GENERIC_WRITE,
- CREATE_NOT_DIR, &netfid, &oplock, NULL,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc == 0) {
- unsigned int bytes_written;
-
- io_parms.netfid = netfid;
- io_parms.pid = current->tgid;
- io_parms.tcon = pTcon;
- io_parms.offset = 0;
- io_parms.length = attrs->ia_size;
- rc = CIFSSMBWrite(xid, &io_parms,
- &bytes_written,
- NULL, NULL, 1);
- cFYI(1, "wrt seteof rc %d", rc);
- CIFSSMBClose(xid, pTcon, netfid);
- }
- }
if (tlink)
cifs_put_tlink(tlink);
}
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1192,7 +1192,7 @@
}
spin_unlock(&state->state_lock);
nfs4_put_open_state(state);
- clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
+ clear_bit(NFS_STATE_RECLAIM_NOGRACE,
&state->flags);
goto restart;
}
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1690,7 +1690,7 @@
}


-static int pid_alive(const struct task_struct *p);
+static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2045,6 +2045,9 @@
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
@@ -2131,7 +2134,8 @@
int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
- struct iovec *iov);
+ struct iovec *iov,
+ int len);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
int offset,
const struct iovec *from,
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -119,7 +119,7 @@
#define WM8904_MIC_REGS 2
#define WM8904_GPIO_REGS 4
#define WM8904_DRC_REGS 4
-#define WM8904_EQ_REGS 25
+#define WM8904_EQ_REGS 24

/**
* DRC configurations are specified with a label and a set of register
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/mutex.h>

#include "internals.h"

@@ -326,18 +327,29 @@

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
+ static DEFINE_MUTEX(register_lock);
char name [MAX_NAMELEN];

- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
return;

+ /*
+ * irq directories are registered only when a handler is
+ * added, not when the descriptor is created, so multiple
+ * tasks might try to register at the same time.
+ */
+ mutex_lock(&register_lock);
+
+ if (desc->dir)
+ goto out_unlock;
+
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);

/* create /proc/irq/1234 */
desc->dir = proc_mkdir(name, root_irq_dir);
if (!desc->dir)
- return;
+ goto out_unlock;

#ifdef CONFIG_SMP
/* create /proc/irq/<irq>/smp_affinity */
@@ -358,6 +370,9 @@

proc_create_data("spurious", 0444, desc->dir,
&irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+ mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
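
The race being closed here: two tasks set up the same irq concurrently,
both see desc->dir == NULL, and both call proc_mkdir(). Serializing on
a mutex and re-checking the state under it is the standard fix. A
user-space sketch of the same pattern (register_once() and
do_register() are hypothetical stand-ins; build with -pthread):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static bool registered;

static void do_register(void)
{
	/* stands in for proc_mkdir() + proc_create_data() */
}

void register_once(void)
{
	pthread_mutex_lock(&register_lock);
	if (!registered) {	/* re-check under the lock */
		do_register();
		registered = true;
	}
	pthread_mutex_unlock(&register_lock);
}
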
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -893,11 +893,15 @@
if (core_kernel_text(a))
return;

- /* module_text_address is safe here: we're supposed to have reference
- * to module from symbol_get, so it can't go away. */
+ /*
+ * Even though we hold a reference on the module; we still need to
+ * disable preemption in order to safely traverse the data structure.
+ */
+ preempt_disable();
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,8 +1016,10 @@
* After ->on_cpu is cleared, the task can be moved to a different CPU.
* We must ensure this doesn't happen until the switch is completely
* finished.
+ *
+ * Pairs with the control dependency and rmb in try_to_wake_up().
*/
- smp_wmb();
+ smp_mb();
prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
@@ -3191,11 +3193,11 @@
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
- * The test for TASK_DEAD must occur while the runqueue locks are
- * still held, otherwise prev could be scheduled on another cpu, die
- * there before we look at prev->state, and then the reference would
- * be dropped twice.
- * Manfred Spraul <manfred@colorfullife.com>
+ *
+ * We must observe prev->state before clearing prev->on_cpu (in
+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * running on another CPU and we could rave with its RUNNING -> DEAD
+ * transition, resulting in a double drop.
*/
prev_state = prev->state;
finish_arch_switch(prev);
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -291,7 +291,7 @@
continue;

/* Check the deviation from the watchdog clocksource. */
- if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+ if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
clocksource_unstable(cs, cs_nsec - wd_nsec);
continue;
}
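
Why abs() was wrong here: on 32-bit kernels abs() operates on a 32-bit
quantity, so a large 64-bit delta gets truncated and can masquerade as
a tiny, acceptable deviation. A user-space illustration with the C
library equivalents (the int cast mimics the truncation; converting an
out-of-range value is implementation-defined, but keeps the low 32 bits
on typical two's-complement systems):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int64_t delta = -4294967297LL;	/* -(2^32 + 1): a huge deviation */

	/* a 32-bit abs() sees only the low bits: result is 1 */
	printf("abs((int)delta) = %d\n", abs((int)delta));
	/* a true 64-bit absolute value keeps the magnitude */
	printf("llabs(delta)    = %" PRId64 "\n", llabs(delta));
	return 0;
}
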
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2408,6 +2408,11 @@
break;
}

+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
+
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status))
@@ -2448,10 +2453,6 @@
written += copied;

balance_dirty_pages_ratelimited(mapping);
- if (fatal_signal_pending(current)) {
- status = -EINTR;
- break;
- }
} while (iov_iter_count(i));

return written ? written : status;
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2502,6 +2502,14 @@
continue;

/*
+ * Shared VMAs have their own reserves and do not affect
+ * MAP_PRIVATE accounting but it is possible that a shared
+ * VMA is using the same page so check and skip such VMAs.
+ */
+ if (iter_vma->vm_flags & VM_MAYSHARE)
+ continue;
+
+ /*
* Unmap the page from other VMAs without their own reserves.
* They get marked to be SIGKILLed if they fault in these
* areas. This is because a future no-page fault on this VMA
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -709,6 +709,7 @@
* @skb: skbuff
* @hlen: hardware length
* @iov: io vector
+ * @len: amount of data to copy from skb to iov
*
* Caller _must_ check that skb will fit to this iovec.
*
@@ -718,11 +719,14 @@
* can be modified!
*/
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen, struct iovec *iov)
+ int hlen, struct iovec *iov, int len)
{
__wsum csum;
int chunk = skb->len - hlen;

+ if (chunk > len)
+ chunk = len;
+
if (!chunk)
return 0;

--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1359,7 +1359,7 @@

gstrings.len = ret;

- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
if (!data)
return -ENOMEM;

--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2579,11 +2579,12 @@
*/
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
+ unsigned char *data = skb->data;
+
BUG_ON(len > skb->len);
- skb->len -= len;
- BUG_ON(skb->len < skb->data_len);
- skb_postpull_rcsum(skb, skb->data, len);
- return skb->data += len;
+ __skb_pull(skb, len);
+ skb_postpull_rcsum(skb, data, len);
+ return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);

--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5198,7 +5198,7 @@
err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
else
err = skb_copy_and_csum_datagram_iovec(skb, hlen,
- tp->ucopy.iov);
+ tp->ucopy.iov, chunk);

if (!err) {
tp->ucopy.len -= chunk;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1207,7 +1207,7 @@
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
- msg->msg_iov);
+ msg->msg_iov, copied);

if (err == -EINVAL)
goto csum_copy_err;
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -479,7 +479,7 @@
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
- err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
+ err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err == -EINVAL)
goto csum_copy_err;
}
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -383,7 +383,8 @@
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov, copied );
else {
- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
+ err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
+ msg->msg_iov, copied);
if (err == -EINVAL)
goto csum_copy_err;
}
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -185,7 +185,8 @@
msg->msg_iov, copy);
} else {
ret = skb_copy_and_csum_datagram_iovec(skb, offset,
- msg->msg_iov);
+ msg->msg_iov,
+ copy);
if (ret == -EINVAL)
goto csum_copy_error;
}
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -172,6 +172,12 @@
{
key_check(key);

+ /* Throw away the key data if the key is instantiated */
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+ !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+ key->type->destroy)
+ key->type->destroy(key);
+
security_key_free(key);

/* deal with the user's key tracking and quota */
@@ -186,10 +192,6 @@
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);

- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
key_user_put(key->user);

kfree(key->description);
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -4495,6 +4495,7 @@
SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -69,7 +69,8 @@
struct snd_seq_oss_reg *arg;
struct snd_seq_device *dev;

- if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+ /* using device#1 here for avoiding conflicts with OPL3 */
+ if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
sizeof(struct snd_seq_oss_reg), &dev) < 0)
return;