Date: 23 February 2017
From: Ben Hutchings
Subject: Linux 3.2.85
I'm announcing the release of the 3.2.85 kernel.

All users of the 3.2 kernel series should upgrade.

The updated 3.2.y git tree can be found at:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-3.2.y
and can be browsed at the normal kernel.org git web browser:
https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
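
For reference, fetching just that branch into a local clone can be done with
something like:

  $ git clone --branch linux-3.2.y \
        https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
  $ cd linux-stable
  $ git checkout v3.2.85    # release tag, if only this exact version is wanted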

The diff from 3.2.84 is attached to this message.
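
Applying the attached diff to an existing 3.2.84 tree instead works in the
usual way, for example (patch file name illustrative):

  $ cd linux-3.2.84
  $ patch -p1 < ../linux-3.2.85.patch
  $ make oldconfig && make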

Ben.

------------

Makefile | 2 +-
arch/arm/mm/dma-mapping.c | 4 +-
arch/parisc/kernel/syscall.S | 11 +-
arch/powerpc/kernel/idle_power7.S | 2 +-
arch/powerpc/kernel/nvram_64.c | 6 +-
arch/powerpc/kernel/vdso64/datapage.S | 2 +-
arch/powerpc/kernel/vdso64/gettimeofday.S | 2 +-
arch/powerpc/lib/copyuser_64.S | 2 +-
arch/x86/include/asm/uaccess.h | 10 +-
arch/x86/kvm/emulate.c | 36 +--
arch/x86/kvm/x86.c | 17 +-
block/bsg.c | 3 +
crypto/gcm.c | 2 +-
drivers/acpi/apei/ghes.c | 2 +-
drivers/firewire/net.c | 8 +-
drivers/hid/hid-core.c | 3 +
drivers/hid/hid-ids.h | 2 +
drivers/hid/usbhid/hid-quirks.c | 2 +
drivers/hv/hv_util.c | 10 +-
drivers/i2c/i2c-core.c | 2 +-
drivers/infiniband/core/uverbs_main.c | 7 +-
drivers/infiniband/hw/mlx4/cq.c | 5 +-
drivers/input/serio/i8042-x86ia64io.h | 7 +
drivers/iommu/amd_iommu.c | 3 +
drivers/isdn/gigaset/ser-gigaset.c | 15 +-
drivers/md/dm-table.c | 24 +-
drivers/media/dvb/dvb-usb/dib0700_core.c | 5 +-
drivers/media/dvb/frontends/mb86a20s.c | 12 +-
drivers/media/media-device.c | 3 +
drivers/media/video/cx231xx/cx231xx-avcore.c | 5 +-
drivers/media/video/cx231xx/cx231xx-cards.c | 2 +-
drivers/media/video/cx231xx/cx231xx-core.c | 3 +-
drivers/mfd/mfd-core.c | 2 +
drivers/mmc/card/block.c | 3 +-
drivers/mmc/host/mxs-mmc.c | 4 +-
drivers/net/ethernet/cirrus/ep93xx_eth.c | 4 +
drivers/net/ethernet/mellanox/mlx4/cmd.c | 19 +-
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 7 +
drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 +
drivers/net/wireless/mwifiex/cfg80211.c | 13 +-
drivers/net/wireless/rtlwifi/regd.c | 46 +++-
drivers/net/wireless/rtlwifi/regd.h | 1 +
drivers/regulator/tps65910-regulator.c | 6 +
drivers/s390/char/con3270.c | 11 +-
drivers/s390/scsi/zfcp_dbf.c | 162 ++++++++++--
drivers/s390/scsi/zfcp_dbf.h | 14 +-
drivers/s390/scsi/zfcp_erp.c | 12 +-
drivers/s390/scsi/zfcp_ext.h | 8 +-
drivers/s390/scsi/zfcp_fsf.c | 22 +-
drivers/s390/scsi/zfcp_fsf.h | 4 +-
drivers/s390/scsi/zfcp_scsi.c | 8 +-
drivers/scsi/arcmsr/arcmsr_hba.c | 9 -
drivers/scsi/ibmvscsi/ibmvfc.c | 1 -
drivers/scsi/megaraid/megaraid_sas.h | 2 +-
drivers/scsi/megaraid/megaraid_sas_base.c | 13 +-
drivers/scsi/scsi_debug.c | 1 +
drivers/scsi/scsi_scan.c | 2 +-
drivers/scsi/sg.c | 8 +-
drivers/staging/iio/impedance-analyzer/ad5933.c | 17 +-
drivers/tty/tty_ldisc.c | 7 +
drivers/tty/vt/vt.c | 7 +-
drivers/usb/gadget/u_ether.c | 7 -
drivers/usb/host/xhci-pci.c | 4 +-
drivers/usb/misc/legousbtower.c | 35 ++-
drivers/usb/serial/cp210x.c | 2 +
drivers/usb/serial/ftdi_sio.c | 2 +
drivers/usb/serial/ftdi_sio_ids.h | 6 +
drivers/usb/storage/transport.c | 7 +-
drivers/uwb/lc-rc.c | 16 +-
drivers/video/efifb.c | 6 +-
drivers/video/fbcmap.c | 26 +-
fs/exec.c | 6 +-
fs/ext4/ext4.h | 1 +
fs/ext4/inode.c | 8 +-
fs/ext4/namei.c | 14 +-
fs/ext4/super.c | 17 +-
fs/fuse/dir.c | 63 ++++-
fs/ioprio.c | 2 +
fs/isofs/inode.c | 8 +-
fs/nfs/nfs4state.c | 3 +
fs/reiserfs/super.c | 12 +-
fs/ubifs/dir.c | 16 +-
fs/ubifs/xattr.c | 2 +
include/linux/can.h | 1 +
include/linux/filter.h | 6 +-
include/linux/lockdep.h | 2 +-
include/net/sock.h | 10 +-
include/net/tcp.h | 1 +
kernel/events/core.c | 317 ++++++++++++++++++++----
kernel/rtmutex.c | 68 ++++-
lib/genalloc.c | 3 +-
mm/swapfile.c | 2 +
net/can/bcm.c | 32 ++-
net/can/raw.c | 3 +
net/core/filter.c | 10 +-
net/core/rtnetlink.c | 1 +
net/core/sock.c | 68 +++--
net/dccp/ipv4.c | 2 +-
net/dccp/ipv6.c | 3 +-
net/ipv4/ping.c | 4 +
net/ipv4/tcp_ipv4.c | 19 +-
net/ipv6/ip6_output.c | 2 +-
net/ipv6/ip6_tunnel.c | 1 +
net/ipv6/tcp_ipv6.c | 6 +-
net/mac80211/rx.c | 24 +-
net/netfilter/nf_conntrack_core.c | 7 +
net/netfilter/nf_conntrack_sip.c | 5 +-
net/packet/af_packet.c | 18 +-
net/rose/rose_in.c | 3 +-
net/sctp/sm_sideeffect.c | 16 +-
net/sctp/sm_statefuns.c | 12 +-
net/tipc/bearer.h | 16 ++
net/tipc/eth_media.c | 12 +-
sound/core/pcm_lib.c | 2 +-
sound/pci/ali5451/ali5451.c | 2 +
sound/usb/mixer_quirks.c | 22 +-
sound/usb/quirks-table.h | 17 ++
117 files changed, 1211 insertions(+), 403 deletions(-)

Al Viro (1):
sg_write()/bsg_write() is not fit to be called under KERNEL_DS

Andrey Ryabinin (1):
coredump: fix unfreezable coredumping task

Anssi Hannula (1):
ALSA: usb-audio: Extend DragonFly dB scale quirk to cover other variants

Anton Blanchard (1):
powerpc/vdso64: Use double word compare on pointers

Arnd Bergmann (1):
staging: iio: ad5933: avoid uninitialized variable in error case

Baoquan He (1):
iommu/amd: Free domain id when free a domain of struct dma_ops_domain

Ben Hutchings (2):
net: Add __sock_queue_rcv_skb()
Linux 3.2.85

Benjamin Tissoires (1):
HID: core: prevent out-of-bound readings

Brian King (1):
scsi: ibmvfc: Fix I/O hang when port is not mapped

Brian Norris (1):
mwifiex: printk() overflow with 32-byte SSIDs

Calvin Owens (1):
sg: Fix double-free when drives detach during SG_IO

Ching Huang (1):
scsi: arcmsr: Send SYNCHRONIZE_CACHE command to firmware

Daeho Jeong (1):
ext4: reinforce check of i_dtime when clearing high fields of uid and gid

Dan Carpenter (3):
scsi: zfcp: spin_lock_irqsave() is not nestable
ser_gigaset: return -ENOMEM on error instead of success
media: info leak in __media_device_enum_links()

Daniel Glöckner (1):
mmc: block: don't use CMD23 with very old MMC cards

Daniel Mentz (1):
lib/genalloc.c: start search from start of chunk

Dmitry Vyukov (1):
tty: limit terminal size to 4M chars

Doug Brown (1):
USB: serial: ftdi_sio: add support for TI CC3200 LaunchPad

Eli Cooper (1):
ip6_tunnel: Clear IP6CB in ip6tunnel_xmit()

Erez Shitrit (1):
net/mlx4_en: Process all completions in RX rings after port goes up

Eric Dumazet (4):
ipv6: dccp: add missing bind_conflict to dccp_ipv6_mapped
net: cleanups in sock_setsockopt()
tcp: take care of truncations done by sk_filter()
net: avoid signed overflows for SO_{SND|RCV}BUFFORCE

Ewan D. Milne (1):
scsi: scsi_debug: Fix memory leak if LBP enabled and module is unloaded

Fabio Estevam (1):
mmc: mxs: Initialize the spinlock prior to using it

Felipe Balbi (1):
usb: gadget: u_ether: remove interrupt throttling

Florian Fainelli (1):
net: ep93xx_eth: Do not crash unloading module

Florian Westphal (1):
netfilter: restart search if moved to other chain

Greg Kroah-Hartman (1):
usb: misc: legousbtower: Fix NULL pointer deference

Ido Yariv (1):
KVM: x86: fix wbinvd_dirty_mask use-after-free

Ignacio Alvarado (1):
KVM: Disable irq while unregistering user notifier

Jack Morgenstein (1):
net/mlx4_core: Fix deadlock when switching between polling and event fw commands

Jakub Sitnicki (1):
ipv6: Don't use ufo handling on later transformed packets

Jan Kara (1):
isofs: Do not return EACCES for unknown filesystems

Jan Remmet (1):
regulator: tps65910: Work around silicon erratum SWCZ010

Jann Horn (1):
swapfile: fix memory corruption via malformed swapfile

Jiri Slaby (1):
tty: vt, fix bogus division in csi_J

Johan Hovold (2):
uwb: fix device reference leaks
mfd: core: Fix device reference leak in mfd_clone_cell

Johannes Berg (1):
mac80211: discard multicast and 4-addr A-MSDUs

John David Anglin (1):
parisc: Ensure consistent state when switching to kernel stack at syscall entry

Kashyap Desai (1):
scsi: megaraid_sas: Fix data integrity failure for JBOD (passthrough) devices

Kees Cook (2):
net: ping: check minimum size on ICMP header length
fbdev: color map copying bounds checking

Kyle Jones (1):
USB: serial: cp210x: Add ID for a Juniper console

Larry Finger (1):
rtlwifi: Fix missing country code for Great Britain

Laura Abbott (1):
HID: usbhid: Add HID_QUIRK_NOGET for Aten DVI KVM switch

Linus Torvalds (1):
Fix potential infoleak in older kernels

Long Li (1):
hv: do not lose pending heartbeat vmbus packets

Marc Kleine-Budde (1):
can: raw: raw_setsockopt: limit number of can_filter that can be set

Marcel Hasler (1):
ALSA: usb-audio: Add quirk for Syntek STK1160

Marcelo Ricardo Leitner (1):
sctp: validate chunk len before actually using it

Matan Barak (1):
IB/mlx4: Fix create CQ error flow

Mathias Krause (1):
rtnl: reset calcit fptr in rtnl_unregister()

Mathias Nyman (1):
xhci: add restart quirk for Intel Wildcatpoint PCH

Mauro Carvalho Chehab (3):
mb86a20s: fix the locking logic
cx231xx: don't return error on success
cx231xx: fix GPIOs for Pixelview SBTVD hybrid

Max Staudt (1):
fbdev/efifb: Fix 16 color palette entry calculation

Michael Ellerman (1):
perf: Fix perf_event_for_each() to use sibling

Michal Kubeček (1):
tipc: check minimum bearer MTU

Mike Galbraith (1):
reiserfs: Unlock superblock before calling reiserfs_quota_on_mount()

Miklos Szeredi (4):
fuse: invalidate dir dentry after chmod
fuse: fix killing s[ug]id in setattr
fuse: listxattr: verify xattr list
fuse: fix clearing suid, sgid for chown()

Ming Lei (1):
scsi: Fix use-after-free

Oliver Hartkopp (1):
can: bcm: fix warning in bcm_connect/proc_register

Oliver Neukum (1):
HID: usbhid: add ATEN CS962 to list of quirky devices

Omar Sandoval (1):
block: fix use-after-free in sys_ioprio_get()

Ondrej Mosnáček (1):
crypto: gcm - Fix IV buffer size in crypto_gcm_setkey

Pan Xinhui (1):
powerpc/nvram: Fix an incorrect partition merge

Patrick Scheuring (1):
Input: i8042 - add XMG C504 to keyboard reset table

Paul Bolle (1):
lockdep: Silence warning if CONFIG_LOCKDEP isn't set

Paul Jakma (1):
USB: serial: cp210x: add ID for the Zone DPMX

Paul Mackerras (1):
powerpc/64: Fix incorrect return value from __copy_tofrom_user

Peter Hurley (1):
tty: Prevent ldisc drivers from re-using stale tty fields

Peter Zijlstra (4):
perf: Fix race in swevent hash
perf: Fix event->ctx locking
perf: Do not double free
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race

Petr Vandrovec (1):
Fix USB CB/CBI storage devices with CONFIG_VMAP_STACK=y

Philip Pettersson (1):
packet: fix race condition in packet_set_ring

Punit Agrawal (1):
ACPI / APEI: Fix incorrect return value of ghes_proc()

Radim Krčmář (1):
KVM: x86: drop error recovery in em_jmp_far and em_ret_far

Richard Weinberger (3):
ubifs: Fix xattr_names length in exit paths
ubifs: Abort readdir upon error
ubifs: Fix regression in ubifs_readdir()

Russell King (1):
ARM: dma-mapping: don't allow DMA mappings to be marked executable

Sascha Silbe (2):
s390/con3270: fix use of uninitialised data
s390/con3270: fix insufficient space padding

Scot Doyle (1):
vt: clear selection before resizing

Sean Young (1):
dib0700: fix nec repeat handling

Segher Boessenkool (1):
powerpc: Convert cmp to cmpd in idle enter sequence

Shao Fu (1):
rtlwifi: Update regulatory database

Stefan Richter (1):
firewire: net: fix fragmented datagram_size off-by-one

Steffen Maier (10):
zfcp: fix fc_host port_type with NPIV
zfcp: fix ELS/GS request&response length for hardware data router
zfcp: close window with unblocked rport during rport gone
zfcp: retain trace level for SCSI and HBA FSF response records
zfcp: restore: Dont use 0 to indicate invalid LUN in rec trace
zfcp: trace on request for open and close of WKA port
zfcp: restore tracing of handle for port and LUN with HBA records
zfcp: fix D_ID field with actual value on tracing SAN responses
zfcp: fix payload trace length for SAN request&response
zfcp: trace full payload of all SAN records (req,resp,iels)

Sumit Saxena (1):
scsi: megaraid_sas: fix macro MEGASAS_IS_LOGICAL to avoid regression

Takashi Iwai (2):
ALSA: ali5451: Fix out-of-bound position reporting
ALSA: pcm : Call kill_fasync() in stream lock

Tariq Toukan (1):
IB/uverbs: Fix leak of XRC target QPs

Theodore Ts'o (1):
ext4: sanity check the block and cluster size at mount time

Thomas Gleixner (1):
locking/rtmutex: Prevent dequeue vs. unlock race

Tilman Schmidt (1):
isdn/gigaset: reset tty->receive_room when attaching ser_gigaset

Trond Myklebust (1):
NFSv4: Open state recovery must account for file permission changes

Ulrich Weber (1):
netfilter: nf_conntrack_sip: extend request line validation

Vladimir Zapolskiy (1):
i2c: core: fix NULL pointer dereference under race condition

Willem de Bruijn (2):
rose: limit sk_filter trim to payload
dccp: limit sk_filter trim to payload

Xin Long (1):
sctp: do not return the transmit err back to sctp_sendmsg

gmail (1):
ext4: release bh in make_indexed_dir

tang.junhui (1):
dm table: fix missing dm_put_target_type() in dm_table_add_target()

diff --git a/Makefile b/Makefile
index ba25eb96c9a2..6858bcc57239 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 84
+SUBLEVEL = 85
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e1dd92c04939..f79610107988 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -374,7 +374,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
return memory;

return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel));
+ pgprot_dmacoherent(PAGE_KERNEL));
}
EXPORT_SYMBOL(dma_alloc_coherent);

@@ -386,7 +386,7 @@ void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel));
+ pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index ed3df443e02d..c9cbada24969 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -82,8 +82,6 @@ linux_gateway_entry:
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */

#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -109,6 +107,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -123,6 +129,7 @@ linux_gateway_entry:
*/

mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 3a70845a51c7..7e49fdb57f3c 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -67,7 +67,7 @@ _GLOBAL(power7_idle)
std r0,0(r1)
ptesync
ld r0,0(r1)
-1: cmp cr0,r0,r0
+1: cmpd cr0,r0,r0
bne 1b
PPC_NAP
b .
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bec1e930ed73..c2160489f9cb 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -280,7 +280,7 @@ int __init nvram_remove_partition(const char *name, int sig,

/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
- strncpy(part->header.name, "wwwwwwwwwwww", 12);
+ memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
@@ -298,8 +298,8 @@ int __init nvram_remove_partition(const char *name, int sig,
}
if (prev) {
prev->header.length += part->header.length;
- prev->header.checksum = nvram_checksum(&part->header);
- rc = nvram_write_header(part);
+ prev->header.checksum = nvram_checksum(&prev->header);
+ rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 79796de11737..3263ee23170d 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
bl V_LOCAL_FUNC(__get_datapage)
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
li r0,__NR_syscalls
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index e97a9a0dc4ac..d0295cd6229e 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
bne cr0,99f

li r3,0
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
lis r5,CLOCK_REALTIME_RES@h
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 578b625d6a3c..83f480148bf3 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -330,6 +330,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
addi r3,r3,8
171:
177:
+179:
addi r3,r3,8
370:
372:
@@ -344,7 +345,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
173:
174:
175:
-179:
181:
184:
186:
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 36361bf6fdd1..7bdc8f4075cd 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -347,7 +347,7 @@ do { \
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
- __get_user_asm_ex(x, ptr, "q", "", "=r")
+ __get_user_asm_ex(x, ptr, "q", "", "=&r")
#endif

#define __get_user_size(x, ptr, size, retval, errret) \
@@ -389,13 +389,13 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+ __get_user_asm_ex(x, ptr, "b", "b", "=&q"); \
break; \
case 2: \
- __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+ __get_user_asm_ex(x, ptr, "w", "w", "=&r"); \
break; \
case 4: \
- __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+ __get_user_asm_ex(x, ptr, "l", "k", "=&r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
@@ -409,7 +409,7 @@ do { \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
- : ltype(x) : "m" (__m(addr)))
+ : ltype(x) : "m" (__m(addr)), "0" (0))

#define __put_user_nocheck(x, ptr, size) \
({ \
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9af0b8200d5a..73e065b787dd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1699,16 +1699,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);

- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -1717,12 +1711,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;

rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}

@@ -1876,14 +1868,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;

rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -1899,10 +1885,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, new_desc.l);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4ae334a1bd99..4408aee8236d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -172,7 +172,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;

+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -180,8 +191,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
@@ -6406,11 +6415,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
kvmclock_reset(vcpu);

- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
+ free_cpumask_var(wbinvd_dirty_mask);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
diff --git a/block/bsg.c b/block/bsg.c
index c0ab25c2acf2..2a019402723f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)

dprintk("%s: write %Zd bytes\n", bd->name, count);

+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
bsg_set_block(bd, file);

bytes_written = 0;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 9e47c4dfa91c..dd3bf30f260b 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -103,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
- u8 iv[8];
+ u8 iv[16];

struct crypto_gcm_setkey_result result;

diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b8e08cb67a18..e8c48999b8d7 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -656,7 +656,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}

static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 418c4da54fdc..791f053a2714 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -92,13 +92,13 @@ struct rfc2734_header {

#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)

-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)

#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -722,7 +722,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);

if (fg_off + len > dg_size)
return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 61660f8273fa..1ec8825313ba 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -989,6 +989,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
+ value[n] - min < field->maxusage &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
@@ -1001,11 +1002,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
}

if (field->value[n] >= min && field->value[n] <= max
+ && field->value[n] - min < field->maxusage
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);

if (value[n] >= min && value[n] <= max
+ && value[n] - min < field->maxusage
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ccc89b0e14d9..219d35f61825 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -152,6 +152,8 @@
#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021

#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 71c258295045..759318993846 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -59,6 +59,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 55d58f21e6d4..3fe1ce222854 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -229,10 +229,14 @@ static void heartbeat_onchannelcallback(void *context)
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;

- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];

diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 1e5606185b4f..c3793a79fcd2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1139,6 +1139,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);

/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -1157,7 +1158,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)

pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);

- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);

diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 9cdcb5038622..16616504defb 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -213,12 +213,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);

idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e8df155bc3b0..d83e4e0e3c08 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -233,11 +233,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}

return &cq->ibcq;

+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 6ceccc2f2d64..c86da501ca7d 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -762,6 +762,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 486982f8449f..43a84e32c893 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1541,6 +1541,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
kfree(dom->aperture[i]);
}

+ if (dom->domain.id)
+ domain_id_free(dom->domain.id);
+
kfree(dom);
}

diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index cf3c1d483cc9..3ec81a10840c 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -526,9 +526,18 @@ gigaset_tty_open(struct tty_struct *tty)
cs->hw.ser->tty = tty;
atomic_set(&cs->hw.ser->refcnt, 1);
init_completion(&cs->hw.ser->dead_cmp);
-
tty->disc_data = cs;

+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
/* OK.. Initialization of the datastructures and the HW is done.. Now
* startup system and notify the LL that we are ready to run
*/
@@ -779,8 +788,10 @@ static int __init ser_gigaset_init(void)
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
- if (!driver)
+ if (!driver) {
+ rc = -ENOMEM;
goto error;
+ }

rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
if (rc != 0) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1e379312fa66..0d0bc044a69c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -795,37 +795,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,

tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}

if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = 1;
}

if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}

if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -840,7 +835,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}

diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 156cbfc9c79d..c4ff052336f3 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -673,7 +673,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
{
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;

deb_info("%s()\n", __func__);
@@ -713,7 +713,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
if ((poll_reply->system == 0x00) && (poll_reply->data == 0x00)
&& (poll_reply->not_data == 0xff)) {
poll_reply->data_state = 2;
- break;
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}

if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 0f867a5055fb..9deee56e4689 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -477,7 +477,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (val >= 7)
*status |= FE_HAS_SYNC;

- if (val >= 8) /* Maybe 9? */
+ /*
+ * Actually, on state S8, it starts receiving TS, but the TS
+ * output is only on normal state after the transition to S9.
+ */
+ if (val >= 9)
*status |= FE_HAS_LOCK;

dprintk("val = %d, status = 0x%02x\n", val, *status);
@@ -562,6 +566,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
kfree(state);
}

+static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
static struct dvb_frontend_ops mb86a20s_ops;

struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
@@ -632,6 +641,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.read_status = mb86a20s_read_status,
.read_signal_strength = mb86a20s_read_signal_strength,
.tune = mb86a20s_tune,
+ .get_frontend_algo = mb86a20s_get_frontend_algo,
};

MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 298703fca3b5..dfc338968458 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -142,6 +142,8 @@ static long media_device_enum_links(struct media_device *mdev,

for (p = 0; p < entity->num_pads; p++) {
struct media_pad_desc pad;
+
+ memset(&pad, 0, sizeof(pad));
media_device_kpad_to_upad(&entity->pads[p], &pad);
if (copy_to_user(&links.pads[p], &pad, sizeof(pad)))
return -EFAULT;
@@ -159,6 +161,7 @@ static long media_device_enum_links(struct media_device *mdev,
if (entity->links[l].source->entity != entity)
continue;

+ memset(&link, 0, sizeof(link));
media_device_kpad_to_upad(entity->links[l].source,
&link.source);
media_device_kpad_to_upad(entity->links[l].sink,
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 53ff26e7abf7..065d81448b46 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
dev->board.agc_analog_digital_select_gpio,
analog_or_digital);

- return status;
+ if (status < 0)
+ return status;
+
+ return 0;
}

int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 53dae2a8272d..77d106493d8e 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -448,7 +448,7 @@ struct cx231xx_board cx231xx_boards[] = {
.output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
+ .agc_analog_digital_select_gpio = 0x1c,
.tuner_sif_gpio = -1,
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index d4457f9488ee..7cc1ce557aa2 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -720,6 +720,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
break;
case CX231XX_BOARD_HAUPPAUGE_EXETER:
@@ -744,7 +745,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
break;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 6dad2ef9b620..1ce9602b685d 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -241,6 +241,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
clones[i]);
}

+ put_device(dev);
+
return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f53d5c806a9e..1a3d6229fac3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1493,7 +1493,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
set_capacity(md->disk, size);

if (mmc_host_cmd23(card->host)) {
- if (mmc_card_mmc(card) ||
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index f201bed74442..0c781acf3df4 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -752,12 +752,12 @@ static int mxs_mmc_probe(struct platform_device *pdev)

platform_set_drvdata(pdev, mmc);

+ spin_lock_init(&host->lock);
+
ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
if (ret)
goto out_free_dma;

- spin_lock_init(&host->lock);
-
ret = mmc_add_host(mmc);
if (ret)
goto out_free_irq;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 4317af8d2f0a..6c9bd559a7ef 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -469,6 +469,9 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
struct device *dev = ep->dev->dev.parent;
int i;

+ if (!ep->descs)
+ return;
+
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;

@@ -493,6 +496,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)

dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
+ ep->descs = NULL;
}

static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a0b8c8..37f4995db07a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -313,12 +313,18 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
+ int ret;
+
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

@@ -326,6 +332,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);

+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.hcr_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -372,6 +379,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;

+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
@@ -390,6 +398,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
--priv->cmd.token_mask;

priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);

down(&priv->cmd.poll_sem);

@@ -404,6 +413,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;

+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;

for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -412,6 +422,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
kfree(priv->cmd.context);

up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 308349a43d60..3926b766148e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -684,6 +684,13 @@ int mlx4_en_start_port(struct net_device *dev)
queue_work(mdev->workqueue, &priv->mcast_task);

priv->port_up = true;
+
+ /* Process all completions if exist to prevent
+ * the queues freezing if they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i].napi);
+
netif_tx_start_all_queues(dev);
return 0;

diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 169853d938f7..1bf26bafcaea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -42,6 +42,7 @@
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
+#include <linux/rwsem.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -190,6 +191,7 @@ struct mlx4_cmd {
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 45ac407deda9..4d7e334603c3 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -944,8 +944,9 @@ done:
}
is_scanning_required = 1;
} else {
- dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
- (char *) req_ssid.ssid, bss->bssid);
+ dev_dbg(priv->adapter->dev, "info: trying to associate to '%.*s' bssid %pM\n",
+ req_ssid.ssid_len, (char *)req_ssid.ssid,
+ bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
@@ -989,8 +990,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,

priv->assoc_request = -EINPROGRESS;

- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
- (char *) sme->ssid, sme->bssid);
+ wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n",
+ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);

ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -1026,8 +1027,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,

priv->ibss_join_request = -EINPROGRESS;

- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
- (char *) params->ssid, params->bssid);
+ wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n",
+ params->ssid_len, (char *)params->ssid, params->bssid);

ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 9fedb1f70919..1ed5ae7d03b8 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -44,6 +44,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+ {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
};

/*
@@ -135,6 +136,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
}
};

+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5470_5850,
+ }
+};
+
static const struct ieee80211_regdomain rtl_regdom_14 = {
.n_reg_rules = 3,
.alpha2 = "99",
@@ -331,9 +343,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_no_midband;
case COUNTRY_CODE_IC:
return &rtl_regdom_11;
- case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_TELEC_NETGEAR:
return &rtl_regdom_60_64;
+ case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
@@ -346,6 +358,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+ return &rtl_regdom_12_13_5g_all;
default:
return &rtl_regdom_no_midband;
}
@@ -383,6 +397,27 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
return NULL;
}

+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+ switch (channelplan) {
+ case 0x20:
+ case 0x21:
+ return COUNTRY_CODE_WORLD_WIDE_13;
+ case 0x22:
+ return COUNTRY_CODE_IC;
+ case 0x25:
+ return COUNTRY_CODE_ETSI;
+ case 0x32:
+ return COUNTRY_CODE_TELEC_NETGEAR;
+ case 0x41:
+ return COUNTRY_CODE_GLOBAL_DOMAIN;
+ case 0x7f:
+ return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+ default:
+ return COUNTRY_CODE_MAX; /*Error*/
+ }
+}
+
int rtl_regd_init(struct ieee80211_hw *hw,
int (*reg_notifier) (struct wiphy *wiphy,
struct regulatory_request *request))
@@ -395,11 +430,12 @@ int rtl_regd_init(struct ieee80211_hw *hw,
return -EINVAL;

/* init country_code from efuse channel plan */
- rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+ rtlpriv->regd.country_code =
+ channel_plan_to_country_code(rtlpriv->efuse.channel_plan);

- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
- rtlpriv->regd.country_code));
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+ (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code));

if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index d23118938fac..883cebb09755 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -49,6 +49,7 @@ enum country_code_type_t {
COUNTRY_CODE_GLOBAL_DOMAIN = 10,
COUNTRY_CODE_WORLD_WIDE_13 = 11,
COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,

/*add new channel plan above this line */
COUNTRY_CODE_MAX
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index b552aae55b41..c79fb30fd536 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -904,6 +904,12 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
info = tps65910_regs;
+ /* Work around silicon erratum SWCZ010: output programmed
+ * voltage level can go higher than expected or crash
+ * Workaround: use no synchronization of DCDC clocks
+ */
+ tps65910_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
+ DCDCCTRL_DCDCCKSYNC_MASK);
break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb07577e8fd4..d9c6ae973c87 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -121,7 +121,12 @@ con3270_create_status(struct con3270 *cp)
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
- if (s->len >= cp->view.cols - 5)
+ if (s->len < 4) {
+ /* This indicates a bug, but printing a warning would
+ * cause a deadlock. */
+ return;
+ }
+ if (s->string[s->len - 4] != TO_RA)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
@@ -452,11 +457,11 @@ con3270_cline_end(struct con3270 *cp)
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
- if (s->len < cp->view.cols - 5) {
+ if (cp->cline->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
- while (--size > cp->cline->len)
+ while (--size >= cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 79b98482547b..c846a63ea672 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2016
*/

#define KMSG_COMPONENT "zfcp"
@@ -58,7 +58,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request for which a response was received
*/
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -78,6 +78,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
+ rec->u.res.port_handle = q_head->port_handle;
+ rec->u.res.lun_handle = q_head->lun_handle;

memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
@@ -90,7 +92,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->pl_len, "fsf_res", req->req_id);
}

- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

@@ -234,7 +236,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
- }
+ } else
+ rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
@@ -313,13 +316,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+ u64 req_id)
+{
+ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ rec->id = ZFCP_DBF_REC_RUN;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->port_status = wka_port->status;
+ rec->d_id = wka_port->d_id;
+ rec->lun = ZFCP_DBF_INVALID_LUN;
+
+ rec->u.run.fsf_req_id = req_id;
+ rec->u.run.rec_status = ~0;
+ rec->u.run.rec_step = ~0;
+ rec->u.run.rec_action = ~0;
+ rec->u.run.rec_count = ~0;
+
+ debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
- u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+ char *paytag, struct scatterlist *sg, u8 id, u16 len,
+ u64 req_id, u32 d_id, u16 cap_len)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ u16 pay_sum = 0;

spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -327,10 +365,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
- memcpy(rec->payload, data, rec_len);
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->pl_len = len; /* full length even if we cap pay below */
+ if (!sg)
+ goto out;
+ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+ if (len <= rec_len)
+ goto out; /* skip pay record if full content in rec->payload */
+
+ /* if (len > rec_len):
+ * dump data up to cap_len ignoring small duplicate in rec->payload
+ */
+ spin_lock(&dbf->pay_lock);
+ memset(payload, 0, sizeof(*payload));
+ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+ u16 pay_len, offset = 0;
+
+ while (offset < sg->length && pay_sum < cap_len) {
+ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+ (u16)(sg->length - offset));
+ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+ debug_event(dbf->pay, 1, payload,
+ zfcp_dbf_plen(pay_len));
+ payload->counter++;
+ offset += pay_len;
+ pay_sum += pay_len;
+ }
+ }
+ spin_unlock(&dbf->pay_lock);

+out:
debug_event(dbf->san, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
@@ -347,9 +416,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;

- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
- fsf->req_id, d_id);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+ length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ struct zfcp_fsf_req *fsf,
+ u16 len)
+{
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_gpn_ft_resp *acc;
+ int max_entries, x, last = 0;
+
+ if (!(memcmp(tag, "fsscth2", 7) == 0
+ && ct_els->d_id == FC_FID_DIR_SERV
+ && reqh->ct_rev == FC_CT_REV
+ && reqh->ct_in_id[0] == 0
+ && reqh->ct_in_id[1] == 0
+ && reqh->ct_in_id[2] == 0
+ && reqh->ct_fs_type == FC_FST_DIR
+ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+ && reqh->ct_options == 0
+ && reqh->_ct_resvd1 == 0
+ && reqh->ct_cmd == FC_NS_GPN_FT
+ /* reqh->ct_mr_size can vary so do not match but read below */
+ && reqh->_ct_resvd2 == 0
+ && reqh->ct_reason == 0
+ && reqh->ct_explan == 0
+ && reqh->ct_vendor == 0
+ && reqn->fn_resvd == 0
+ && reqn->fn_domain_id_scope == 0
+ && reqn->fn_area_id_scope == 0
+ && reqn->fn_fc4_type == FC_TYPE_FCP))
+ return len; /* not GPN_FT response so do not cap */
+
+ acc = sg_virt(resp_entry);
+ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ * to account for header as 1st pseudo "entry" */;
+
+ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+ * response, allowing us to skip special handling for it - just skip it
+ */
+ for (x = 1; x < max_entries && !last; x++) {
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+ acc++;
+ else
+ acc = sg_virt(++resp_entry);
+
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ }
+ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+ return len; /* cap after last entry */
}

/**
@@ -363,9 +485,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;

- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
- fsf->req_id, 0);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+ length, fsf->req_id, ct_els->d_id,
+ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
@@ -379,11 +502,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
+ struct scatterlist sg;

length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
- fsf->req_id, ntoh24(srb->d_id));
+ sg_init_one(&sg, srb->payload.data, length);
+ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+ fsf->req_id, ntoh24(srb->d_id), length);
}

/**
@@ -392,7 +517,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
*/
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -434,7 +560,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
}
}

- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
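
[Editor's note, not part of the patch: a minimal userspace sketch of the record-splitting idea in the new zfcp_dbf_san() above - a payload longer than one trace record is emitted in fixed-size chunks until a cap is reached. The record size and the emit() helper are invented for illustration; only ZFCP_DBF_PAY_MAX_REC is taken from the patch.]

#include <stdio.h>
#include <string.h>

#define REC_MAX 16			/* stand-in for ZFCP_DBF_PAY_MAX_REC */

static void emit(const char *rec, size_t len)
{
	printf("record of %zu bytes: %.*s\n", len, (int)len, rec);
}

/* Copy "data" into REC_MAX-sized records until cap_len bytes are out. */
static void dump_capped(const char *data, size_t len, size_t cap_len)
{
	size_t off = 0, sum = 0;
	char rec[REC_MAX];

	while (off < len && sum < cap_len) {
		size_t chunk = len - off;

		if (chunk > REC_MAX)
			chunk = REC_MAX;
		memcpy(rec, data + off, chunk);
		emit(rec, chunk);
		off += chunk;
		sum += chunk;
	}
}

int main(void)
{
	const char payload[] = "0123456789abcdefghijklmnopqrstuvwxyz";

	dump_capped(payload, sizeof(payload) - 1, 40);
	return 0;
}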

diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd9..440aa619da1d 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2015
*/

#ifndef ZFCP_DBF_H
@@ -17,6 +17,11 @@

#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull

+enum zfcp_dbf_pseudo_erp_act_type {
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
/**
* struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
* @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+ u16 pl_len;
} __packed;

/**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 port_handle;
+ u32 lun_handle;
} __packed;

/**
@@ -279,7 +287,7 @@ static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
if (level <= req->adapter->dbf->hba->level)
- zfcp_dbf_hba_fsf_res(tag, req);
+ zfcp_dbf_hba_fsf_res(tag, level, req);
}

/**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
scmd->device->host->hostdata[0];

if (level <= adapter->dbf->scsi->level)
- zfcp_dbf_scsi(tag, scmd, req);
+ zfcp_dbf_scsi(tag, level, scmd, req);
}

/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 5c872708a574..723cbe3985f0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/

#define KMSG_COMPONENT "zfcp"
@@ -1225,8 +1225,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
break;

case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ /* This switch case might also happen after a forced reopen
+ * was successfully done and thus overwritten with a new
+ * non-forced reopen at `ersfs_2'. In this case, we must not
+ * do the clean-up of the non-forced version.
+ */
+ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+ if (result == ZFCP_ERP_SUCCEEDED)
+ zfcp_scsi_schedule_rport_register(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index ef9e50225ee7..b51500accce2 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/

#ifndef ZFCP_EXT_H
@@ -49,8 +49,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -58,7 +59,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+ struct zfcp_fsf_req *);

/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 8bfd579e2543..0cbaebb52402 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/

#define KMSG_COMPONENT "zfcp"
@@ -484,7 +484,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -589,7 +592,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)

if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -958,8 +960,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
if (zfcp_adapter_multi_buffer_active(adapter)) {
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
+ qtcb->bottom.support.req_buf_length =
+ zfcp_qdio_real_bytes(sg_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
+ qtcb->bottom.support.resp_buf_length =
+ zfcp_qdio_real_bytes(sg_resp);

zfcp_qdio_set_data_div(qdio, &req->qdio_req,
zfcp_qdio_sbale_count(sg_req));
@@ -1049,6 +1055,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,

req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
+ ct->d_id = wka_port->d_id;
req->data = ct;

zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1152,6 +1159,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,

hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
+ els->d_id = d_id;
req->data = els;

zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1569,7 +1577,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;

spin_lock_irq(&qdio->req_q_lock);
@@ -1598,6 +1606,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}

@@ -1622,7 +1632,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;

spin_lock_irq(&qdio->req_q_lock);
@@ -1651,6 +1661,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}

diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index db8c85382dca..8cad41ffb6b8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/

#ifndef FSF_H
@@ -462,6 +462,7 @@ struct zfcp_blk_drv_data {
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
* @status: used to pass error status to calling function
+ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
*/
struct zfcp_fsf_ct_els {
struct scatterlist *req;
@@ -470,6 +471,7 @@ struct zfcp_fsf_ct_els {
void *handler_data;
struct zfcp_port *port;
int status;
+ u32 d_id;
};

#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7b353647cb90..38ee0df633a3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/

#define KMSG_COMPONENT "zfcp"
@@ -577,6 +577,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;

+ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -598,6 +601,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;

if (rport) {
+ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 2dcbb970deba..49ff38037df6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2069,18 +2069,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bdfa223a7dbb..df021b40813e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);

/* Clean out the queue */
memset(crq->msgs, 0, PAGE_SIZE);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 458fb34fa91b..bc72bf0c47d9 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1402,7 +1402,7 @@ struct megasas_instance_template {
};

#define MEGASAS_IS_LOGICAL(scp) \
- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)

#define MEGASAS_DEV_INDEX(inst, scp) \
((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
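
[Editor's note, not part of the patch: the hunk above only adds outer parentheses to MEGASAS_IS_LOGICAL(). A standalone sketch, with invented macro names and values, of why that matters once the macro result is combined with other operators:]

#include <stdio.h>

#define IS_LOGICAL_BAD(ch)	(ch) < 8 ? 0 : 1
#define IS_LOGICAL_GOOD(ch)	(((ch) < 8) ? 0 : 1)

int main(void)
{
	int ch = 3;

	/* expands to: !(3) < 8 ? 0 : 1  ->  (0 < 8) ? 0 : 1  ->  0 */
	printf("bad:  %d\n", !IS_LOGICAL_BAD(ch));
	/* expands to: !(((3) < 8) ? 0 : 1)  ->  !0  ->  1 */
	printf("good: %d\n", !IS_LOGICAL_GOOD(ch));
	return 0;
}
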
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 10550f3c7b45..6e1ae5951dea 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1486,16 +1486,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
goto out_done;
}

- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
+ /*
+ * FW takes care of flush cache on its own for Virtual Disk.
+ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+ */
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
scmd->result = DID_OK << 16;
goto out_done;
- default:
- break;
}

if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b3a729c90aa2..a9d1fea2ea0c 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3474,6 +3474,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);

+ vfree(map_storep);
if (dif_storep)
vfree(dif_storep);

diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 29f57512b856..5d207f1aca7a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1522,12 +1522,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index fddce4eefcd2..fb59a49dd01d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -544,6 +544,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp;
unsigned char cmnd[MAX_COMMAND_SIZE];

+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
@@ -742,8 +745,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
return k; /* probably out of space --> ENOMEM */
}
if (sdp->detached) {
- if (srp->bio)
+ if (srp->bio) {
blk_end_request_all(srp->rq, -EIO);
+ srp->rq = NULL;
+ }
+
sg_finish_rem_req(srp);
return -ENODEV;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index b8819de1cd8a..1ce569a405b2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -626,6 +626,7 @@ static void ad5933_work(struct work_struct *work)
struct iio_buffer *ring = indio_dev->buffer;
signed short buf[2];
unsigned char status;
+ int ret;

mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
@@ -633,17 +634,20 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}

- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ goto out;

if (status & AD5933_STAT_DATA_VALID) {
- ad5933_i2c_read(st->client,
+ ret = ad5933_i2c_read(st->client,
test_bit(1, ring->scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
ring->scan_count * 2, (u8 *)buf);
+ if (ret)
+ goto out;

if (ring->scan_count == 2) {
buf[0] = be16_to_cpu(buf[0]);
@@ -656,8 +660,7 @@ static void ad5933_work(struct work_struct *work)
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}

if (status & AD5933_STAT_SWEEP_DONE) {
@@ -669,7 +672,7 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
+out:
mutex_unlock(&indio_dev->mlock);
}
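
[Editor's note, not part of the patch: the ad5933 hunks above funnel every early return through one "out:" label so the mutex is released exactly once. A tiny sketch of that single-exit pattern, with a stubbed-in bus read standing in for ad5933_i2c_read():]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int read_status(int *status)
{
	*status = 1;	/* pretend the bus read succeeded */
	return 0;
}

static void work(void)
{
	int status;

	pthread_mutex_lock(&lock);
	if (read_status(&status))
		goto out;	/* bail out, but still unlock below */
	printf("status=%d\n", status);
out:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	work();
	return 0;
}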

diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 8e0924f55446..82358c2c9fb0 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -424,6 +424,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
* they are not on hot paths so a little discipline won't do
* any harm.
*
+ * The line discipline-related tty_struct fields are reset to
+ * prevent the ldisc driver from re-using stale information for
+ * the new ldisc instance.
+ *
* Locking: takes termios_mutex
*/

@@ -432,6 +436,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
mutex_lock(&tty->termios_mutex);
tty->termios->c_line = num;
mutex_unlock(&tty->termios_mutex);
+
+ tty->disc_data = NULL;
+ tty->receive_room = 0;
}

/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 4df5cff7f298..1c469ad1e0f6 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -882,10 +882,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;

+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;

+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;

@@ -1197,7 +1202,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (CON_IS_VISIBLE(vc))
update_screen(vc);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 9a2a1ae9b667..3749269d7954 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -590,13 +590,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,

req->length = length;

- /* throttle high/super speed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
- ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
- : 0;
-
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
default:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc6aa66f4278..fce63553f397 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@

#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
@@ -127,7 +128,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index a989356f693e..1d6521105bdd 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -951,24 +951,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;

- /* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
-
- if (retval) {
- /* something prevented us from registering this driver */
- err ("Not able to get a minor for this device.");
- usb_set_intfdata (interface, NULL);
- goto error;
- }
- dev->minor = interface->minor;
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
- USB_MAJOR, dev->minor);
-
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
@@ -989,6 +971,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));

+ /* we can register the device now, as it is ready */
+ usb_set_intfdata (interface, dev);
+
+ retval = usb_register_dev (interface, &tower_class);
+
+ if (retval) {
+ /* something prevented us from registering this driver */
+ err ("Not able to get a minor for this device.");
+ usb_set_intfdata (interface, NULL);
+ goto error;
+ }
+ dev->minor = interface->minor;
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+ USB_MAJOR, dev->minor);

exit:
dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index e941e2d33265..4238c275edfe 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -123,6 +123,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
@@ -135,6 +136,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 93fe0077f108..dbc437c61fb5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,6 +1033,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 559669786a20..6181b79c2ed0 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
#define STK541_PID 0x2109 /* Zigbee Controller */

/*
+ * Texas Instruments
+ */
+#define TI_VID 0x0451
+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
+/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 366395c5fa92..197fcfa0659a 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -922,10 +922,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)

/* COMMAND STAGE */
/* let's send the command via the control pipe */
+ /*
+ * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack.
+ * That stack may be vmalloc'ed, so no DMA from it for us. Make a copy.
+ */
+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
- us->ifnum, srb->cmnd, srb->cmd_len);
+ us->ifnum, us->iobuf, srb->cmd_len);

/* check the return code for the command */
US_DEBUGP("Call to usb_stor_ctrl_transfer() returned %d\n", result);
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index 4d688c750801..cf96b93bad14 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
struct uwb_rc *rc = NULL;

dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}

@@ -368,7 +371,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -421,8 +426,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)

dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -455,8 +463,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)

dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }

return rc;
}
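
[Editor's note, not part of the patch: the lc-rc.c hunks all pair the reference that class_find_device() takes with a put_device() once the driver data has been read. A toy refcount model, not kernel code, of that lookup/put pairing:]

#include <stdio.h>

struct obj {
	int refs;
	int payload;
};

static struct obj *lookup(struct obj *o)
{
	o->refs++;		/* like class_find_device() */
	return o;
}

static void put(struct obj *o)
{
	o->refs--;		/* like put_device() */
}

int main(void)
{
	struct obj o = { .refs = 1, .payload = 42 };
	struct obj *found = lookup(&o);
	int payload = 0;

	if (found) {
		payload = found->payload;
		put(found);	/* drop the lookup reference right away */
	}
	printf("payload=%d refs=%d\n", payload, o.refs);
	return 0;
}
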
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 784139aed079..1bb5918cfa3b 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -268,9 +268,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;

if (regno < 16) {
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 5c3960da755a..71666c02dea8 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)

int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;

if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);

@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)

int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;

if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
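
[Editor's note, not part of the patch: a standalone sketch, without the framebuffer types, of the overflow-safe size computation the fbcmap change moves to - bogus offsets are rejected before any subtraction, and the copy length is the unsigned minimum of what both colormaps can still hold.]

#include <stddef.h>
#include <stdio.h>

static int overlap_size(unsigned int to_start, size_t to_len,
			unsigned int from_start, size_t from_len,
			size_t *out)
{
	size_t tooff = 0, fromoff = 0;

	if (to_start > from_start)
		fromoff = to_start - from_start;
	else
		tooff = from_start - to_start;

	if (fromoff >= from_len || tooff >= to_len)
		return -1;	/* no overlap at all */

	*out = to_len - tooff;
	if (*out > from_len - fromoff)
		*out = from_len - fromoff;
	return 0;
}

int main(void)
{
	size_t n;

	if (overlap_size(0, 16, 8, 4, &n) == 0)
		printf("copy %zu entries\n", n);	/* 4 */
	return 0;
}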

diff --git a/fs/exec.c b/fs/exec.c
index a0006d85785c..3f8d8f331a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
@@ -1974,8 +1975,11 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
complete(vfork_done);
}

- if (core_waiters)
+ if (core_waiters > 0) {
+ freezer_do_not_count();
wait_for_completion(&core_state->startup);
+ freezer_count();
+ }
fail:
return core_waiters;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8c8115ce232f..eef0d1ab0cc7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -244,6 +244,7 @@ struct ext4_io_submit {
#define EXT4_MAX_BLOCK_SIZE 65536
#define EXT4_MIN_BLOCK_LOG_SIZE 10
#define EXT4_MAX_BLOCK_LOG_SIZE 16
+#define EXT4_MAX_CLUSTER_LOG_SIZE 30
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ff2e369de040..093a741d965c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4093,14 +4093,14 @@ static int ext4_do_update_inode(handle_t *handle,
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
- if (!ei->i_dtime) {
+ if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ } else {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(inode->i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(inode->i_gid));
- } else {
- raw_inode->i_uid_high = 0;
- raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index cd1dd498297e..ede3a33a2f1a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1413,32 +1413,30 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
- bh = bh2;

retval = ext4_handle_dirty_metadata(handle, dir, frame->bh);
if (retval)
goto out_frames;
- retval = ext4_handle_dirty_metadata(handle, dir, bh);
+ retval = ext4_handle_dirty_metadata(handle, dir, bh2);
if (retval)
goto out_frames;

- de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+ de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
if (!de) {
goto out_frames;
}
- dx_release(frames);

- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
- brelse(bh);
- return retval;
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh2);
out_frames:
/*
* Even if the block split failed, we have to properly write
* out all the changes we did so far. Otherwise we can end up
* with corrupted filesystem.
*/
- ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ ext4_mark_inode_dirty(handle, dir);
dx_release(frames);
+ brelse(bh2);
return retval;
}

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7266a2e5befc..e3ccddb6b425 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3451,7 +3451,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d", blocksize);
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}

@@ -3568,6 +3576,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
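
[Editor's note, not part of the patch: the two added checks bound the on-disk log2 fields before they feed shifts. A minimal sketch, assuming the same 10..16 block-size range declared in ext4.h above, of why an unchecked log value is dangerous:]

#include <stdint.h>
#include <stdio.h>

#define MIN_BLOCK_LOG_SIZE 10
#define MAX_BLOCK_LOG_SIZE 16

static int blocksize_from_log(uint32_t log_block_size, uint32_t *out)
{
	/* Shifting by more than the type width is undefined behaviour,
	 * so reject anything outside the supported range up front. */
	if (log_block_size > MAX_BLOCK_LOG_SIZE - MIN_BLOCK_LOG_SIZE)
		return -1;	/* corrupt or hostile superblock */
	*out = 1u << (MIN_BLOCK_LOG_SIZE + log_block_size);
	return 0;
}

int main(void)
{
	uint32_t bs;

	if (blocksize_from_log(2, &bs) == 0)
		printf("blocksize=%u\n", bs);	/* 4096 */
	if (blocksize_from_log(400, &bs) != 0)
		printf("rejected bogus log_block_size\n");
	return 0;
}
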
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 15c1d2948f4a..d0f57b18c01c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1393,10 +1393,42 @@ error:

static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
- if (attr->ia_valid & ATTR_FILE)
- return fuse_do_setattr(entry, attr, attr->ia_file);
- else
- return fuse_do_setattr(entry, attr, NULL);
+ struct inode *inode = entry->d_inode;
+ struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
+ int ret;
+
+ if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
+ attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
+ ATTR_MODE);
+ /*
+ * ia_mode calculation may have used stale i_mode. Refresh and
+ * recalculate.
+ */
+ ret = fuse_do_getattr(inode, NULL, file);
+ if (ret)
+ return ret;
+
+ attr->ia_mode = inode->i_mode;
+ if (inode->i_mode & S_ISUID) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISUID;
+ }
+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISGID;
+ }
+ }
+ if (!attr->ia_valid)
+ return 0;
+
+ ret = fuse_do_setattr(entry, attr, file);
+ if (!ret) {
+ /* Directory mode changed, may need to revalidate access */
+ if (S_ISDIR(entry->d_inode->i_mode) &&
+ (attr->ia_valid & ATTR_MODE))
+ fuse_invalidate_entry_cache(entry);
+ }
+ return ret;
}

static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
@@ -1501,6 +1533,23 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
return ret;
}

+static int fuse_verify_xattr_list(char *list, size_t size)
+{
+ size_t origsize = size;
+
+ while (size) {
+ size_t thislen = strnlen(list, size);
+
+ if (!thislen || thislen == size)
+ return -EIO;
+
+ size -= thislen + 1;
+ list += thislen + 1;
+ }
+
+ return origsize;
+}
+
static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
struct inode *inode = entry->d_inode;
@@ -1539,9 +1588,11 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
}
fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret)
+ if (!ret) {
ret = size ? req->out.args[0].size : outarg.size;
- else {
+ if (ret > 0 && size)
+ ret = fuse_verify_xattr_list(list, ret);
+ } else {
if (ret == -ENOSYS) {
fc->no_listxattr = 1;
ret = -EOPNOTSUPP;
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 5b55511945ef..e1ba1e607689 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -161,8 +161,10 @@ static int get_task_ioprio(struct task_struct *p)
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 2f9197f5ba02..e5657200eadd 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -717,6 +717,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
pri_bh = NULL;

root_found:
+ /* We don't support read-write mounts */
+ if (!(s->s_flags & MS_RDONLY)) {
+ error = -EACCES;
+ goto out_freebh;
+ }

if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
@@ -1527,9 +1532,6 @@ struct inode *__isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- /* We don't support read-write mounts */
- if (!(flags & MS_RDONLY))
- return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}

diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c4c82439d60e..c8b51f404eda 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1203,6 +1203,9 @@ restart:
__func__, status);
case -ENOENT:
case -ENOMEM:
+ case -EACCES:
+ case -EROFS:
+ case -EIO:
case -ESTALE:
/*
* Open state on this file cannot be recovered
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 569498ae164e..08b50ee8fb4a 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -140,7 +140,15 @@ static int remove_save_link_only(struct super_block *s,
static int reiserfs_quota_on_mount(struct super_block *, int);
#endif

-/* look for uncompleted unlinks and truncates and complete them */
+/*
+ * Look for uncompleted unlinks and truncates and complete them
+ *
+ * Called with superblock write locked. If quotas are enabled, we have to
+ * release/retake lest we call dquot_quota_on_mount(), proceed to
+ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per
+ * cpu worklets to complete flush_async_commits() that in turn wait for the
+ * superblock write lock.
+ */
static int finish_unfinished(struct super_block *s)
{
INITIALIZE_PATH(path);
@@ -187,7 +195,9 @@ static int finish_unfinished(struct super_block *s)
quota_enabled[i] = 0;
continue;
}
+ reiserfs_write_unlock(s);
ret = reiserfs_quota_on_mount(s, i);
+ reiserfs_write_lock(s);
if (ret < 0)
reiserfs_warning(s, "reiserfs-2500",
"cannot turn on journaled "
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index aaebf0f21ab5..348dcb2d2e8f 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -356,7 +356,7 @@ static unsigned int vfs_dent_type(uint8_t type)
*/
static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
- int err, over = 0;
+ int err = 0, over = 0;
loff_t pos = file->f_pos;
struct qstr nm;
union ubifs_key key;
@@ -475,16 +475,22 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
}

out:
- if (err != -ENOENT) {
+ if (err != -ENOENT)
ubifs_err("cannot find next direntry, error %d", err);
- return err;
- }
+ else
+ /*
+ * -ENOENT is a non-fatal error in this context; the TNC uses
+ * it to indicate that the cursor moved past the current directory
+ * and readdir() has to stop.
+ */
+ err = 0;
+

kfree(file->private_data);
file->private_data = NULL;
/* 2 is a special value indicating that there are no more direntries */
file->f_pos = 2;
- return 0;
+ return err;
}

static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index bf18f7a04544..28b7ebf5d0d9 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -168,6 +168,7 @@ out_cancel:
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= nm->len;
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -516,6 +517,7 @@ out_cancel:
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += nm->len;
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
diff --git a/include/linux/can.h b/include/linux/can.h
index 9a19bcb3eeaf..86a96a6e9795 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -105,5 +105,6 @@ struct can_filter {
};

#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */

#endif /* CAN_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8eeb205f298b..f4805e15b29b 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -150,7 +150,11 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp)
return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}

-extern int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ return sk_filter_trim_cap(sk, skb, 1);
+}
extern unsigned int sk_run_filter(const struct sk_buff *skb,
const struct sock_filter *filter);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 8b9bbf1d5b6e..8abaedffa901 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -394,7 +394,7 @@ struct lock_class_key { };

#define lockdep_depth(tsk) (0)

-#define lockdep_assert_held(l) do { } while (0)
+#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)

#endif /* !LOCKDEP */
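
[Editor's note, not part of the patch: the lockdep_assert_held() change makes the !LOCKDEP stub evaluate its argument as (void)(l). A small sketch, with invented macro names, of the warning this avoids - a lock variable referenced only inside the assertion would otherwise be flagged as unused when the checking is compiled out.]

#include <stdio.h>

#define assert_held_old(l)	do { } while (0)
#define assert_held_new(l)	do { (void)(l); } while (0)

int main(void)
{
	int lock = 0;		/* referenced only via the assertion below */

	/* With assert_held_old(), "lock" would trigger -Wunused-variable
	 * because the macro discards its argument entirely. */
	assert_held_new(&lock);
	printf("ok\n");
	return 0;
}
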
diff --git a/include/net/sock.h b/include/net/sock.h
index 38fcee90bde2..1444b523264b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1268,8 +1268,13 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}

-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1);
+}

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
@@ -1629,6 +1634,7 @@ extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,

extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);

+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0f4e1d419bfb..95c4211f47a7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -966,6 +966,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
return 1;
}

+int tcp_filter(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c0b4f07f947..49a1db47a378 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -665,6 +665,76 @@ static void put_ctx(struct perf_event_context *ctx)
}
}

+/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group we
+ * need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ * perf_remove_from_context();
+ * synchronize_rcu();
+ * perf_install_in_context();
+ *
+ * to affect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ * task_struct::perf_event_mutex
+ * perf_event_context::mutex
+ * perf_event_context::lock
+ * perf_event::child_mutex;
+ * perf_event::mmap_mutex
+ * mmap_sem
+ */
+static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+again:
+ rcu_read_lock();
+ ctx = ACCESS_ONCE(event->ctx);
+ if (!atomic_inc_not_zero(&ctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock(&ctx->mutex);
+ if (event->ctx != ctx) {
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+ goto again;
+ }
+
+ return ctx;
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+}
+
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
@@ -1325,7 +1395,7 @@ static int __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1367,6 +1437,19 @@ retry:
raw_spin_unlock_irq(&ctx->lock);
}

+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_disable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
@@ -1813,7 +1896,7 @@ unlock:
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1870,7 +1953,19 @@ out:
raw_spin_unlock_irq(&ctx->lock);
}

-int perf_event_refresh(struct perf_event *event, int refresh)
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_enable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
+static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
@@ -1879,10 +1974,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
return -EINVAL;

atomic_add(refresh, &event->event_limit);
- perf_event_enable(event);
+ _perf_event_enable(event);

return 0;
}
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_event_refresh(event, refresh);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(perf_event_refresh);

static void ctx_sched_out(struct perf_event_context *ctx,
@@ -3110,7 +3220,16 @@ static void put_event(struct perf_event *event)
rcu_read_unlock();

if (owner) {
- mutex_lock(&owner->perf_event_mutex);
+ /*
+ * If we're here through perf_event_exit_task() we're already
+ * holding ctx->mutex which would be an inversion wrt. the
+ * normal lock order.
+ *
+ * However we can safely take this lock because its the child
+ * ctx->mutex.
+ */
+ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
@@ -3162,12 +3281,13 @@ static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
- int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
- u64 values[5];
+ int n = 0, size = 0, ret;
u64 count, enabled, running;
+ u64 values[5];
+
+ lockdep_assert_held(&ctx->mutex);

- mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);

values[n++] = 1 + leader->nr_siblings;
@@ -3182,7 +3302,7 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);

if (copy_to_user(buf, values, size))
- goto unlock;
+ return -EFAULT;

ret = size;

@@ -3196,14 +3316,11 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);

if (copy_to_user(buf + ret, values, size)) {
- ret = -EFAULT;
- goto unlock;
+ return -EFAULT;
}

ret += size;
}
-unlock:
- mutex_unlock(&ctx->mutex);

return ret;
}
@@ -3262,8 +3379,14 @@ static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = perf_read_hw(event, buf, count);
+ perf_event_ctx_unlock(event, ctx);

- return perf_read_hw(event, buf, count);
+ return ret;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3287,7 +3410,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
return events;
}

-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
@@ -3306,6 +3429,7 @@ static void perf_event_for_each_child(struct perf_event *event,
struct perf_event *child;

WARN_ON_ONCE(event->ctx->parent_ctx);
+
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
@@ -3319,15 +3443,14 @@ static void perf_event_for_each(struct perf_event *event,
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;

- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
+ lockdep_assert_held(&ctx->mutex);
+
event = event->group_leader;

perf_event_for_each_child(event, func);
func(event);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
- perf_event_for_each_child(event, func);
- mutex_unlock(&ctx->mutex);
+ perf_event_for_each_child(sibling, func);
}

static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3386,25 +3509,24 @@ static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
- struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;

switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
- func = perf_event_enable;
+ func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
- func = perf_event_disable;
+ func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
- func = perf_event_reset;
+ func = _perf_event_reset;
break;

case PERF_EVENT_IOC_REFRESH:
- return perf_event_refresh(event, arg);
+ return _perf_event_refresh(event, arg);

case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
@@ -3445,6 +3567,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}

+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ long ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_ioctl(event, cmd, arg);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -3466,11 +3601,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,

int perf_event_task_enable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;

mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_enable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_enable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);

return 0;
@@ -3478,11 +3617,15 @@ int perf_event_task_enable(void)

int perf_event_task_disable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;

mutex_lock(&current->perf_event_mutex);
- list_for_each_entry(event, &current->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_disable);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_disable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(&current->perf_event_mutex);

return 0;
@@ -4958,9 +5101,6 @@ struct swevent_htable {

/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
-
- /* Keeps track of cpu being initialized/exited */
- bool online;
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5203,14 +5343,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
hwc->state = !(flags & PERF_EF_START);

head = find_swevent_head(swhash, event);
- if (!head) {
- /*
- * We can race with cpu hotplug code. Do not
- * WARN if the cpu just got unplugged.
- */
- WARN_ON_ONCE(swhash->online);
+ if (WARN_ON_ONCE(!head))
return -EINVAL;
- }

hlist_add_head_rcu(&event->hlist_entry, head);

@@ -5282,7 +5416,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
int err = 0;

mutex_lock(&swhash->hlist_mutex);
-
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;

@@ -6332,6 +6465,46 @@ out:
return ret;
}

+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+ if (b < a)
+ swap(a, b);
+
+ mutex_lock(a);
+ mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = ACCESS_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -6347,7 +6520,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct file *group_file = NULL;
struct task_struct *task = NULL;
@@ -6519,9 +6692,31 @@ SYSCALL_DEFINE5(perf_event_open,
}

if (move_group) {
- struct perf_event_context *gctx = group_leader->ctx;
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);

- mutex_lock(&gctx->mutex);
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx, if so
+ * its the regular !move_group case, otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
+
+ /*
+ * See perf_event_ctx_lock() for comments on the details
+ * of swizzling perf_event::ctx.
+ */
perf_remove_from_context(group_leader, false);

/*
@@ -6536,14 +6731,19 @@ SYSCALL_DEFINE5(perf_event_open,
perf_event__state_init(sibling);
put_ctx(gctx);
}
- mutex_unlock(&gctx->mutex);
- put_ctx(gctx);
+ } else {
+ mutex_lock(&ctx->mutex);
}

WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);

if (move_group) {
+ /*
+ * Wait for everybody to stop referencing the events through
+ * the old lists, before installing it on new lists.
+ */
+ synchronize_rcu();
+
perf_install_in_context(ctx, group_leader, cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -6556,6 +6756,11 @@ SYSCALL_DEFINE5(perf_event_open,
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
+
+ if (move_group) {
+ perf_event_ctx_unlock(group_leader, gctx);
+ put_ctx(gctx);
+ }
mutex_unlock(&ctx->mutex);

event->owner = current;
@@ -6580,11 +6785,21 @@ SYSCALL_DEFINE5(perf_event_open,
fd_install(event_fd, event_file);
return event_fd;

+err_locked:
+ if (move_group)
+ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+ fput(event_file);
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
- free_event(event);
+ /*
+ * If event_file is set, the fput() above will have called ->release()
+ * and that will take care of freeing the event.
+ */
+ if (!event_file)
+ free_event(event);
err_task:
if (task)
put_task_struct(task);
@@ -7149,7 +7364,6 @@ static void __cpuinit perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

mutex_lock(&swhash->hlist_mutex);
- swhash->online = true;
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;

@@ -7202,14 +7416,7 @@ static void perf_event_exit_cpu_context(int cpu)

static void perf_event_exit_cpu(int cpu)
{
- struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
perf_event_exit_cpu_context(cpu);
-
- mutex_lock(&swhash->hlist_mutex);
- swhash->online = false;
- swevent_hlist_release(swhash);
- mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
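
The perf_event_open() changes above avoid an AB-BA deadlock by always taking the two context mutexes in a fixed order (by address) via mutex_lock_double(). A minimal userspace sketch of the same idea, using pthread mutexes; lock_pair()/unlock_pair() are illustrative names, not kernel API:

#include <pthread.h>

/* Acquire two locks in a stable order (by address) so that two threads
 * locking the same pair with the arguments swapped cannot deadlock.
 * Sketch only; the kernel version additionally uses a lockdep nesting
 * annotation (mutex_lock_nested) for the second lock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (b < a) {                 /* compare addresses, not contents */
        pthread_mutex_t *tmp = a;
        a = b;
        b = tmp;
    }
    pthread_mutex_lock(a);
    pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    pthread_mutex_unlock(a);
    pthread_mutex_unlock(b);
}
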
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 1928f3db77b0..c24b2c506d50 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -63,8 +63,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
- if (!rt_mutex_has_waiters(lock))
- clear_rt_mutex_waiters(lock);
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ if (rt_mutex_has_waiters(lock))
+ return;
+
+ /*
+ * The rbtree has no waiters enqueued, now make sure that the
+ * lock->owner still has the waiters bit set, otherwise the
+ * following can happen:
+ *
+ * CPU 0 CPU 1 CPU2
+ * l->owner=T1
+ * rt_mutex_lock(l)
+ * lock(l->lock)
+ * l->owner = T1 | HAS_WAITERS;
+ * enqueue(T2)
+ * boost()
+ * unlock(l->lock)
+ * block()
+ *
+ * rt_mutex_lock(l)
+ * lock(l->lock)
+ * l->owner = T1 | HAS_WAITERS;
+ * enqueue(T3)
+ * boost()
+ * unlock(l->lock)
+ * block()
+ * signal(->T2) signal(->T3)
+ * lock(l->lock)
+ * dequeue(T2)
+ * deboost()
+ * unlock(l->lock)
+ * lock(l->lock)
+ * dequeue(T3)
+ * ==> wait list is empty
+ * deboost()
+ * unlock(l->lock)
+ * lock(l->lock)
+ * fixup_rt_mutex_waiters()
+ * if (wait_list_empty(l) {
+ * l->owner = owner
+ * owner = l->owner & ~HAS_WAITERS;
+ * ==> l->owner = T1
+ * }
+ * lock(l->lock)
+ * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
+ * if (wait_list_empty(l) {
+ * owner = l->owner & ~HAS_WAITERS;
+ * cmpxchg(l->owner, T1, NULL)
+ * ===> Success (l->owner = NULL)
+ *
+ * l->owner = owner
+ * ==> l->owner = T1
+ * }
+ *
+ * With the check for the waiter bit in place T3 on CPU2 will not
+ * overwrite. All tasks fiddling with the waiters bit are
+ * serialized by l->lock, so nothing else can modify the waiters
+ * bit. If the bit is set then nothing can change l->owner either
+ * so the simple RMW is safe. The cmpxchg() will simply fail if it
+ * happens in the middle of the RMW because the waiters bit is
+ * still set.
+ */
+ owner = ACCESS_ONCE(*p);
+ if (owner & RT_MUTEX_HAS_WAITERS)
+ ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS;
}

/*
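
The fixup_rt_mutex_waiters() change replaces an unconditional clear of the owner's HAS_WAITERS bit with a read-modify-write that only runs when the bit is actually set, so a concurrent cmpxchg-based unlock can no longer be overwritten with a stale owner value. A stripped-down sketch of the pattern; the flag name and type are illustrative, not the kernel's:

#include <stdint.h>

#define HAS_WAITERS 1UL

/* Clear the low "waiters" bit of *owner only if it is currently set.
 * Reading the word once and writing it back only in the set case means
 * a racing compare-and-swap that already replaced the owner is never
 * clobbered with an old value.  Sketch only; in the kernel all writers
 * of this bit are serialized by the wait-list lock. */
static void clear_waiters_bit(volatile unsigned long *owner)
{
    unsigned long val = *owner;          /* single read (ACCESS_ONCE) */

    if (val & HAS_WAITERS)
        *owner = val & ~HAS_WAITERS;     /* write back only when needed */
}
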
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 716f94747c81..266e0e67d0aa 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -263,7 +263,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit = 0, end_bit, remain;
+ int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -278,6 +278,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
if (size > atomic_read(&chunk->avail))
continue;

+ start_bit = 0;
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
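
The gen_pool_alloc() hunk moves the start_bit = 0 initialization inside the chunk loop: with the old code a later chunk could inherit a stale search offset from the previous iteration. A contrived, self-contained sketch of that bug class; find_zero() stands in for bitmap_find_next_zero_area():

#include <stddef.h>

/* Toy stand-in for bitmap_find_next_zero_area(): index of the first zero
 * byte at or after 'start', or 'len' if there is none. */
static size_t find_zero(const unsigned char *map, size_t len, size_t start)
{
    for (size_t i = start; i < len; i++)
        if (map[i] == 0)
            return i;
    return len;
}

/* Search several maps; 'start' is reset for every map, mirroring the
 * genalloc fix, so one map's result never skews the next search. */
static long first_free(const unsigned char *maps[], const size_t *lens, int n)
{
    for (int i = 0; i < n; i++) {
        size_t start = 0;                    /* per-iteration reset */
        size_t pos = find_zero(maps[i], lens[i], start);

        if (pos < lens[i])
            return (long)pos;
    }
    return -1;
}
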
diff --git a/mm/swapfile.c b/mm/swapfile.c
index dbd2b67bb50d..960b0dab5182 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1913,6 +1913,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
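
The read_swap_header() change rejects a swap header whose nr_badpages field, read from disk and therefore untrusted, exceeds MAX_SWAP_BADPAGES before the bad-page array is walked, preventing an out-of-bounds access on a corrupted swap signature. A generic sketch of validating an on-disk count against the array it will index; the bound and struct layout are illustrative:

#include <stdint.h>

#define MAX_BADPAGES 637   /* illustrative bound, not the kernel constant */

struct swap_hdr {
    uint32_t version;
    uint32_t last_page;
    uint32_t nr_badpages;
    uint32_t badpages[MAX_BADPAGES];
};

/* Return 1 if it is safe to iterate badpages[], 0 on a malformed header.
 * The check must run on the count as read from disk, before any loop
 * that uses it as an index bound. */
static int swap_hdr_ok(const struct swap_hdr *h)
{
    return h->nr_badpages <= MAX_BADPAGES;
}
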
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3910c1fefd04..c297974311b3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1494,24 +1494,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
+ int ret = 0;

if (len < sizeof(*addr))
return -EINVAL;

- if (bo->bound)
- return -EISCONN;
+ lock_sock(sk);
+
+ if (bo->bound) {
+ ret = -EISCONN;
+ goto fail;
+ }

/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;

dev = dev_get_by_index(&init_net, addr->can_ifindex);
- if (!dev)
- return -ENODEV;
-
+ if (!dev) {
+ ret = -ENODEV;
+ goto fail;
+ }
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}

bo->ifindex = dev->ifindex;
@@ -1522,17 +1529,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
bo->ifindex = 0;
}

- bo->bound = 1;
-
if (proc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
+ if (!bo->bcm_proc_read) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}

- return 0;
+ bo->bound = 1;
+
+fail:
+ release_sock(sk);
+
+ return ret;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
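
bcm_connect() now performs all of its checks and the proc file creation under lock_sock(), sets bo->bound only after everything has succeeded, and funnels every failure through a single unlock-and-return path. A compact sketch of that "single exit under the socket lock" shape; the lock type and helpers are generic stand-ins, not the CAN code:

#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

struct conn {
    pthread_mutex_t lock;
    bool bound;
    int ifindex;
};

/* Bind 'c' to 'ifindex'.  Everything runs under c->lock and every error
 * jumps to the common 'fail' label, so the lock is released exactly once
 * and 'bound' flips to true only after all steps have succeeded. */
static int conn_bind(struct conn *c, int ifindex)
{
    int ret = 0;

    pthread_mutex_lock(&c->lock);

    if (c->bound) {
        ret = -EISCONN;
        goto fail;
    }
    if (ifindex <= 0) {
        ret = -ENODEV;
        goto fail;
    }

    c->ifindex = ifindex;
    c->bound = true;        /* only set once nothing can fail anymore */

fail:
    pthread_mutex_unlock(&c->lock);
    return ret;
}
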
diff --git a/net/can/raw.c b/net/can/raw.c
index 46cca3a91d19..7320197b8ea7 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -459,6 +459,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (optlen % sizeof(struct can_filter) != 0)
return -EINVAL;

+ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
+ return -EINVAL;
+
count = optlen / sizeof(struct can_filter);

if (count > 1) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 9c88080e4049..62667722b574 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,9 +64,10 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
}

/**
- * sk_filter - run a packet through a socket filter
+ * sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
+ * @cap: limit on how short the eBPF program may trim the packet
*
* Run the filter code and then cut skb->data to correct size returned by
* sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -75,7 +76,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
@@ -88,14 +89,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
-
- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();

return err;
}
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);

/**
* sk_run_filter - run a filter on a socket
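
sk_filter_trim_cap() keeps the behaviour of sk_filter() but refuses to trim the packet below a caller-supplied floor: the skb is cut to max(cap, pkt_len), so a socket filter can still drop a packet (pkt_len == 0) yet can no longer shrink it below the protocol's header size. A sketch of the clamping logic in isolation, assuming a toy packet struct rather than the real sk_buff API:

#include <stddef.h>
#include <errno.h>

struct pkt {
    size_t len;
};

static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

/* Apply a filter verdict to a packet.  'verdict' is the length the filter
 * wants to keep (0 means drop).  'cap' is the minimum length the caller,
 * e.g. TCP with its header length, must still be able to read afterwards. */
static int apply_filter_verdict(struct pkt *p, size_t verdict, size_t cap)
{
    if (verdict == 0)
        return -EPERM;                   /* filter dropped the packet */

    size_t keep = max_sz(cap, verdict);  /* never trim below 'cap' */
    if (keep < p->len)
        p->len = keep;
    return 0;
}
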
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 72a7ed82659b..684b57380b2b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -249,6 +249,7 @@ int rtnl_unregister(int protocol, int msgtype)

rtnl_msg_handlers[protocol][msgindex].doit = NULL;
rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+ rtnl_msg_handlers[protocol][msgindex].calcit = NULL;

return 0;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index e0935282945f..07c078e3f570 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -281,9 +281,8 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
}


-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int err;
int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -294,10 +293,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return -ENOMEM;
}

- err = sk_filter(sk, skb);
- if (err)
- return err;
-
if (!sk_rmem_schedule(sk, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
@@ -327,13 +322,26 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk->sk_data_ready(sk, skb_len);
return 0;
}
+EXPORT_SYMBOL(__sock_queue_rcv_skb);
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+
+ return __sock_queue_rcv_skb(sk, skb);
+}
EXPORT_SYMBOL(sock_queue_rcv_skb);

-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;

- if (sk_filter(sk, skb))
+ if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;

skb->dev = NULL;
@@ -369,7 +377,7 @@ discard_and_relse:
kfree_skb(skb);
goto out;
}
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
@@ -526,23 +534,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_SNDBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_wmem_max)
- val = sysctl_wmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sk_sndbuf = SOCK_MIN_SNDBUF;
- else
- sk->sk_sndbuf = val * 2;
-
- /*
- * Wake up sending tasks if we
- * upped the value.
- */
+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ /* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;

@@ -555,12 +555,11 @@ set_sndbuf:

case SO_RCVBUF:
/* Don't error on this BSD doesn't and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_rmem_max)
- val = sysctl_rmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
@@ -578,10 +577,7 @@ set_rcvbuf:
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
- else
- sk->sk_rcvbuf = val * 2;
+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;

case SO_RCVBUFFORCE:
@@ -923,7 +919,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;

case SO_PASSCRED:
- v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
break;

case SO_PEERCRED:
@@ -958,7 +954,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;

case SO_PASSSEC:
- v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
break;

case SO_PEERSEC:
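
The sock_setsockopt() cleanup replaces the open-coded bounds with min_t()/max_t(): the user value is first capped at sysctl_wmem_max / sysctl_rmem_max, then doubled to account for bookkeeping overhead and floored at SOCK_MIN_SNDBUF / SOCK_MIN_RCVBUF. A plain C sketch of the same clamping arithmetic; the limits are illustrative numbers, not the real sysctls:

#include <stdint.h>

#define WMEM_MAX   212992   /* stand-in for sysctl_wmem_max  */
#define MIN_SNDBUF 4608     /* stand-in for SOCK_MIN_SNDBUF  */

/* Turn a user-requested send-buffer size into the value actually stored:
 * cap at the administrative maximum, double it, then enforce the floor. */
static int effective_sndbuf(uint32_t requested)
{
    uint32_t val = requested < WMEM_MAX ? requested : WMEM_MAX;
    int doubled = (int)(val * 2);

    return doubled > MIN_SNDBUF ? doubled : MIN_SNDBUF;
}
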
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 72416c8f0859..d336501f3451 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -877,7 +877,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
goto discard_and_relse;
nf_reset(skb);

- return sk_receive_skb(sk, skb, 1);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);

no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 0987c89031c2..b00bc115a2f0 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -817,7 +817,7 @@ static int dccp_v6_rcv(struct sk_buff *skb)
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;

- return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;

no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -1037,6 +1037,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5d28677345e0..cf479f2d1e8c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -482,6 +482,10 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (len > 0xFFFF)
return -EMSGSIZE;

+ /* Must have at least a full ICMP header. */
+ if (len < sizeof(struct icmphdr))
+ return -EINVAL;
+
/*
* Check the flags.
*/
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b4e0eb49f56d..4ba77eea6b85 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1647,6 +1647,21 @@ csum_err:
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

+int tcp_filter(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcphdr *th = (struct tcphdr *)skb->data;
+ unsigned int eaten = skb->len;
+ int err;
+
+ err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+ if (!err) {
+ eaten -= skb->len;
+ TCP_SKB_CB(skb)->end_seq -= eaten;
+ }
+ return err;
+}
+EXPORT_SYMBOL(tcp_filter);
+
/*
* From tcp_input.c
*/
@@ -1709,8 +1724,10 @@ process:
goto discard_and_relse;
nf_reset(skb);

- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ iph = ip_hdr(skb);

skb->dev = NULL;

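tcp_filter() wraps sk_filter_trim_cap() with the TCP header length as the floor and then repairs the sequence-number bookkeeping: whatever the filter trimmed off the tail is subtracted from end_seq, so skb->len and the recorded sequence range stay consistent. If the packet covered [seq, end_seq) and trimming removed 'eaten' bytes, the new end is end_seq - eaten. A small arithmetic sketch with a toy struct, not the real TCP control block:

#include <stdint.h>
#include <stddef.h>

struct tcp_pkt {
    size_t   len;      /* payload bytes currently in the packet */
    uint32_t end_seq;  /* sequence number just past the payload */
};

/* Record the effect of trimming the packet to 'new_len' (<= pkt->len):
 * the sequence range must shrink by exactly the bytes removed. */
static void trim_and_fixup(struct tcp_pkt *pkt, size_t new_len)
{
    size_t eaten = pkt->len - new_len;   /* bytes removed from the tail */

    pkt->len = new_len;
    pkt->end_seq -= (uint32_t)eaten;
}
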
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f1ca6588307c..c59f646a7d4f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1338,7 +1338,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
if (((length > mtu) ||
(skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(sk->sk_type == SOCK_DGRAM)) {
err = ip6_ufo_append_data(sk, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1828c27ba7aa..741917c3aa5d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -983,6 +983,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
nf_reset(skb);
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len;
err = ip6_local_out(skb);

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b20f7a08b4f7..2c2d71123251 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1585,7 +1585,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
#endif

- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard;

/*
@@ -1743,8 +1743,10 @@ process:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;

- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ hdr = ipv6_hdr(skb);

skb->dev = NULL;

diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7c53effb6eda..c1d02d860489 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1854,16 +1854,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;

- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ }

- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;

skb->dev = dev;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c855673ceb23..69ce23fc81eb 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -550,6 +550,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
* least once for the stats anyway.
*/
rcu_read_lock_bh();
+ begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
@@ -561,6 +562,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
}
NF_CT_STAT_INC(net, searched);
}
+
+ if (get_nulls_value(n) != hash) {
+ NF_CT_STAT_INC(net, search_restart);
+ goto begin;
+ }
+
rcu_read_unlock_bh();

return 0;
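
The nf_conntrack_tuple_taken() hunk adds the standard SLAB_DESTROY_BY_RCU "nulls" re-check: after walking an RCU hash chain, the terminating nulls value is compared with the bucket the walk started from; if they differ, an entry was freed and reused on another chain mid-walk, so the lookup restarts. A schematic, single-threaded sketch of that restart pattern; the list encoding here is a stand-in for the hlist_nulls API:

#include <stdint.h>
#include <stdbool.h>

/* A terminating "nulls" pointer has its low bit set and encodes the bucket
 * index it belongs to; an empty bucket holds NULLS_MARKER(bucket). */
#define NULLS_MARKER(bucket)  ((void *)(((uintptr_t)(bucket) << 1) | 1))
#define IS_NULLS(p)           (((uintptr_t)(p)) & 1)
#define NULLS_VALUE(p)        ((unsigned int)(((uintptr_t)(p)) >> 1))

struct node {
    struct node *next;   /* either a real node or a NULLS_MARKER() */
    unsigned int key;
};

/* Lockless lookup sketch: if the chain we finished on is not the chain we
 * started from (an entry was recycled onto another bucket concurrently),
 * restart the walk, exactly like the conntrack fix above. */
static bool bucket_contains(struct node **table, unsigned int bucket,
                            unsigned int key)
{
    struct node *n;

again:
    for (n = table[bucket]; !IS_NULLS(n); n = n->next)
        if (n->key == key)
            return true;

    if (NULLS_VALUE(n) != bucket)
        goto again;
    return false;
}
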
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4a8c55bd60ee..7cbf9bdfeaa8 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1372,9 +1372,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
handler = &sip_handlers[i];
if (handler->request == NULL)
continue;
- if (*datalen < handler->len ||
+ if (*datalen < handler->len + 2 ||
strnicmp(*dptr, handler->method, handler->len))
continue;
+ if ((*dptr)[handler->len] != ' ' ||
+ !isalpha((*dptr)[handler->len+1]))
+ continue;

if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
&matchoff, &matchlen) <= 0)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4a912f78e423..dae9476fa4d8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3077,19 +3077,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv

if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
- po->tp_version = val;
- return 0;
+ break;
default:
return -EINVAL;
}
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_version = val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_RESERVE:
{
@@ -3560,6 +3566,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;

+ lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
@@ -3637,7 +3644,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
}

- lock_sock(sk);

/* Detach socket from network */
spin_lock(&po->bind_lock);
@@ -3686,11 +3692,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
}
- release_sock(sk);

if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
+ release_sock(sk);
return err;
}

diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 7f7fcb46b4fa..e5029c58e472 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -165,7 +165,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ __sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 5437b33fd33e..6dca588b14af 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -995,19 +995,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
* This way the whole message is queued up and bundling if
* encouraged for small fragments.
*/
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
- struct sctp_datamsg *msg)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg)
{
struct sctp_chunk *chunk;
- int error = 0;
-
- list_for_each_entry(chunk, &msg->chunks, frag_list) {
- error = sctp_outq_tail(&asoc->outqueue, chunk);
- if (error)
- break;
- }

- return error;
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk);
}


@@ -1685,7 +1679,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
- error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+ sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4e0a9b934233..6b22d6a2dd63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3354,6 +3354,12 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);

+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
@@ -3385,12 +3391,6 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
}
}

- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));

diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d696f9e414e3..d97d03f4d31b 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -47,6 +47,13 @@
*/
#define TIPC_MEDIA_TYPE_ETH 1

+/* Message header sizes from msg.h - duplicated to avoid mutual inclusion */
+#define INT_H_SIZE 40
+#define MAX_H_SIZE 60
+
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
/*
* Destination address structure used by TIPC bearers when sending messages
*
@@ -209,4 +216,13 @@ static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
return !b_ptr->media->send_msg(buf, b_ptr, dest);
}

+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index a224a38f0218..2e79e84660fb 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -167,6 +167,10 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
read_unlock(&dev_base_lock);
if (!dev)
return -ENODEV;
+ if (tipc_mtu_bad(dev, 0)) {
+ dev_put(dev);
+ return -EINVAL;
+ }

/* Create Ethernet bearer for device */

@@ -227,8 +231,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
if (!eb_ptr->bearer)
return NOTIFY_DONE; /* bearer had been disabled */

- eb_ptr->bearer->mtu = dev->mtu;
-
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
@@ -243,6 +245,12 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
tipc_block_bearer(eb_ptr->bearer->name);
break;
case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev, 0)) {
+ tipc_disable_bearer(eb_ptr->bearer->name);
+ break;
+ }
+ eb_ptr->bearer->mtu = dev->mtu;
+ /* fall through */
case NETDEV_CHANGEADDR:
tipc_block_bearer(eb_ptr->bearer->name);
tipc_continue(eb_ptr->bearer);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 7f00d34f0abd..b36544a62b40 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1766,10 +1766,10 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
_end:
- snd_pcm_stream_unlock_irqrestore(substream, flags);
if (runtime->transfer_ack_end)
runtime->transfer_ack_end(substream);
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}

EXPORT_SYMBOL(snd_pcm_period_elapsed);
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index be662c9a2113..8e935dea58d4 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1422,6 +1422,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream)
spin_unlock(&codec->reg_lock);
snd_ali_printk("playback pointer returned cso=%xh.\n", cso);

+ cso %= runtime->buffer_size;
return cso;
}

@@ -1442,6 +1443,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2));
spin_unlock(&codec->reg_lock);

+ cso %= runtime->buffer_size;
return cso;
}

diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 21f4d448b242..d422d162724a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -684,6 +684,7 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
}

static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+ struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
/* Approximation using 10 ranges based on output measurement on hw v1.2.
@@ -701,10 +702,19 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
);

- dev_info(&mixer->chip->dev->dev, "applying DragonFly dB scale quirk\n");
- kctl->tlv.p = scale;
- kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
- kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ if (cval->min == 0 && cval->max == 50) {
+ dev_info(&mixer->chip->dev->dev, "applying DragonFly dB scale quirk (0-50 variant)\n");
+ kctl->tlv.p = scale;
+ kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+
+ } else if (cval->min == 0 && cval->max <= 1000) {
+ /* Some other clearly broken DragonFly variant.
+ * At least a 0..53 variant (hw v1.0) exists.
+ */
+ dev_info(&mixer->chip->dev->dev, "ignoring too narrow dB range on a DragonFly device");
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ }
}

void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
@@ -713,8 +723,8 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
{
switch (mixer->chip->usb_id) {
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
- if (unitid == 7 && cval->min == 0 && cval->max == 50)
- snd_dragonfly_quirk_db_scale(mixer, kctl);
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
break;
}
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 0f18c753e660..172799a4f41e 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2689,6 +2689,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),

+/* Syntek STK1160 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x05e1,
+ .idProduct = 0x0408,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Syntek",
+ .product_name = "STK1160",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER
+ }
+},
+
/* Digidesign Mbox */
{
/* Thanks to Clemens Ladisch <clemens@ladisch.de> */