Subject: Re: Linux 5.1.20
diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
index 913396ac5824..ed0d814df7e0 100644
--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -177,6 +177,9 @@ These helper barriers exist because architectures have varying implicit
ordering on their SMP atomic primitives. For example our TSO architectures
provide full ordered atomics and these barriers are no-ops.

+NOTE: when the atomic RmW ops are fully ordered, they should also imply a
+compiler barrier.
+
Thus:

atomic_fetch_add();
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index 42cd81090a2c..3f3cfc1d8d4d 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -16,7 +16,7 @@ Required properties:

Optional properties:
- interrupts: interrupt line number for the SMI error/done interrupt
-- clocks: phandle for up to three required clocks for the MDIO instance
+- clocks: phandle for up to four required clocks for the MDIO instance

The child nodes of the MDIO driver are the individual PHY devices
connected to this MDIO bus. They must have a "reg" property given the
diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c
index e4219139386a..7238b355919c 100644
--- a/Documentation/scheduler/sched-pelt.c
+++ b/Documentation/scheduler/sched-pelt.c
@@ -20,7 +20,8 @@ void calc_runnable_avg_yN_inv(void)
int i;
unsigned int x;

- printf("static const u32 runnable_avg_yN_inv[] = {");
+ /* To silence -Wunused-but-set-variable warnings. */
+ printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {");
for (i = 0; i < HALFLIFE; i++) {
x = ((1UL<<32)-1)*pow(y, i);

diff --git a/Makefile b/Makefile
index 432a62fec680..ef6daeb823b9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 1
-SUBLEVEL = 19
+SUBLEVEL = 20
EXTRAVERSION =
NAME = Shy Crocodile

diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index 592111c8d6fd..dfe3da77b437 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -64,7 +64,7 @@
gpio-sck = <&gpio1 5 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
num-chipselects = <1>;

panel: display@0 {
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d218729ec852..dc3e62a18b62 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -258,7 +258,8 @@ config GENERIC_CALIBRATE_DELAY
def_bool y

config ZONE_DMA32
- def_bool y
+ bool "Support DMA32 zone" if EXPERT
+ default y

config HAVE_GENERIC_GUP
def_bool y
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 053458a5db55..ede86856ec39 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -322,7 +322,8 @@
regulator-max-microvolt = <1320000>;
enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
regulator-ramp-delay = <80>;
- regulator-enable-ramp-delay = <1000>;
+ regulator-enable-ramp-delay = <2000>;
+ regulator-settling-time-us = <160>;
};
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 6574396d2257..d7baff79205c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1250,7 +1250,7 @@
compatible = "nvidia,tegra210-agic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x702f9000 0x2000>,
+ reg = <0x702f9000 0x1000>,
<0x702fa000 0x2000>;
interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&tegra_car TEGRA210_CLK_APE>;
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..d8c521c757e8 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;

if (!may_use_simd())
return crypto_sha1_finup(desc, data, len, out);
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..c47d1a28ff6b 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;

if (!may_use_simd()) {
if (len)
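
Note on the two CE glue changes above (sha1 and sha256): the added "&& len" keeps a zero-length finup from claiming the single-pass finalize fast path, where the hardware would never process the mandatory padding-only final block. A minimal user-space sketch of the predicate; names are illustrative, only SHA256_BLOCK_SIZE matches the kernel:

#include <stdbool.h>
#include <stdio.h>

#define SHA256_BLOCK_SIZE 64

/*
 * Mirror of the check in sha256_ce_finup() above: take the one-pass
 * hardware finalize path only when nothing has gone through the update
 * path yet (sst.count == 0), the trailing chunk is a whole number of
 * blocks, and -- the new condition -- the chunk is not empty.
 */
static bool can_finalize(unsigned long long count, unsigned int len)
{
	return !count && !(len % SHA256_BLOCK_SIZE) && len;
}

int main(void)
{
	printf("%d\n", can_finalize(0, 0));   /* 0: empty message, use fallback */
	printf("%d\n", can_finalize(0, 64));  /* 1: exactly one full block */
	printf("%d\n", can_finalize(0, 96));  /* 0: partial final block */
	return 0;
}
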
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 43d8366c1e87..efa2976ca050 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -92,7 +92,7 @@ static inline unsigned long arch_local_save_flags(void)
ARM64_HAS_IRQ_PRIO_MASKING)
: "=&r" (flags), "+r" (daif_bits)
: "r" ((unsigned long) GIC_PRIO_IRQOFF)
- : "memory");
+ : "cc", "memory");

return flags;
}
@@ -136,7 +136,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
ARM64_HAS_IRQ_PRIO_MASKING)
: "=&r" (res)
: "r" ((int) flags)
- : "memory");
+ : "cc", "memory");

return res;
}
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 803f0494dd3e..7722e85fb69c 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -155,10 +155,14 @@ static int __init acpi_fadt_sanity_check(void)
*/
if (table->revision < 5 ||
(table->revision == 5 && fadt->minor_revision < 1)) {
- pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
table->revision, fadt->minor_revision);
- ret = -EINVAL;
- goto out;
+
+ if (!fadt->arm_boot_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
}

if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c50a7a75f2e0..381ff2a96e0b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -420,6 +420,20 @@ tsk .req x28 // current thread_info
irq_stack_exit
.endm

+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+ * Set res to 0 if irqs were unmasked in interrupted context.
+ * Otherwise set res to non-0 value.
+ */
+ .macro test_irqs_unmasked res:req, pmr:req
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ sub \res, \pmr, #GIC_PRIO_IRQON
+alternative_else
+ mov \res, xzr
+alternative_endif
+ .endm
+#endif
+
.text

/*
@@ -616,19 +630,19 @@ ENDPROC(el1_sync)
el1_irq:
kernel_entry 1
enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
ldr x20, [sp, #S_PMR_SAVE]
-alternative_else
- mov x20, #GIC_PRIO_IRQON
-alternative_endif
- cmp x20, #GIC_PRIO_IRQOFF
- /* Irqs were disabled, don't trace */
- b.ls 1f
+alternative_else_nop_endif
+ test_irqs_unmasked res=x0, pmr=x20
+ cbz x0, 1f
+ bl asm_nmi_enter
+1:
#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
-1:
#endif

irq_handler
@@ -647,14 +661,22 @@ alternative_else_nop_endif
bl preempt_schedule_irq // irq en/disable is done inside
1:
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
/*
* if IRQs were disabled when we received the interrupt, we have an NMI
* and we are not re-enabling interrupt upon eret. Skip tracing.
*/
- cmp x20, #GIC_PRIO_IRQOFF
- b.ls 1f
+ test_irqs_unmasked res=x0, pmr=x20
+ cbz x0, 1f
+ bl asm_nmi_exit
+1:
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ test_irqs_unmasked res=x0, pmr=x20
+ cbnz x0, 1f
#endif
bl trace_hardirqs_on
1:
@@ -855,7 +877,7 @@ el0_dbg:
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_daif
+ enable_da_f
ct_user_exit
b ret_to_user
el0_inv:
@@ -907,7 +929,7 @@ el0_error_naked:
enable_dbg
mov x0, sp
bl do_serror
- enable_daif
+ enable_da_f
ct_user_exit
b ret_to_user
ENDPROC(el0_error)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 33f14e484040..b22e8ad071b1 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -78,7 +78,11 @@

#ifdef CONFIG_EFI

-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);

/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 92fa81798fb9..fdd9cb27fed5 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -27,8 +27,10 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
+#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <asm/daifflags.h>
#include <asm/vmap_stack.h>

unsigned long irq_err_count;
@@ -76,3 +78,18 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+/*
+ * Stubs to make nmi_enter/exit() code callable from ASM
+ */
+asmlinkage void notrace asm_nmi_enter(void)
+{
+ nmi_enter();
+}
+NOKPROBE_SYMBOL(asm_nmi_enter);
+
+asmlinkage void notrace asm_nmi_exit(void)
+{
+ nmi_exit();
+}
+NOKPROBE_SYMBOL(asm_nmi_exit);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7cae155e81a5..fff8c61ff608 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -191,8 +191,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#endif
max_zone_pfns[ZONE_NORMAL] = max;

free_area_init_nodes(max_zone_pfns);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 3c453a1f1ff1..172801ed35b8 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
$(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
$(call if_changed,objcopy)

+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
# Calculate the load address of the compressed kernel image
hostprogs-y := calc_vmlinuz_load_addr

diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 542c3ede9722..d14f75ec8273 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,7 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>

int main(int argc, char *argv[])
{
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index c2917b39966b..bba2c8837951 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -27,8 +27,8 @@
#define AR933X_UART_CS_PARITY_S 0
#define AR933X_UART_CS_PARITY_M 0x3
#define AR933X_UART_CS_PARITY_NONE 0
-#define AR933X_UART_CS_PARITY_ODD 1
-#define AR933X_UART_CS_PARITY_EVEN 2
+#define AR933X_UART_CS_PARITY_ODD 2
+#define AR933X_UART_CS_PARITY_EVEN 3
#define AR933X_UART_CS_IF_MODE_S 2
#define AR933X_UART_CS_IF_MODE_M 0x3
#define AR933X_UART_CS_IF_MODE_NONE 0
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 0964c236e3e5..de2998cb189e 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -167,6 +167,9 @@ long arch_ptrace(struct task_struct *child, long request,
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
+ if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+ data |= 3; /* ensure userspace privilege */
+ }
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
@@ -228,16 +231,18 @@ long arch_ptrace(struct task_struct *child, long request,

static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
- if (offset < 0)
- return sizeof(struct pt_regs);
- else if (offset <= 32*4) /* gr[0..31] */
- return offset * 2 + 4;
- else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
- return offset + 32*4;
- else if (offset < sizeof(struct pt_regs)/2 + 32*4)
- return offset * 2 + 4 - 32*8;
+ compat_ulong_t pos;
+
+ if (offset < 32*4) /* gr[0..31] */
+ pos = offset * 2 + 4;
+ else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */
+ pos = (offset - 32*4) + PT_FR0;
+ else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
+ pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
else
- return sizeof(struct pt_regs);
+ pos = sizeof(struct pt_regs);
+
+ return pos;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@@ -281,9 +286,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
+ if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+ data |= 3; /* ensure userspace privilege */
+ }
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
- *(__u64 *) ((char *) task_regs(child) + addr) = data;
+ *(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
@@ -496,7 +504,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
return;
case RI(iaoq[0]):
case RI(iaoq[1]):
- regs->iaoq[num - RI(iaoq[0])] = val;
+ /* set 2 lowest bits to ensure userspace privilege: */
+ regs->iaoq[num - RI(iaoq[0])] = val | 3;
return;
case RI(sar): regs->sar = val;
return;
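
For context on the "| 3" used in the ptrace changes above: on PA-RISC the two low bits of an IAOQ value carry the privilege level, and 3 is the least-privileged (user) level, so forcing both bits on stops a tracer from handing the traced child a kernel-privilege PC. A tiny illustration in user space, with a made-up address:

#include <stdio.h>

/* PA-RISC keeps the privilege level in bits 1:0 of an IAOQ value;
 * 3 is the lowest (user) privilege.  Forcing both bits mirrors what
 * the ptrace paths above now do before accepting a new PC. */
static unsigned long force_user_priv(unsigned long iaoq)
{
	return iaoq | 3;
}

int main(void)
{
	/* A write aimed at a privilege-0 address still lands at level 3. */
	printf("%#lx\n", force_user_priv(0x10001000UL)); /* prints 0x10001003 */
	return 0;
}
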
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 505550fb2935..22f53f9c4071 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -137,6 +137,20 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
}
#endif

+#ifdef CONFIG_PPC64
+#define is_ioremap_addr is_ioremap_addr
+static inline bool is_ioremap_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= IOREMAP_BASE && addr < IOREMAP_END;
+#else
+ return false;
+#endif
+}
+#endif /* CONFIG_PPC64 */
+
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9481a117e242..28f230a58bf5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1738,7 +1738,7 @@ handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpdi r3,0
- beq+ 12f
+ beq+ ret_from_except_lite
bl save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1753,7 +1753,12 @@ handle_dabr_fault:
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_break
-12: b ret_from_except_lite
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values. Don't use
+ * ret_from_except_lite here.
+ */
+ b ret_from_except


#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 7a919e9a3400..cbdf86228eaa 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -25,11 +25,19 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_LR 0x70
-#define SL_R12 0x74 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_LR 0xb0
+#define SL_R12 0xb4 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)

.section .data
@@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend)
mfibatl r4,3
stw r4,SL_IBAT3+4(r11)

+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r11)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r11)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r11)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r11)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r11)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r11)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r11)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r11)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r11)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r11)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r11)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r11)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r11)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r11)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r11)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
#if 0
/* Backup various CPU config stuffs */
bl __save_cpu_setup
@@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatu 3,r4
lwz r4,SL_IBAT3+4(r11)
mtibatl 3,r4
-#endif
-
BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r11)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r11)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r11)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r11)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r11)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r11)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r11)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r11)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r11)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r11)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r11)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r11)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r11)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r11)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r11)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r11)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+#endif

/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6d4f0f72231f..335fac522234 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3568,6 +3568,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,

vcpu->arch.slb_max = 0;
dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
tb = mftb();
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
@@ -4082,8 +4084,15 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,

preempt_enable();

- /* cancel pending decrementer exception if DEC is now positive */
- if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
+ /*
+ * cancel pending decrementer exception if DEC is now positive, or if
+ * entering a nested guest in which case the decrementer is now owned
+ * by L2 and the L1 decrementer is provided in hdec_expires
+ */
+ if (kvmppc_core_pending_dec(vcpu) &&
+ ((get_tb() < vcpu->arch.dec_expires) ||
+ (trap == BOOK3S_INTERRUPT_SYSCALL &&
+ kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
kvmppc_core_dequeue_dec(vcpu);

trace_kvm_guest_exit(vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 888e2609e3f1..31cd0f327c8a 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -131,7 +131,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
/* L=1 => tresume, L=0 => tsuspend */
if (instr & (1 << 21)) {
if (MSR_TM_SUSPENDED(msr))
@@ -175,7 +175,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)

/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;

@@ -205,7 +205,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)

/* Set CR0 to indicate previous transactional state */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
+ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
return RESUME_GUEST;
}
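
The three "<< 28" to "<< 29" changes above fix where the saved transactional state lands in CR0: the result is defined as 0b0 || MSR[TS] || 0b0, so the two TS bits belong at bits 30:29 of the 32-bit CR image (CR0 being the top nibble, bits 31:28), not one position lower. A quick stand-alone check of the bit positions:

#include <stdio.h>

/* Place the 2-bit MSR[TS] value into CR0 as 0b0 || TS || 0b0.
 * CR0 occupies bits 31:28 of the CR image, so TS goes to bits 30:29. */
static unsigned int cr0_from_ts(unsigned int ts)
{
	return ts << 29;
}

int main(void)
{
	/* TS = 0b10 (transactional): CR0 nibble is 0b0100 */
	printf("%#010x\n", cr0_from_ts(2));  /* 0x40000000 */
	/* The old "<< 28" put the same value one bit too low: */
	printf("%#010x\n", 2u << 28);        /* 0x20000000, i.e. CR0 = 0b0010 */
	return 0;
}
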
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6e56a6240bfa..7bf491bb0ca4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -353,7 +353,7 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);

- if (v_block_mapped((unsigned long)_stext) + 1)
+ if (v_block_mapped((unsigned long)_stext + 1))
mmu_mark_initmem_nx();
else
change_page_attr(page, numpages, PAGE_KERNEL);
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index fb64b09cad9d..eb583bd9a75d 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -38,10 +38,18 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_R12 0x70 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_R12 0xb0 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)

.section .text
@@ -126,6 +134,41 @@ _GLOBAL(low_sleep_handler)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)

+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r1)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r1)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r1)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r1)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r1)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r1)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r1)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r1)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r1)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r1)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r1)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r1)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r1)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r1)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r1)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r1)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
/* Backup various CPU config stuffs */
bl __save_cpu_setup

@@ -326,22 +369,37 @@ grackle_wake_up:
mtibatl 3,r4

BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r1)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r1)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r1)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r1)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r1)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r1)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r1)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r1)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r1)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r1)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r1)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r1)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r1)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r1)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r1)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r1)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index dc23d9d2a7d9..6309819ee2f4 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -31,9 +31,22 @@ static DEFINE_SPINLOCK(npu_context_lock);
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
struct pci_dn *pdn = PCI_DN(dn);
+ struct pci_dev *pdev;

- return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
+ pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
pdn->busno, pdn->devfn);
+
+ /*
+ * pci_get_domain_bus_and_slot() increased the reference count of
+ * the PCI device, but callers don't need that actually as the PE
+ * already holds a reference to the device. Since callers aren't
+ * aware of the reference count change, call pci_dev_put() now to
+ * avoid leaks.
+ */
+ if (pdev)
+ pci_dev_put(pdev);
+
+ return pdev;
}

/* Given a NPU device get the associated PCI device. */
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3ead4c237ed0..34427b46ec3b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2459,6 +2459,14 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);

+ /*
+ * Set table base for the case of IOMMU DMA use. Usually this is done
+ * from dma_dev_setup() which is not called when a device is returned
+ * from VFIO so do it here.
+ */
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+
return 0;
}

@@ -2546,6 +2554,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (pe->pbus)
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ else if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, NULL);
iommu_tce_table_put(tbl);
}

diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 47087832f8b2..e6bd172bcf30 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -980,6 +980,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
if (!memblock_size)
return -EINVAL;

+ if (!pr->old_prop)
+ return 0;
+
p = (__be32 *) pr->old_prop->value;
if (!p)
return -EINVAL;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 79cfd3b30ceb..771e9b3b62eb 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -205,15 +205,22 @@ static int amd_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;

+ if (event->cpu < 0)
+ return -EINVAL;
+
/*
* SliceMask and ThreadMask need to be set for certain L3 events in
* Family 17h. For other events, the two fields do not affect the count.
*/
- if (l3_mask)
- hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+ if (l3_mask && is_llc_event(event)) {
+ int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);

- if (event->cpu < 0)
- return -EINVAL;
+ if (smp_num_siblings > 1)
+ thread += cpu_data(event->cpu).apicid & 1;
+
+ hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
+ AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
+ }

uncore = event_to_amd_uncore(event);
if (!uncore)
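
The uncore change above stops setting every SliceMask/ThreadMask bit for Family 17h L3 events and instead targets the thread that the event's CPU maps to. The index computation, pulled out for clarity; the actual shift/mask constants stay in the kernel headers and are not reproduced here:

#include <stdio.h>

/*
 * Thread index used by the amd_uncore_event_init() change above: up to
 * four cores share an L3, two hardware threads per core, so the index
 * is 2 * (core id mod 4) plus the SMT sibling taken from the low APIC
 * ID bit when SMT is enabled.
 */
static int l3_thread_index(int cpu_core_id, int apicid, int smt_enabled)
{
	int thread = 2 * (cpu_core_id % 4);

	if (smt_enabled)
		thread += apicid & 1;
	return thread;
}

int main(void)
{
	printf("%d\n", l3_thread_index(5, 11, 1)); /* core 5, odd APIC ID -> 3 */
	printf("%d\n", l3_thread_index(2, 4, 0));  /* no SMT -> 4 */
	return 0;
}
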
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 82dad001d1ea..228bad687caa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -19,6 +19,7 @@
#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
+#include <asm/hypervisor.h>

#include "../perf_event.h"

@@ -2091,12 +2092,10 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);

- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
intel_pmu_disable_fixed(hwc);
- return;
- }
-
- x86_pmu_disable_event(event);
+ else
+ x86_pmu_disable_event(event);

/*
* Needs to be called after x86_pmu_disable_event,
@@ -3927,6 +3926,13 @@ static bool check_msr(unsigned long msr, u64 mask)
{
u64 val_old, val_new, val_tmp;

+ /*
+ * Disable the check for real HW, so we don't
+ * mess with potentionaly enabled registers:
+ */
+ if (hypervisor_is_type(X86_HYPER_NATIVE))
+ return true;
+
/*
* Read the current value, change it and read it back to see if it
* matches, this is needed to detect certain hardware emulators
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 853a49a8ccf6..b24da63459c4 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -419,6 +419,16 @@ static inline bool is_freerunning_event(struct perf_event *event)
(((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

+/* Check and reject invalid config */
+static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ if (is_freerunning_event(event))
+ return 0;
+
+ return -EINVAL;
+}
+
static inline void uncore_disable_box(struct intel_uncore_box *box)
{
if (box->pmu->type->ops->disable_box)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index b10e04387f38..8e4e8e423839 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3585,6 +3585,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {

static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
.read_counter = uncore_msr_read_counter,
+ .hw_config = uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ea3d95275b43..115127c7ad28 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -54,7 +54,7 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}

/**
@@ -68,7 +68,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}

/**
@@ -95,7 +95,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
- : "+m" (v->counter));
+ : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

@@ -108,7 +108,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
- : "+m" (v->counter));
+ : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index dadc20adba21..5e86c0d68ac1 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -45,7 +45,7 @@ static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
- : "er" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter) : "memory");
}

/**
@@ -59,7 +59,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
- : "er" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter) : "memory");
}

/**
@@ -87,7 +87,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
- : "m" (v->counter));
+ : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

@@ -101,7 +101,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
- : "m" (v->counter));
+ : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 14de0432d288..84f848c2541a 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -80,8 +80,8 @@ do { \
})

/* Atomic operations are already serializing on x86 */
-#define __smp_mb__before_atomic() barrier()
-#define __smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() do { } while (0)
+#define __smp_mb__after_atomic() do { } while (0)

#include <asm-generic/barrier.h>
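
Taken together, the atomic.h, atomic64_64.h and barrier.h hunks above make the non-value-returning x86 atomics explicit compiler barriers (the "memory" clobber) and, relying on that, turn smp_mb__{before,after}_atomic() into empty statements -- matching the note added to Documentation/atomic_t.txt at the top of this patch. A minimal stand-alone illustration of what the clobber buys (plain inline asm, not the kernel macros; builds on x86 only):

#include <stdio.h>

static int data, flag;

static void publish(int v)
{
	data = v;
	/* The "memory" clobber forbids the compiler from moving or
	 * caching accesses across the LOCK-prefixed RMW, which is what
	 * lets the smp_mb__*_atomic() helpers above become no-ops. */
	asm volatile("lock incl %0" : "+m" (flag) : : "memory");
}

int main(void)
{
	publish(42);
	printf("%d %d\n", data, flag);
	return 0;
}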

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 75f27ee2c263..1017b9c7dfe0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -239,12 +239,14 @@
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 310118805f57..f60ddd655c78 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -56,6 +56,7 @@
#define INTEL_FAM6_ICELAKE_XEON_D 0x6C
#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
+#define INTEL_FAM6_ICELAKE_NNPI 0x9D

/* "Small Core" Processors (Atom) */

diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 395d46f78582..c7503be92f35 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -658,8 +658,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
if (c->x86 < 0x17) {
/* LLC is at the node level. */
per_cpu(cpu_llc_id, cpu) = node_id;
- } else if (c->x86 == 0x17 &&
- c->x86_model >= 0 && c->x86_model <= 0x1F) {
+ } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
/*
* LLC is at the core complex level.
* Core complex ID is ApicId[3] for these processors.
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index d0dfb892c72f..aed45b8895d5 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -4,6 +4,8 @@
# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
#

+set -e
+
IN=$1
OUT=$2

diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 1bfe5c6e6cfe..afac7ccce72f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -546,17 +546,15 @@ void __init default_get_smp_config(unsigned int early)
* local APIC has default address
*/
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
- return;
+ goto out;
}

pr_info("Default MP configuration #%d\n", mpf->feature1);
construct_default_ISA_mptable(mpf->feature1);

} else if (mpf->physptr) {
- if (check_physptr(mpf, early)) {
- early_memunmap(mpf, sizeof(*mpf));
- return;
- }
+ if (check_physptr(mpf, early))
+ goto out;
} else
BUG();

@@ -565,7 +563,7 @@ void __init default_get_smp_config(unsigned int early)
/*
* Only use the first configuration found.
*/
-
+out:
early_memunmap(mpf, sizeof(*mpf));
}

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index dd745b58ffd8..ccefaf5218c7 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -131,8 +131,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
intr ? kvm_perf_overflow_intr :
kvm_perf_overflow, pmc);
if (IS_ERR(event)) {
- printk_once("kvm_pmu: event creation failed %ld\n",
- PTR_ERR(event));
+ pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
+ PTR_ERR(event), pmc->idx);
return;
}

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 4ca834d22169..f83e79a4d0b2 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2243,13 +2243,9 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)

set_cr4_guest_host_mask(vmx);

- if (kvm_mpx_supported()) {
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
- else
- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
- }
+ if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
}

/*
@@ -2292,6 +2288,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
+ if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
vmx_set_rflags(vcpu, vmcs12->guest_rflags);

/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
@@ -2891,9 +2890,6 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
*/
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_TPR_SHADOW);
- } else {
- printk("bad virtual-APIC page address\n");
- dump_vmcs();
}
}

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cfb8f1ec9a0a..d9ea86cc899f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1718,7 +1718,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
&msr_info->data);
case MSR_IA32_XSS:
- if (!vmx_xsaves_supported())
+ if (!vmx_xsaves_supported() ||
+ (!msr_info->host_initiated &&
+ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
return 1;
msr_info->data = vcpu->arch.ia32_xss;
break;
@@ -1929,7 +1932,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_XSS:
- if (!vmx_xsaves_supported())
+ if (!vmx_xsaves_supported() ||
+ (!msr_info->host_initiated &&
+ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
return 1;
/*
* The only supported bit as of Skylake is bit 8, but
@@ -6102,28 +6108,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)

static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
{
- u32 exit_intr_info = 0;
- u16 basic_exit_reason = (u16)vmx->exit_reason;
-
- if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
- || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
+ if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
return;

- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- vmx->exit_intr_info = exit_intr_info;
+ vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

/* if exit due to PF check for async PF */
- if (is_page_fault(exit_intr_info))
+ if (is_page_fault(vmx->exit_intr_info))
vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();

/* Handle machine checks before interrupts are enabled */
- if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
- is_machine_check(exit_intr_info))
+ if (is_machine_check(vmx->exit_intr_info))
kvm_machine_check();

/* We need to handle NMIs before interrupts are enabled */
- if (is_nmi(exit_intr_info)) {
+ if (is_nmi(vmx->exit_intr_info)) {
kvm_before_interrupt(&vmx->vcpu);
asm("int $2");
kvm_after_interrupt(&vmx->vcpu);
@@ -6526,6 +6525,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->idt_vectoring_info = 0;

vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
+ if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+ kvm_machine_check();
+
if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
return;

diff --git a/block/bio.c b/block/bio.c
index a3c80a6c1fe5..e73a2a4c01cd 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
+#include <linux/highmem.h>

#include <trace/events/block.h>
#include "blk.h"
@@ -1475,8 +1476,22 @@ void bio_unmap_user(struct bio *bio)
bio_put(bio);
}

+static void bio_invalidate_vmalloc_pages(struct bio *bio)
+{
+#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+ if (bio->bi_private && !op_is_write(bio_op(bio))) {
+ unsigned long i, len = 0;
+
+ for (i = 0; i < bio->bi_vcnt; i++)
+ len += bio->bi_io_vec[i].bv_len;
+ invalidate_kernel_vmap_range(bio->bi_private, len);
+ }
+#endif
+}
+
static void bio_map_kern_endio(struct bio *bio)
{
+ bio_invalidate_vmalloc_pages(bio);
bio_put(bio);
}

@@ -1497,6 +1512,8 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
+ bool is_vmalloc = is_vmalloc_addr(data);
+ struct page *page;
int offset, i;
struct bio *bio;

@@ -1504,6 +1521,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
if (!bio)
return ERR_PTR(-ENOMEM);

+ if (is_vmalloc) {
+ flush_kernel_vmap_range(data, len);
+ bio->bi_private = data;
+ }
+
offset = offset_in_page(kaddr);
for (i = 0; i < nr_pages; i++) {
unsigned int bytes = PAGE_SIZE - offset;
@@ -1514,7 +1536,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
if (bytes > len)
bytes = len;

- if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+ if (!is_vmalloc)
+ page = virt_to_page(data);
+ else
+ page = vmalloc_to_page(data);
+ if (bio_add_pc_page(q, bio, page, bytes,
offset) < bytes) {
/* we don't support partial mappings */
bio_put(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 617a2b3f7582..95593df93986 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1005,8 +1005,12 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
}
next:
if (has_stats) {
- off += scnprintf(buf+off, size-off, "\n");
- seq_commit(sf, off);
+ if (off < size - 1) {
+ off += scnprintf(buf+off, size-off, "\n");
+ seq_commit(sf, off);
+ } else {
+ seq_commit(sf, -1);
+ }
}
}

diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 507212d75ee2..51070a730e3c 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -617,44 +617,26 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)

inflight = atomic_dec_return(&rqw->inflight);
WARN_ON_ONCE(inflight < 0);
- if (iolat->min_lat_nsec == 0)
- goto next;
- iolatency_record_time(iolat, &bio->bi_issue, now,
- issue_as_root);
- window_start = atomic64_read(&iolat->window_start);
- if (now > window_start &&
- (now - window_start) >= iolat->cur_win_nsec) {
- if (atomic64_cmpxchg(&iolat->window_start,
- window_start, now) == window_start)
- iolatency_check_latencies(iolat, now);
+ /*
+ * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
+ * submitted, so do not account for it.
+ */
+ if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+ window_start = atomic64_read(&iolat->window_start);
+ if (now > window_start &&
+ (now - window_start) >= iolat->cur_win_nsec) {
+ if (atomic64_cmpxchg(&iolat->window_start,
+ window_start, now) == window_start)
+ iolatency_check_latencies(iolat, now);
+ }
}
-next:
wake_up(&rqw->wait);
blkg = blkg->parent;
}
}

-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
-{
- struct blkcg_gq *blkg;
-
- blkg = bio->bi_blkg;
- while (blkg && blkg->parent) {
- struct rq_wait *rqw;
- struct iolatency_grp *iolat;
-
- iolat = blkg_to_lat(blkg);
- if (!iolat)
- goto next;
-
- rqw = &iolat->rq_wait;
- atomic_dec(&rqw->inflight);
- wake_up(&rqw->wait);
-next:
- blkg = blkg->parent;
- }
-}
-
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
@@ -666,7 +648,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)

static struct rq_qos_ops blkcg_iolatency_ops = {
.throttle = blkcg_iolatency_throttle,
- .cleanup = blkcg_iolatency_cleanup,
.done_bio = blkcg_iolatency_done_bio,
.exit = blkcg_iolatency_exit,
};
@@ -777,8 +758,10 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)

if (!oldval && val)
return 1;
- if (oldval && !val)
+ if (oldval && !val) {
+ blkcg_clear_delay(blkg);
return -1;
+ }
return 0;
}

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1b97a73d2fb1..61e990867a5d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -881,13 +881,10 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
u64 tmp;

- jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
-
- /* Slice has just started. Consider one slice interval */
- if (!jiffy_elapsed)
- jiffy_elapsed_rnd = tg->td->throtl_slice;
+ jiffy_elapsed = jiffies - tg->slice_start[rw];

- jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
+ /* Round up to the next throttle slice, wait time must be nonzero */
+ jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

/*
* jiffy_elapsed_rnd should not be a big value as minimum iops can be
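
The tg_with_in_iops_limit() change above replaces the special case for a just-started slice with a single round-up that can never yield zero: rounding jiffy_elapsed + 1 up to the throttle slice. The rounding itself is ordinary integer round-up, sketched here with an illustrative slice length:

#include <stdio.h>

/* roundup() as used above: round x up to the next multiple of y. */
static unsigned long roundup_ul(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned long throtl_slice = 100;  /* illustrative jiffies value */

	/* The "+ 1" keeps the rounded window nonzero right after slice
	 * start and pushes an exact multiple into the next slice. */
	printf("%lu\n", roundup_ul(0 + 1, throtl_slice));   /* 100 */
	printf("%lu\n", roundup_ul(100 + 1, throtl_slice)); /* 200 */
	printf("%lu\n", roundup_ul(37 + 1, throtl_slice));  /* 100 */
	return 0;
}
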
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 2d98803faec2..6ea455b62cb4 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -69,7 +69,7 @@ EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
sector_t nr_sectors)
{
- unsigned long zone_sectors = blk_queue_zone_sectors(q);
+ sector_t zone_sectors = blk_queue_zone_sectors(q);

return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}
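
The one-liner above gives zone_sectors the same 64-bit sector_t type as the count it rounds, so the zone-count computation stays in sector units throughout. The formula it implements is plain round-up division (the kernel shifts by ilog2() instead, since zone sizes are powers of two); a sketch with stand-in types and made-up sizes:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;   /* stand-in for the kernel type */

/* Round nr_sectors up to whole zones; doing the arithmetic entirely in
 * sector_t mirrors the type change in __blkdev_nr_zones() above. */
static unsigned int nr_zones(sector_t nr_sectors, sector_t zone_sectors)
{
	return (nr_sectors + zone_sectors - 1) / zone_sectors;
}

int main(void)
{
	sector_t disk = 30ULL << 21;  /* 30 GiB in 512-byte sectors */
	sector_t zone = 256 << 11;    /* 256 MiB zones, in sectors */

	printf("%u zones\n", nr_zones(disk, zone)); /* 120 */
	return 0;
}
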
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index be70ca6c85d3..1f1f004dc757 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
+ select CRYPTO_HASH
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
@@ -65,6 +66,7 @@ config TPM_KEY_PARSER
config PKCS7_MESSAGE_PARSER
tristate "PKCS#7 message parser"
depends on X509_CERTIFICATE_PARSER
+ select CRYPTO_HASH
select ASN1
select OID_REGISTRY
help
@@ -87,6 +89,7 @@ config SIGNED_PE_FILE_VERIFICATION
bool "Support for PE file signature verification"
depends on PKCS7_MESSAGE_PARSER=y
depends on SYSTEM_DATA_VERIFICATION
+ select CRYPTO_HASH
select ASN1
select OID_REGISTRY
help
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 279d816ab51d..e803ccf18c14 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -65,6 +65,8 @@ struct chachapoly_req_ctx {
unsigned int cryptlen;
/* Actual AD, excluding IV */
unsigned int assoclen;
+ /* request flags, with MAY_SLEEP cleared if needed */
+ u32 flags;
union {
struct poly_req poly;
struct chacha_req chacha;
@@ -74,8 +76,12 @@ struct chachapoly_req_ctx {
static inline void async_done_continue(struct aead_request *req, int err,
int (*cont)(struct aead_request *))
{
- if (!err)
+ if (!err) {
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = cont(req);
+ }

if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
@@ -142,7 +148,7 @@ static int chacha_decrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}

- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_decrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -186,7 +192,7 @@ static int poly_tail(struct aead_request *req)
memcpy(&preq->tail.cryptlen, &len, sizeof(len));
sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_tail_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src,
@@ -217,7 +223,7 @@ static int poly_cipherpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, &preq->pad, padlen);

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipherpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -248,7 +254,7 @@ static int poly_cipher(struct aead_request *req)
sg_init_table(rctx->src, 2);
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipher_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
@@ -278,7 +284,7 @@ static int poly_adpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, preq->pad, padlen);

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_adpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -302,7 +308,7 @@ static int poly_ad(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_ad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
@@ -329,7 +335,7 @@ static int poly_setkey(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_setkey_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
@@ -353,7 +359,7 @@ static int poly_init(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;

- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_init_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);

@@ -391,7 +397,7 @@ static int poly_genkey(struct aead_request *req)

chacha_iv(creq->iv, req, 0);

- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
poly_genkey_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
@@ -431,7 +437,7 @@ static int chacha_encrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}

- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_encrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -449,6 +455,7 @@ static int chachapoly_encrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);

rctx->cryptlen = req->cryptlen;
+ rctx->flags = aead_request_flags(req);

/* encrypt call chain:
* - chacha_encrypt/done()
@@ -470,6 +477,7 @@ static int chachapoly_decrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);

rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+ rctx->flags = aead_request_flags(req);

/* decrypt call chain:
* - poly_genkey/done()
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index d9f192b953b2..591b52d3bdca 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 k;

if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,

if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
- ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+ BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+ memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+ ctx->gf128 = gf128mul_init_4k_lle(&k);
+ memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
if (!ctx->gf128)
return -ENOMEM;
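
The ghash_setkey() change above copies the caller's key into a stack be128 before passing it to gf128mul_init_4k_lle(), because the raw key buffer is only byte-aligned while that helper dereferences it as a be128; the copy is wiped afterwards. The general pattern, sketched in user space with a stand-in consumer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct be128 { uint64_t a, b; };  /* stand-in for the kernel's be128 */

static void consume(const struct be128 *k)
{
	/* stands in for gf128mul_init_4k_lle(), which needs alignment */
	printf("%016llx%016llx\n",
	       (unsigned long long)k->a, (unsigned long long)k->b);
}

/* Copy a possibly misaligned key into a naturally aligned local, use
 * it, then wipe the copy (the kernel uses memzero_explicit(), which
 * cannot be optimized away like a plain memset() could be). */
static void use_key_aligned(const uint8_t *key)
{
	struct be128 k;

	memcpy(&k, key, sizeof(k));
	consume(&k);
	memset(&k, 0, sizeof(k));
}

int main(void)
{
	uint8_t buf[17] = { 0 };

	use_key_aligned(buf + 1);  /* deliberately misaligned source */
	return 0;
}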

diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7c3382facc82..600bd288881d 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -229,7 +229,13 @@
x4 ^= x2; \
})

-static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+/*
+ * both gcc and clang have misoptimized this function in the past,
+ * producing horrible object code from spilling temporary variables
+ * on the stack. Forcing this part out of line avoids that.
+ */
+static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
+ u32 r3, u32 r4, u32 *k)
{
k += 100;
S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8386038d67c7..51540dbee23b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1050,6 +1050,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
req, tsgl, hashstate);
if (err)
return err;
+ cond_resched();
}
}
#endif
@@ -1105,6 +1106,7 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
if (err)
goto out;
+ cond_resched();
}
err = 0;
out:
@@ -1346,6 +1348,7 @@ static int test_aead_vec(const char *driver, int enc,
&cfg, req, tsgls);
if (err)
return err;
+ cond_resched();
}
}
#endif
@@ -1365,6 +1368,7 @@ static int test_aead(const char *driver, int enc,
tsgls);
if (err)
return err;
+ cond_resched();
}
return 0;
}
@@ -1679,6 +1683,7 @@ static int test_skcipher_vec(const char *driver, int enc,
&cfg, req, tsgls);
if (err)
return err;
+ cond_resched();
}
}
#endif
@@ -1698,6 +1703,7 @@ static int test_skcipher(const char *driver, int enc,
tsgls);
if (err)
return err;
+ cond_resched();
}
return 0;
}
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 831660179662..c8652f91054e 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -69,7 +69,8 @@ acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);

acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+ u8 clear_on_enable);

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..344feba29063 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
* FUNCTION: acpi_ev_add_gpe_reference
*
* PARAMETERS: gpe_event_info - Add a reference to this GPE
+ * clear_on_enable - Clear GPE status before enabling it
*
* RETURN: Status
*
@@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
******************************************************************************/

acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+ u8 clear_on_enable)
{
acpi_status status = AE_OK;

@@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)

/* Enable on first reference */

+ if (clear_on_enable) {
+ (void)acpi_hw_clear_gpe(gpe_event_info);
+ }
+
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_ev_enable_gpe(gpe_event_info);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 328d1d6123ad..fb15e9e2373b 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
continue;
}

- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 3df00eb6621b..279ef0557aa3 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
ACPI_GPE_DISPATCH_METHOD) ||
(ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
- (void)acpi_ev_add_gpe_reference(gpe_event_info);
+ (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {

/* Poll edge triggered GPEs to handle existing events */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 30a083902f52..710488ec59e9 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
if (gpe_event_info) {
if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
ACPI_GPE_DISPATCH_NONE) {
- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
if (ACPI_SUCCESS(status) &&
ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {

diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 938ed513b070..6215680418c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1486,7 +1486,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
+ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

return 0;
@@ -1733,7 +1733,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+ if (dev->class == ATA_DEV_ZAC &&
+ ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
char sense_key, asc, ascq;

sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
@@ -1787,10 +1788,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
}

switch (qc->dev->class) {
- case ATA_DEV_ATA:
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
ata_eh_request_sense(qc, qc->scsicmd);
+ /* fall through */
+ case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 19eb454f26c3..df2893d4626b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -565,6 +565,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}

if (!strcmp(name, "dummy")) {
+ kfree(map->debugfs_name);
+
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
name = map->debugfs_name;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4f822e087def..61d1a0864dea 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1642,6 +1642,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len);
+ else
+ ret = -ENOTSUPP;

/* If that didn't work fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
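
The _regmap_raw_write_impl() hunk adds an else branch so ret is a well-defined -ENOTSUPP when no bus-level gather write was even attempted, which lets the existing "linearise by hand" fallback run instead of testing an uninitialised value. The try-fast-path-then-fall-back shape, as a standalone sketch (fast_write/slow_write are stand-ins, and EOPNOTSUPP replaces the kernel-internal ENOTSUPP):

#include <errno.h>
#include <stdio.h>

static int fast_write(const void *buf, unsigned int len)
{
	(void)buf; (void)len;
	return -EOPNOTSUPP;		/* pretend the bus has no gather write */
}

static int slow_write(const void *buf, unsigned int len)
{
	(void)buf;
	printf("fell back, wrote %u bytes\n", len);
	return 0;
}

static int do_write(const void *buf, unsigned int len, int have_fast_path)
{
	/* ret must be initialised even when the fast path is skipped. */
	int ret = have_fast_path ? fast_write(buf, len) : -EOPNOTSUPP;

	if (ret == -EOPNOTSUPP)
		ret = slow_write(buf, len);
	return ret;
}

int main(void)
{
	char buf[4] = { 0 };

	return do_write(buf, sizeof(buf), 0);
}
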
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 95f608d1a098..38e5811a045e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2119,6 +2119,9 @@ static void setup_format_params(int track)
raw_cmd->kernel_data = floppy_track_buffer;
raw_cmd->length = 4 * F_SECT_PER_TRACK;

+ if (!F_SECT_PER_TRACK)
+ return;
+
/* allow for about 30ms for data transport per track */
head_shift = (F_SECT_PER_TRACK + 5) / 6;

@@ -3229,8 +3232,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
int cnt;

/* sanity checking for parameters. */
- if (g->sect <= 0 ||
- g->head <= 0 ||
+ if ((int)g->sect <= 0 ||
+ (int)g->head <= 0 ||
+ /* check for overflow in max_sector */
+ (int)(g->sect * g->head) <= 0 ||
+ /* check for zero in F_SECT_PER_TRACK */
+ (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
@@ -3374,6 +3381,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}

+static bool valid_floppy_drive_params(const short autodetect[8],
+ int native_format)
+{
+ size_t floppy_type_size = ARRAY_SIZE(floppy_type);
+ size_t i = 0;
+
+ for (i = 0; i < 8; ++i) {
+ if (autodetect[i] < 0 ||
+ autodetect[i] >= floppy_type_size)
+ return false;
+ }
+
+ if (native_format < 0 || native_format >= floppy_type_size)
+ return false;
+
+ return true;
+}
+
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
@@ -3500,6 +3525,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
SUPBOUND(size, strlen((const char *)outparam) + 1);
break;
case FDSETDRVPRM:
+ if (!valid_floppy_drive_params(inparam.dp.autodetect,
+ inparam.dp.native_format))
+ return -EINVAL;
*UDP = inparam.dp;
break;
case FDGETDRVPRM:
@@ -3697,6 +3725,8 @@ static int compat_setdrvprm(int drive,
return -EPERM;
if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
+ if (!valid_floppy_drive_params(v.autodetect, v.native_format))
+ return -EINVAL;
mutex_lock(&floppy_mutex);
UDP->cmos = v.cmos;
UDP->max_dtr = v.max_dtr;
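
valid_floppy_drive_params() range-checks the autodetect[] entries and native_format arriving through FDSETDRVPRM (and its compat path) before they are ever used to index floppy_type[]. The same validate-before-commit pattern, sketched standalone (the table size and field names are illustrative, not the floppy driver's):

#include <stdbool.h>
#include <stddef.h>

#define TABLE_ENTRIES 32		/* size of the trusted lookup table */

struct drive_params {
	short autodetect[8];		/* user-supplied table indices */
	int native_format;		/* user-supplied table index */
};

static bool params_valid(const struct drive_params *p)
{
	size_t i;

	for (i = 0; i < 8; i++)
		if (p->autodetect[i] < 0 || p->autodetect[i] >= TABLE_ENTRIES)
			return false;

	return p->native_format >= 0 && p->native_format < TABLE_ENTRIES;
}

/* The ioctl handler rejects the request (-EINVAL) before copying into
 * global driver state, so no out-of-bounds index ever reaches the table. */
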
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index d7ac09c092f2..21d0b651b335 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -326,11 +326,12 @@ static ssize_t nullb_device_power_store(struct config_item *item,
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
} else if (dev->power && !newp) {
- mutex_lock(&lock);
- dev->power = newp;
- null_del_dev(dev->nullb);
- mutex_unlock(&lock);
- clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+ mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ }
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}

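The null_blk change makes power-off a one-shot operation: only the writer that actually clears NULLB_DEV_FL_UP tears the device down, so writing 0 to the power attribute twice can no longer call null_del_dev() twice. A kernel-style sketch of that idiom (struct my_dev, MY_DEV_FL_UP and my_del_dev() are generic stand-ins):

/* Only the task that clears the UP bit performs the teardown. */
static void power_off(struct my_dev *dev)
{
	if (test_and_clear_bit(MY_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&dev->lock);
		dev->power = 0;
		my_del_dev(dev);	/* runs at most once per power-on */
		mutex_unlock(&dev->lock);
	}
	clear_bit(MY_DEV_FL_CONFIGURED, &dev->flags);
}
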
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7db48ae65cd2..4c9f11766e82 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -279,7 +279,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },

/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 1a7f0c82fb36..66fe1e6dc631 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -759,6 +759,11 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);

+ if (bcsp->rx_skb) {
+ kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
+ }
+
kfree(bcsp);
return 0;
}
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 122a81ab8e48..01ef2fab5764 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -325,7 +325,7 @@ static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll
"sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };

static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
- "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", };
+ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };

static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
"sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
@@ -361,11 +361,11 @@ static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_
"sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };

static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
- "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", };
+ "audio_pll2_out", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", };

static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };

-static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_clk",
+static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
"vpu_pll", "sys_pll1_80m", };

static struct clk *clks[IMX8MM_CLK_END];
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 34bd250d46c6..6aa10cbc1d59 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -209,7 +209,7 @@ static void exynos4_frc_resume(struct clocksource *cs)

static struct clocksource mct_frc = {
.name = "mct-frc",
- .rating = 400,
+ .rating = 450, /* use value higher than ARM arch timer */
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -464,7 +464,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->rating = 450;
+ evt->rating = 500; /* use value higher than ARM arch timer */

exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index fdb3d795a409..84adfff59fb0 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -310,7 +310,7 @@ static int __init tegra_init_timer(struct device_node *np)
pr_err("%s: can't map IRQ for CPU%d\n",
__func__, cpu);
ret = -EINVAL;
- goto out;
+ goto out_irq;
}

irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
@@ -320,7 +320,8 @@ static int __init tegra_init_timer(struct device_node *np)
if (ret) {
pr_err("%s: cannot setup irq %d for CPU%d\n",
__func__, cpu_to->clkevt.irq, cpu);
- ret = -EINVAL;
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ cpu_to->clkevt.irq = 0;
goto out_irq;
}
}
@@ -340,6 +341,8 @@ static int __init tegra_init_timer(struct device_node *np)
irq_dispose_mapping(cpu_to->clkevt.irq);
}
}
+
+ to->of_base.base = timer_reg_base;
out:
timer_of_cleanup(to);
return ret;
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 3458c5a085d9..fb141d9b3c43 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
}

static inline int crypto4xx_crypt(struct skcipher_request *req,
- const unsigned int ivlen, bool decrypt)
+ const unsigned int ivlen, bool decrypt,
+ bool check_blocksize)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE];

+ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);

@@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
ctx->sa_len, 0, NULL);
}

-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, false);
+ return crypto4xx_crypt(req, 0, true, true);
}

-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, false);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}

-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}

-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}

/**
@@ -278,8 +292,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
return ret;
}

- return encrypt ? crypto4xx_encrypt_iv(req)
- : crypto4xx_decrypt_iv(req);
+ return encrypt ? crypto4xx_encrypt_iv_stream(req)
+ : crypto4xx_decrypt_iv_stream(req);
}

static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 920bd5e720b2..63c915f170c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1226,8 +1226,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_block,
+ .decrypt = crypto4xx_decrypt_iv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1238,7 +1238,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1246,8 +1246,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1259,7 +1259,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1279,7 +1279,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1306,8 +1306,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt_noiv,
- .decrypt = crypto4xx_decrypt_noiv,
+ .encrypt = crypto4xx_encrypt_noiv_block,
+ .decrypt = crypto4xx_decrypt_noiv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1318,7 +1318,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1326,8 +1326,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..da82862e57c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -183,10 +183,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
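
The crypto4xx changes split the skcipher handlers into _block and _stream variants: ECB/CBC requests must be a whole number of AES blocks, while CFB/OFB (and CTR) advertise cra_blocksize = 1 and accept arbitrary lengths. The length check itself, as a small standalone sketch (crypt_one_request stands in for the hardware submission path):

#include <errno.h>

#define AES_BLOCK_SIZE 16

static int crypt_one_request(unsigned int cryptlen)
{
	(void)cryptlen;
	return 0;			/* stand-in for queueing to the engine */
}

static int submit_request(unsigned int cryptlen, int is_block_mode)
{
	if (is_block_mode && (cryptlen % AES_BLOCK_SIZE))
		return -EINVAL;		/* partial blocks are only legal for stream modes */

	return crypt_one_request(cryptlen);
}
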
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 53ab1f140a26..8a3ed4031206 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -111,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
return;

err_out:
- of_node_put(trng);
iounmap(dev->trng_base);
kfree(rng);
dev->trng_base = NULL;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 579578498deb..2b9b64b01c3b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -965,6 +965,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
@@ -989,9 +990,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,

/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block when running in CBC mode.
*/
- if (ivsize)
+ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
ivsize, ivsize, 0);

@@ -1072,6 +1073,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->mapped_dst_nents) {
dst_dma = 0;
+ out_options = 0;
} else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
out_options = 0;
@@ -1808,9 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)

/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
+ * ciphertext block when running in CBC mode.
*/
- if (ivsize)
+ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
ivsize, ivsize, 0);

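The caamalg hunks copy the last ciphertext block into req->iv only for CBC; other modes have different IV-output rules, so the unconditional copy was wrong. On a flat buffer the CBC convention looks like this (the driver does the same through scatterwalk_map_and_copy()):

#include <string.h>

#define BLOCK_SIZE 16

/* After CBC, the IV returned to the caller must be the last ciphertext
 * block so a follow-up request can chain from it. */
static void cbc_update_iv(unsigned char *iv, const unsigned char *ciphertext,
			  size_t cryptlen)
{
	if (cryptlen >= BLOCK_SIZE)
		memcpy(iv, ciphertext + cryptlen - BLOCK_SIZE, BLOCK_SIZE);
}
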
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c61921d32489..96d1a9647b01 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1068,7 +1068,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table),
out_len, 0);
- } else if (mapped_dst_nents == 1) {
+ } else if (mapped_dst_nents <= 1) {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
0);
} else {
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 0a72c96708c4..faf238db153c 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -525,6 +525,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_addr(out_fle, qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table));
}
+ } else if (!mapped_dst_nents) {
+ /*
+ * crypto engine requires the output entry to be present when
+ * "frame list" FD is used.
+ * Since engine does not support FMT=2'b11 (unused entry type),
+ * leaving out_fle zeroized is the best option.
+ */
+ goto skip_out_fle;
} else if (mapped_dst_nents == 1) {
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -536,6 +544,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,

dpaa2_fl_set_len(out_fle, out_len);

+skip_out_fle:
return edesc;
}

diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..976aa9b3b264 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -18,6 +18,7 @@
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT 31
+#define PREHDR_ABS BIT(25)

/*
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -346,6 +347,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
@@ -401,6 +403,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 1b5035d56288..9b6d8972a565 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -35,56 +35,62 @@ struct ccp_tasklet_data {
};

/* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
"",
- "ERR 01: ILLEGAL_ENGINE",
- "ERR 02: ILLEGAL_KEY_ID",
- "ERR 03: ILLEGAL_FUNCTION_TYPE",
- "ERR 04: ILLEGAL_FUNCTION_MODE",
- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
- "ERR 06: ILLEGAL_FUNCTION_SIZE",
- "ERR 07: Zlib_MISSING_INIT_EOM",
- "ERR 08: ILLEGAL_FUNCTION_RSVD",
- "ERR 09: ILLEGAL_BUFFER_LENGTH",
- "ERR 10: VLSB_FAULT",
- "ERR 11: ILLEGAL_MEM_ADDR",
- "ERR 12: ILLEGAL_MEM_SEL",
- "ERR 13: ILLEGAL_CONTEXT_ID",
- "ERR 14: ILLEGAL_KEY_ADDR",
- "ERR 15: 0xF Reserved",
- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
- "ERR 18: CMD_TIMEOUT",
- "ERR 19: IDMA0_AXI_SLVERR",
- "ERR 20: IDMA0_AXI_DECERR",
- "ERR 21: 0x15 Reserved",
- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
- "ERR 23: IDMA1_AIXI_DECERR",
- "ERR 24: 0x18 Reserved",
- "ERR 25: ZLIBVHB_AXI_SLVERR",
- "ERR 26: ZLIBVHB_AXI_DECERR",
- "ERR 27: 0x1B Reserved",
- "ERR 27: ZLIB_UNEXPECTED_EOM",
- "ERR 27: ZLIB_EXTRA_DATA",
- "ERR 30: ZLIB_BTYPE",
- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
- "ERR 36: ZLIB_LIMIT_REACHED",
- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
- "ERR 38: ODMA0_AXI_SLVERR",
- "ERR 39: ODMA0_AXI_DECERR",
- "ERR 40: 0x28 Reserved",
- "ERR 41: ODMA1_AXI_SLVERR",
- "ERR 42: ODMA1_AXI_DECERR",
- "ERR 43: LSB_PARITY_ERR",
+ "ILLEGAL_ENGINE",
+ "ILLEGAL_KEY_ID",
+ "ILLEGAL_FUNCTION_TYPE",
+ "ILLEGAL_FUNCTION_MODE",
+ "ILLEGAL_FUNCTION_ENCRYPT",
+ "ILLEGAL_FUNCTION_SIZE",
+ "Zlib_MISSING_INIT_EOM",
+ "ILLEGAL_FUNCTION_RSVD",
+ "ILLEGAL_BUFFER_LENGTH",
+ "VLSB_FAULT",
+ "ILLEGAL_MEM_ADDR",
+ "ILLEGAL_MEM_SEL",
+ "ILLEGAL_CONTEXT_ID",
+ "ILLEGAL_KEY_ADDR",
+ "0xF Reserved",
+ "Zlib_ILLEGAL_MULTI_QUEUE",
+ "Zlib_ILLEGAL_JOBID_CHANGE",
+ "CMD_TIMEOUT",
+ "IDMA0_AXI_SLVERR",
+ "IDMA0_AXI_DECERR",
+ "0x15 Reserved",
+ "IDMA1_AXI_SLAVE_FAULT",
+ "IDMA1_AIXI_DECERR",
+ "0x18 Reserved",
+ "ZLIBVHB_AXI_SLVERR",
+ "ZLIBVHB_AXI_DECERR",
+ "0x1B Reserved",
+ "ZLIB_UNEXPECTED_EOM",
+ "ZLIB_EXTRA_DATA",
+ "ZLIB_BTYPE",
+ "ZLIB_UNDEFINED_SYMBOL",
+ "ZLIB_UNDEFINED_DISTANCE_S",
+ "ZLIB_CODE_LENGTH_SYMBOL",
+ "ZLIB _VHB_ILLEGAL_FETCH",
+ "ZLIB_UNCOMPRESSED_LEN",
+ "ZLIB_LIMIT_REACHED",
+ "ZLIB_CHECKSUM_MISMATCH0",
+ "ODMA0_AXI_SLVERR",
+ "ODMA0_AXI_DECERR",
+ "0x28 Reserved",
+ "ODMA1_AXI_SLVERR",
+ "ODMA1_AXI_DECERR",
};

-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
{
- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+ return;
+
+ if (e < ARRAY_SIZE(ccp_error_codes))
+ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+ else
+ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
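
ccp_log_error() no longer uses the hardware error code as a blind index: codes at or above CCP_MAX_ERROR_CODE are rejected, and codes beyond the string table are reported as "Unknown Error". The bounds-checked lookup, reduced to a standalone sketch (names and the truncated table are illustrative):

#include <stdio.h>

#define MAX_CODE 64

static const char * const err_names[] = {
	"", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID", /* ... */
};

static void log_error(unsigned int code)
{
	if (code >= MAX_CODE)		/* outside the defined code space */
		return;

	if (code < sizeof(err_names) / sizeof(err_names[0]))
		fprintf(stderr, "error %u: %s\n", code, err_names[code]);
	else
		fprintf(stderr, "error %u: Unknown Error\n", code);
}
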
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 6810b65c1939..7442b0422f8a 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -632,7 +632,7 @@ struct ccp5_desc {
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);

-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);

struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 267a367bd076..a17de7c9841b 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,

unsigned long long *final;
unsigned int dm_offset;
+ unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
int ret;
@@ -663,9 +664,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}

+ jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.jobid = jobid;
op.sb_key = cmd_q->sb_key; /* Pre-allocated */
op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.init = 1;
@@ -816,6 +819,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);

+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = jobid;
+ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ op.init = 1;
+ op.u.aes.type = aes->type;
op.u.aes.mode = CCP_AES_MODE_GHASH;
op.u.aes.action = CCP_AES_GHASHFINAL;
op.src.type = CCP_MEMTYPE_SYSTEM;
@@ -843,7 +853,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_tag;

- ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+ ret = crypto_memneq(tag.address, final_wa.address,
+ AES_BLOCK_SIZE) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}

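Switching the GCM tag check from memcmp() to crypto_memneq() closes a timing side channel: memcmp() bails out at the first differing byte, so response time reveals how much of a forged tag was correct. The constant-time comparison is simple to write out in plain C (crypto_memneq is the kernel helper; this only illustrates the idea):

#include <stddef.h>

/* Constant-time inequality: runtime depends only on len, never on where
 * the buffers first differ. Returns nonzero if they differ. */
static int ct_memneq(const unsigned char *a, const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate, never early-exit */

	return diff != 0;
}

/* Usage, mirroring the hunk: ret = ct_memneq(tag, expected, 16) ? -EBADMSG : 0; */
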
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..23fb85f4b3cc 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {

struct safexcel_cipher_req {
enum safexcel_cipher_direction direction;
+ /* Number of result descriptors associated to the request */
+ unsigned int rdescs;
bool needs_inv;
};

@@ -333,7 +335,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin

*ret = 0;

- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -346,7 +351,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = safexcel_rdesc_check_errors(priv, rdesc);

ndesc++;
- } while (!rdesc->last_seg);
+ }

safexcel_complete(priv, ring);

@@ -501,6 +506,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +515,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,

*ret = 0;

- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -522,7 +531,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = safexcel_rdesc_check_errors(priv, rdesc);

ndesc++;
- } while (!rdesc->last_seg);
+ }

safexcel_complete(priv, ring);

@@ -564,7 +573,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,

if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -587,7 +596,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,

if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +642,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
+
+ sreq->rdescs = *results;
return ret;
}

@@ -655,6 +666,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
+ sreq->rdescs = *results;
return ret;
}

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index becc654e0cd3..82d3625667cd 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1001,7 +1001,6 @@ static void ipsec_esp_encrypt_done(struct device *dev,
unsigned int authsize = crypto_aead_authsize(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;

edesc = container_of(desc, struct talitos_edesc, desc);
@@ -1015,9 +1014,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
else
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
- sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - authsize,
- icvdata, authsize);
+ sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
+ authsize, areq->assoclen + areq->cryptlen);
}

dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1035,7 +1033,6 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
char *oicv, *icv;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1045,9 +1042,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, req);

if (!err) {
+ char icvdata[SHA512_DIGEST_SIZE];
+ int nents = edesc->dst_nents ? : 1;
+ unsigned int len = req->assoclen + req->cryptlen;
+
/* auth check */
- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- icv = (char *)sg_virt(sg) + sg->length - authsize;
+ if (nents > 1) {
+ sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
+ len - authsize);
+ icv = icvdata;
+ } else {
+ icv = (char *)sg_virt(req->dst) + len - authsize;
+ }

if (edesc->dma_len) {
if (is_sec1)
@@ -1463,7 +1469,6 @@ static int aead_decrypt(struct aead_request *req)
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;

req->cryptlen -= authsize;
@@ -1497,9 +1502,8 @@ static int aead_decrypt(struct aead_request *req)
else
icvdata = &edesc->link_tbl[0];

- sg = sg_last(req->src, edesc->src_nents ? : 1);
-
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);

return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
@@ -1553,11 +1557,15 @@ static void ablkcipher_done(struct device *dev,
int err)
{
struct ablkcipher_request *areq = context;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
struct talitos_edesc *edesc;

edesc = container_of(desc, struct talitos_edesc, desc);

common_nonsnoop_unmap(dev, edesc, areq);
+ memcpy(areq->info, ctx->iv, ivsize);

kfree(edesc);

@@ -3184,7 +3192,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_alignmask = 0;
+ if (has_ftr_sec1(priv))
+ alg->cra_alignmask = 3;
+ else
+ alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

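The talitos hunks stop assuming the ICV sits at the tail of the last scatterlist entry and instead copy it at an explicit byte offset with sg_pcopy_to_buffer()/sg_pcopy_from_buffer(), which stays correct for multi-segment or oddly split destinations. The offset arithmetic on a flat buffer (real code walks a scatterlist):

#include <string.h>

/* AEAD output layout: [ assoc | ciphertext | icv ]. Address the ICV by
 * offset rather than by "end of the last segment minus authsize". */
static void copy_icv(unsigned char *icv, const unsigned char *out,
		     size_t assoclen, size_t ctlen, size_t authsize)
{
	memcpy(icv, out + assoclen + ctlen, authsize);
}
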
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 248c440c10f2..4ec84a633bd3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);

- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
- } else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
- }
-
sdma->dma_device.dev = &pdev->dev;

sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
of_node_put(spba_bus);
}

+ /*
+ * Kick off firmware loading as the very last step:
+ * attempt to load firmware only if we're not on the error path, because
+ * the firmware callback requires a fully functional and allocated sdma
+ * instance.
+ */
+ if (pdata) {
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ } else {
+ /*
+ * Because that device tree does not encode ROM script address,
+ * the RAM script in firmware is mandatory for device tree
+ * probe, otherwise it fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ } else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+ }
+ }
+
return 0;

err_register:
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 464174685589..4386ea4b9b5a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -26,7 +26,7 @@
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
+static unsigned int edac_mc_poll_msec = 1000;

/* Getter functions for above */
int edac_mc_get_log_ue(void)
@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
}

/* this is temporary */
-int edac_mc_get_poll_msec(void)
+unsigned int edac_mc_get_poll_msec(void)
{
return edac_mc_poll_msec;
}

static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
- unsigned long l;
+ unsigned int i;
int ret;

if (!val)
return -EINVAL;

- ret = kstrtoul(val, 0, &l);
+ ret = kstrtouint(val, 0, &i);
if (ret)
return ret;

- if (l < 1000)
+ if (i < 1000)
return -EINVAL;

- *((unsigned long *)kp->arg) = l;
+ *((unsigned int *)kp->arg) = i;

/* notify edac_mc engine to reset the poll period */
- edac_mc_reset_delay_period(l);
+ edac_mc_reset_delay_period(i);

return 0;
}
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
"Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");

@@ -404,6 +404,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
+ int err;
+
csrow->dev.type = &csrow_attr_type;
csrow->dev.groups = csrow_dev_groups;
device_initialize(&csrow->dev);
@@ -415,7 +417,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
edac_dbg(0, "creating (virtual) csrow node %s\n",
dev_name(&csrow->dev));

- return device_add(&csrow->dev);
+ err = device_add(&csrow->dev);
+ if (err)
+ put_device(&csrow->dev);
+
+ return err;
}

/* Create a CSROW object under specifed edac_mc_device */
@@ -443,7 +449,8 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
- put_device(&mci->csrows[i]->dev);
+
+ device_del(&mci->csrows[i]->dev);
}

return err;
@@ -645,9 +652,11 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dev_set_drvdata(&dimm->dev, dimm);
pm_runtime_forbid(&mci->dev);

- err = device_add(&dimm->dev);
+ err = device_add(&dimm->dev);
+ if (err)
+ put_device(&dimm->dev);

- edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
+ edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));

return err;
}
@@ -928,6 +937,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
+ put_device(&mci->dev);
goto out;
}

diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index dd7d0b509aa3..75528f07abd5 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void);
extern int edac_mc_get_log_ce(void);
extern int edac_mc_get_panic_on_ue(void);
extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
+extern unsigned int edac_mc_get_poll_msec(void);

unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len);
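
Besides widening the poll interval to unsigned int, the EDAC hunks fix the error path of every device_add(): once device_initialize() has run, a failed device_add() must be balanced by put_device() so the embedded kobject's release callback fires instead of leaking the object. Kernel-style sketch (struct my_obj is a placeholder type):

/* Error handling for an embedded struct device. */
static int register_obj(struct my_obj *obj, struct device *parent)
{
	int err;

	device_initialize(&obj->dev);		/* refcounting is live from here */
	obj->dev.parent = parent;
	dev_set_name(&obj->dev, "my_obj%d", obj->index);

	err = device_add(&obj->dev);
	if (err)
		put_device(&obj->dev);		/* drop the ref, run release() */

	return err;
}
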
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index fafd79438bbf..1ddc872b4e4b 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -838,9 +838,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)

raw_spin_lock_irqsave(&bank->lock, flags);
bank->irq_usage &= ~(BIT(offset));
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_clear_gpio_irqstatus(bank, offset);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_clear_gpio_irqstatus(bank, offset);
+ omap_set_gpio_irqenable(bank, offset, 0);
if (!LINE_USED(bank->mod_usage, offset))
omap_clear_gpio_debounce(bank, offset);
omap_disable_gpio_module(bank, offset);
@@ -876,8 +876,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
unsigned long flags;

raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, offset, 0);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_set_gpio_irqenable(bank, offset, 0);
raw_spin_unlock_irqrestore(&bank->lock, flags);
}

@@ -889,9 +889,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
unsigned long flags;

raw_spin_lock_irqsave(&bank->lock, flags);
- if (trigger)
- omap_set_gpio_triggering(bank, offset, trigger);
-
omap_set_gpio_irqenable(bank, offset, 1);

/*
@@ -899,9 +896,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
* is cleared, thus after the handler has run. OMAP4 needs this done
* after enabling the interrupt to clear the wakeup status.
*/
- if (bank->level_mask & BIT(offset))
+ if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
+ trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
omap_clear_gpio_irqstatus(bank, offset);

+ if (trigger)
+ omap_set_gpio_triggering(bank, offset, trigger);
+
raw_spin_unlock_irqrestore(&bank->lock, flags);
}

@@ -1454,7 +1455,7 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 nowake;
+ u32 mask, nowake;

bank->saved_datain = readl_relaxed(base + bank->regs->datain);

@@ -1464,6 +1465,16 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;

+ /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
+ mask &= ~bank->context.risingdetect;
+ bank->saved_datain |= mask;
+
+ /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
+ mask &= ~bank->context.fallingdetect;
+ bank->saved_datain &= ~mask;
+
if (!may_lose_context)
goto update_gpio_context_count;

@@ -1728,6 +1739,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
.irqstatus = OMAP4_GPIO_IRQSTATUS0,
.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
+ .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
+ .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bca3e7740ef6..b8a5c1e3b99d 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3012,7 +3012,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
int gpiod_get_raw_value(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
return gpiod_get_raw_value_commit(desc);
}
@@ -3033,7 +3033,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
int value;

VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);

value = gpiod_get_raw_value_commit(desc);
@@ -3304,7 +3304,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
+ /* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
gpiod_set_raw_value_commit(desc, value);
}
@@ -3345,6 +3345,7 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
gpiod_set_value_nocheck(desc, value);
}
@@ -4232,8 +4233,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
*
* Returns:
* On successful request the GPIO pin is configured in accordance with
- * provided @dflags. If the node does not have the requested GPIO
- * property, NULL is returned.
+ * provided @dflags.
*
* In case of error an ERR_PTR() is returned.
*/
@@ -4255,9 +4255,6 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
index, &flags);

if (!desc || IS_ERR(desc)) {
- /* If it is not there, just return NULL */
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
return desc;
}

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d94778915312..785747a14765 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1349,6 +1349,7 @@ MODULE_PARM_DESC(edid_fixup,

static void drm_get_displayid(struct drm_connector *connector,
struct edid *edid);
+static int validate_displayid(u8 *displayid, int length, int idx);

static int drm_edid_block_checksum(const u8 *raw_edid)
{
@@ -2932,16 +2933,46 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
return edid_ext;
}

-static u8 *drm_find_cea_extension(const struct edid *edid)
-{
- return drm_find_edid_extension(edid, CEA_EXT);
-}

static u8 *drm_find_displayid_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}

+static u8 *drm_find_cea_extension(const struct edid *edid)
+{
+ int ret;
+ int idx = 1;
+ int length = EDID_LENGTH;
+ struct displayid_block *block;
+ u8 *cea;
+ u8 *displayid;
+
+ /* Look for a top level CEA extension block */
+ cea = drm_find_edid_extension(edid, CEA_EXT);
+ if (cea)
+ return cea;
+
+ /* CEA blocks can also be found embedded in a DisplayID block */
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid)
+ return NULL;
+
+ ret = validate_displayid(displayid, length, idx);
+ if (ret)
+ return NULL;
+
+ idx += sizeof(struct displayid_hdr);
+ for_each_displayid_db(displayid, block, idx, length) {
+ if (block->tag == DATA_BLOCK_CTA) {
+ cea = (u8 *)block;
+ break;
+ }
+ }
+
+ return cea;
+}
+
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3665,13 +3696,38 @@ cea_revision(const u8 *cea)
static int
cea_db_offsets(const u8 *cea, int *start, int *end)
{
- /* Data block offset in CEA extension block */
- *start = 4;
- *end = cea[2];
- if (*end == 0)
- *end = 127;
- if (*end < 4 || *end > 127)
- return -ERANGE;
+ /* DisplayID CTA extension blocks and top-level CEA EDID
+ * block header definitions differ in the following bytes:
+ * 1) Byte 2 of the header specifies length differently,
+ * 2) Byte 3 is only present in the CEA top level block.
+ *
+ * The different definitions for byte 2 follow.
+ *
+ * DisplayID CTA extension block defines byte 2 as:
+ * Number of payload bytes
+ *
+ * CEA EDID block defines byte 2 as:
+ * Byte number (decimal) within this block where the 18-byte
+ * DTDs begin. If no non-DTD data is present in this extension
+ * block, the value should be set to 04h (the byte after next).
+ * If set to 00h, there are no DTDs present in this block and
+ * no non-DTD data.
+ */
+ if (cea[0] == DATA_BLOCK_CTA) {
+ *start = 3;
+ *end = *start + cea[2];
+ } else if (cea[0] == CEA_EXT) {
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ } else {
+ return -ENOTSUPP;
+ }
+
return 0;
}

@@ -5219,6 +5275,9 @@ static int drm_parse_display_id(struct drm_connector *connector,
case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
/* handled in mode gathering code. */
break;
+ case DATA_BLOCK_CTA:
+ /* handled in the cea parser code. */
+ break;
default:
DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
break;
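
drm_find_cea_extension() now also accepts a CTA data block embedded in a DisplayID extension, and cea_db_offsets() has to honour the two different headers: byte 2 is a payload length for the DisplayID CTA block but a DTD offset for a top-level CEA extension. The offset selection boils down to this (the tag constants are illustrative; the real code tests DATA_BLOCK_CTA and CEA_EXT, and returns -ENOTSUPP rather than -EOPNOTSUPP):

#include <errno.h>

enum { TAG_CEA_EXT = 0x02, TAG_DISPLAYID_CTA = 0x81 };	/* illustrative values */

/* Return the [start, end) byte range holding the data blocks in 'cea'. */
static int db_offsets(const unsigned char *cea, int *start, int *end)
{
	if (cea[0] == TAG_DISPLAYID_CTA) {
		*start = 3;			/* fixed 3-byte block header */
		*end = *start + cea[2];		/* byte 2 = payload length */
	} else if (cea[0] == TAG_CEA_EXT) {
		*start = 4;			/* fixed 4-byte extension header */
		*end = cea[2];			/* byte 2 = offset of first DTD */
		if (*end == 0)
			*end = 127;
		if (*end < 4 || *end > 127)
			return -ERANGE;
	} else {
		return -EOPNOTSUPP;
	}
	return 0;
}
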
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index ecacb22834d7..719345074711 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -184,6 +184,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
return 0;
}

+static int
+nvkm_i2c_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_pad *pad;
+
+ /*
+ * We init our i2c busses as early as possible, since they may be
+ * needed by the vbios init scripts on some cards
+ */
+ list_for_each_entry(pad, &i2c->pad, head)
+ nvkm_i2c_pad_init(pad);
+ list_for_each_entry(bus, &i2c->bus, head)
+ nvkm_i2c_bus_init(bus);
+
+ return 0;
+}
+
static int
nvkm_i2c_init(struct nvkm_subdev *subdev)
{
@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_i2c = {
.dtor = nvkm_i2c_dtor,
+ .preinit = nvkm_i2c_preinit,
.init = nvkm_i2c_init,
.fini = nvkm_i2c_fini,
.intr = nvkm_i2c_intr,
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 594c3cbc8291..18816ccf600e 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -257,7 +257,7 @@ static int init_csc(struct ipu_ic *ic,
writel(param, base++);

param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
- (params->sat << 9);
+ (params->sat << 10);
writel(param, base++);

param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index a8633b1437b2..2e3e03df83da 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -307,6 +307,9 @@ static void wacom_feature_mapping(struct hid_device *hdev,
wacom_hid_usage_quirk(hdev, field, usage);

switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCH_RING_SETTING:
+ wacom->generic_has_leds = true;
+ break;
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
if (!features->touch_max) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 09b8e4aac82f..447394cc4222 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1930,8 +1930,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_BUTTONCENTER:
- wacom->generic_has_leds = true;
- /* fall through */
case WACOM_HID_WD_BUTTONHOME:
case WACOM_HID_WD_BUTTONUP:
case WACOM_HID_WD_BUTTONDOWN:
@@ -2123,14 +2121,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
bool active = wacom_wac->hid_data.inrange_state != 0;

/* report prox for expresskey events */
- if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
- wacom_wac->hid_data.pad_input_event_flag) {
+ if (wacom_wac->hid_data.pad_input_event_flag) {
input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
input_sync(input);
if (!active)
wacom_wac->hid_data.pad_input_event_flag = false;
}
-
}

static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2706,9 +2702,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
if (report->type != HID_INPUT_REPORT)
return -1;

- if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
- wacom_wac_pad_report(hdev, report, field);
- else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_report(hdev, report);
else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_report(hdev, report);
@@ -2722,7 +2716,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct hid_field *field;
bool pad_in_hid_field = false, pen_in_hid_field = false,
- finger_in_hid_field = false;
+ finger_in_hid_field = false, true_pad = false;
int r;
int prev_collection = -1;

@@ -2738,6 +2732,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
pen_in_hid_field = true;
if (WACOM_FINGER_FIELD(field))
finger_in_hid_field = true;
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
+ true_pad = true;
}

wacom_wac_battery_pre_report(hdev, report);
@@ -2761,6 +2757,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
}

wacom_wac_battery_report(hdev, report);
+
+ if (true_pad && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report, field);
}

static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -3717,7 +3716,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
0, 5920, 4, 0);
}
input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
- input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);

/* fall through */

diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 295fd3718caa..f67d871841c0 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -145,6 +145,7 @@
#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
#define WACOM_HID_UP_G9 0xff090000
#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 8ff326c0c406..3cdf85b1ce4f 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -632,7 +632,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
goto err_out;

ret = -ENOMEM;
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
if (!page)
goto err_free_sgt;

diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 70f2cb90adc5..e759ac0d48be 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -170,6 +170,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Ice Lake NNPI */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};

diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 5f4bd52121fe..7837ea67f1f8 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -91,6 +91,12 @@ void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
up_read(&bus->lock);
}

+static struct i3c_master_controller *
+i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
+{
+ return container_of(i3cbus, struct i3c_master_controller, bus);
+}
+
static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
{
return container_of(dev, struct i3c_master_controller, dev);
@@ -565,20 +571,38 @@ static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};

-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
+int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
- i3cbus->mode = mode;
+ struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);

- if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->mode = mode;

- if (!i3cbus->scl_rate.i2c) {
- if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
- else
- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ switch (i3cbus->mode) {
+ case I3C_BUS_MODE_PURE:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ if (!i3cbus->scl_rate.i3c ||
+ i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
+ break;
+ default:
+ return -EINVAL;
}

+ dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
+ i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
+
/*
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
@@ -1966,9 +1990,6 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
/* LVR is encoded in reg[2]. */
boardinfo->lvr = reg[2];

- if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
- master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
-
list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
of_node_get(node);

@@ -2417,6 +2438,7 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
@@ -2466,9 +2488,12 @@ int i3c_master_register(struct i3c_master_controller *master,
ret = -EINVAL;
goto err_put_dev;
}
+
+ if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
}

- ret = i3c_bus_set_mode(i3cbus, mode);
+ ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
if (ret)
goto err_put_dev;
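
A note on the i3c change above: i3c_bus_set_mode() now receives the highest SCL rate tolerated by the legacy I2C devices (derived from their LVRs at registration time) and fills in per-mode defaults, clamping the I3C rate to the I2C rate in mixed-slow mode. A minimal, standalone sketch of that selection policy follows; the constant names come from the hunk, their numeric values and the helper names are assumptions, not driver code.

#include <stdio.h>

/* Assumed values: typical I3C 12.5 MHz, I2C Fast Mode 400 kHz. */
#define TYP_I3C_SCL_RATE	12500000UL
#define I2C_FM_SCL_RATE		  400000UL

enum bus_mode { MODE_PURE, MODE_MIXED_FAST, MODE_MIXED_SLOW };

static int pick_rates(enum bus_mode mode, unsigned long max_i2c,
		      unsigned long *i3c, unsigned long *i2c)
{
	switch (mode) {
	case MODE_PURE:
		if (!*i3c)
			*i3c = TYP_I3C_SCL_RATE;
		break;
	case MODE_MIXED_FAST:
		if (!*i3c)
			*i3c = TYP_I3C_SCL_RATE;
		if (!*i2c)
			*i2c = max_i2c;
		break;
	case MODE_MIXED_SLOW:
		if (!*i2c)
			*i2c = max_i2c;
		if (!*i3c || *i3c > *i2c)
			*i3c = *i2c;	/* never clock I3C faster than legacy I2C */
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned long i3c = 0, i2c = 0;

	/* Mixed-slow bus whose slowest I2C device is Fast Mode only. */
	pick_rates(MODE_MIXED_SLOW, I2C_FM_SCL_RATE, &i3c, &i2c);
	printf("i2c=%lu i3c=%lu\n", i2c, i3c);	/* both end up at 400000 */
	return 0;
}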

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index da81402992bc..9d7303041b3e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1041,15 +1041,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}

if (MLX5_CAP_GEN(mdev, tag_matching)) {
- props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
props->tm_caps.max_num_tags =
(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
- props->tm_caps.flags = IB_TM_CAP_RC;
props->tm_caps.max_ops =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}

+ if (MLX5_CAP_GEN(mdev, tag_matching) &&
+ MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
+ props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
+ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
props->cq_caps.max_cq_moderation_count =
MLX5_MAX_CQ_COUNT;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9b5e11d3fb85..04ea7db08e87 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1998,6 +1998,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
return err;

ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);

return 0;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index be9ddcad8f28..87848faa7502 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3481,13 +3481,14 @@ static const match_table_t srp_opt_tokens = {
* @net: [in] Network namespace.
* @sa: [out] Address family, IP address and port number.
* @addr_port_str: [in] IP address and port number.
+ * @has_port: [out] Whether or not @addr_port_str includes a port number.
*
* Parse the following address formats:
* - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
* - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
*/
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
- const char *addr_port_str)
+ const char *addr_port_str, bool *has_port)
{
char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
char *port_str;
@@ -3496,9 +3497,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
if (!addr)
return -ENOMEM;
port_str = strrchr(addr, ':');
- if (!port_str)
- return -EINVAL;
- *port_str++ = '\0';
+ if (port_str && strchr(port_str, ']'))
+ port_str = NULL;
+ if (port_str)
+ *port_str++ = '\0';
+ if (has_port)
+ *has_port = port_str != NULL;
ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
if (ret && addr[0]) {
addr_end = addr + strlen(addr) - 1;
@@ -3520,6 +3524,7 @@ static int srp_parse_options(struct net *net, const char *buf,
char *p;
substring_t args[MAX_OPT_ARGS];
unsigned long long ull;
+ bool has_port;
int opt_mask = 0;
int token;
int ret = -EINVAL;
@@ -3618,7 +3623,8 @@ static int srp_parse_options(struct net *net, const char *buf,
ret = -ENOMEM;
goto out;
}
- ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
+ NULL);
if (ret < 0) {
pr_warn("bad source parameter '%s'\n", p);
kfree(p);
@@ -3634,7 +3640,10 @@ static int srp_parse_options(struct net *net, const char *buf,
ret = -ENOMEM;
goto out;
}
- ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
+ &has_port);
+ if (!has_port)
+ ret = -EINVAL;
if (ret < 0) {
pr_warn("bad dest parameter '%s'\n", p);
kfree(p);
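
On the ib_srp change: the port is now taken from the last ':' only when no ']' follows it, so a bracketed IPv6 address without a port is detected as such and the destination parameter can insist on one. A standalone illustration of that heuristic (example addresses are hypothetical, not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Mirrors only the new port detection, not the driver's full parsing. */
static bool has_port(const char *addr_port_str)
{
	const char *port = strrchr(addr_port_str, ':');

	/* A ']' after the last ':' means the colon belongs to the
	 * bracketed IPv6 address, so there is no port at all. */
	return port && !strchr(port, ']');
}

int main(void)
{
	printf("%d\n", has_port("10.11.12.13:3260"));    /* 1: IPv4 with port */
	printf("%d\n", has_port("[fe80::1%eth0]:3260")); /* 1: IPv6 with port */
	printf("%d\n", has_port("[fe80::1]"));           /* 0: IPv6, no port  */
	return 0;
}
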
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 0a6f7ca883e7..dd80ff6cc427 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -24,6 +24,7 @@

#include "psmouse.h"
#include "alps.h"
+#include "trackpoint.h"

/*
* Definitions for ALPS version 3 and 4 command mode protocol
@@ -2864,6 +2865,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
return NULL;
}

+static bool alps_is_cs19_trackpoint(struct psmouse *psmouse)
+{
+ u8 param[2] = { 0 };
+
+ if (ps2_command(&psmouse->ps2dev,
+ param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return false;
+
+ /*
+ * param[0] contains the trackpoint device variant_id while
+ * param[1] contains the firmware_id. So far, all ALPS
+ * trackpoint-only devices have a variant_id equal to
+ * TP_VARIANT_ALPS and a firmware_id in the 0x20~0x2f range.
+ */
+ return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20);
+}
+
static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
{
const struct alps_protocol_info *protocol;
@@ -3164,6 +3182,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
if (error)
return error;

+ /*
+ * ALPS CS19 is a trackpoint-only device and uses a different
+ * protocol than the DualPoint ones, so we return -EINVAL here and
+ * let trackpoint.c drive this device. If the trackpoint driver is
+ * not enabled, the device falls back to a bare PS/2 mouse.
+ * If ps2_command() fails here, we rely on the psmouse_reset() that
+ * immediately follows to bring the device back to its normal state.
+ */
+ if (alps_is_cs19_trackpoint(psmouse)) {
+ psmouse_dbg(psmouse,
+ "ALPS CS19 trackpoint-only device detected, ignoring\n");
+ return -EINVAL;
+ }
+
/*
* Reset the device to make sure it is fully operational:
* on some laptops, like certain Dell Latitudes, we may
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 68fd8232d44c..af7d48431b85 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 4b8b9d7aa75e..35031228a6d0 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com

/* Max size of a single report */
#define REPORT_MAX_SIZE 10
+#define MAX_COLLECTION_LEVELS 10


/* Bitmask whether pen is in range */
@@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
char maintype = 'x';
char globtype[12];
int indent = 0;
- char indentstr[10] = "";
-
+ char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };

dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");

@@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case TAG_MAIN_COL_START:
maintype = 'S';

+ if (indent == MAX_COLLECTION_LEVELS) {
+ dev_err(ddev, "Collection level %d would exceed limit of %d\n",
+ indent + 1,
+ MAX_COLLECTION_LEVELS);
+ break;
+ }
+
if (data == 0) {
dev_dbg(ddev, "======>>>>>> Physical\n");
strcpy(globtype, "Physical");
@@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
break;

case TAG_MAIN_COL_END:
- dev_dbg(ddev, "<<<<<<======\n");
maintype = 'E';
+
+ if (indent == 0) {
+ dev_err(ddev, "Collection level already at zero\n");
+ break;
+ }
+
+ dev_dbg(ddev, "<<<<<<======\n");
+
indent--;
for (x = 0; x < indent; x++)
indentstr[x] = '-';
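
The gtco change bounds how deep HID collections may nest, since each level consumes one byte of the indent string; a crafted descriptor could previously overflow that buffer, and an unmatched end tag could underflow the counter. A reduced sketch of the guard (the limit value is reused from the hunk, everything else is illustrative):

#define MAX_COLLECTION_LEVELS 10

/* Returns the new depth, or -1 if the descriptor is malformed. */
static int enter_collection(int depth)
{
	if (depth == MAX_COLLECTION_LEVELS)
		return -1;	/* would overflow indentstr[] */
	return depth + 1;
}

static int leave_collection(int depth)
{
	if (depth == 0)
		return -1;	/* end tag without a matching start */
	return depth - 1;
}
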
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 109de67d5d72..2d06c507fbed 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -241,18 +241,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
pos = pos->next;
} else if ((start >= a) && (end <= b)) {
if (new->type == type)
- goto done;
+ return 0;
else
pos = pos->next;
} else {
if (new->type == type) {
phys_addr_t new_start = min(a, start);
phys_addr_t new_end = max(b, end);
+ int ret;

list_del(&entry->list);
entry->start = new_start;
entry->length = new_end - new_start + 1;
- iommu_insert_resv_region(entry, regions);
+ ret = iommu_insert_resv_region(entry, regions);
+ kfree(entry);
+ return ret;
} else {
pos = pos->next;
}
@@ -265,7 +268,6 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
return -ENOMEM;

list_add_tail(&region->list, pos);
-done:
return 0;
}
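
For the iommu hunk: iommu_insert_resv_region() always stores a freshly allocated copy of the region it is handed (see the allocation just above), so when the overlap case detaches an existing entry, widens it and re-inserts it, that entry only serves as a template. The fix frees the template and propagates the recursive call's status instead of discarding it. A reduced userspace model of that ownership rule (names hypothetical):

#include <stdlib.h>
#include <string.h>

struct region { unsigned long start, len; };

/* Stores a copy of *tmpl; the caller keeps ownership of the template. */
static int insert_copy(struct region **slot, const struct region *tmpl)
{
	struct region *copy = malloc(sizeof(*copy));

	if (!copy)
		return -1;
	memcpy(copy, tmpl, sizeof(*copy));
	*slot = copy;
	return 0;
}

static int merge_and_reinsert(struct region **slot, struct region *stale,
			      unsigned long new_start, unsigned long new_len)
{
	int ret;

	stale->start = new_start;
	stale->len = new_len;
	ret = insert_copy(slot, stale);
	free(stale);	/* the detached entry was only a template */
	return ret;	/* and failures are no longer swallowed */
}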

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 15e55d327505..1bc86032d409 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -472,8 +472,12 @@ static void gic_deactivate_unhandled(u32 irqnr)

static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
+ bool irqs_enabled = interrupts_enabled(regs);
int err;

+ if (irqs_enabled)
+ nmi_enter();
+
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
/*
@@ -485,6 +489,9 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
err = handle_domain_nmi(gic_data.domain, irqnr, regs);
if (err)
gic_deactivate_unhandled(irqnr);
+
+ if (irqs_enabled)
+ nmi_exit();
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 7b531fd075b8..7599b10ecf09 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -73,6 +73,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ }
};

diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 6ca868868fee..7393d64757a1 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -323,14 +323,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
int nr_pages)
{
- struct bio_vec bv;
- int i;
-
- WARN_ON(off + nr_pages != bio->bi_vcnt);
-
- for (i = off; i < nr_pages + off; i++) {
- bv = bio->bi_io_vec[i];
- mempool_free(bv.bv_page, &pblk->page_bio_pool);
+ struct bio_vec *bv;
+ struct page *page;
+ int i, e, nbv = 0;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ bv = &bio->bi_io_vec[i];
+ page = bv->bv_page;
+ for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
+ if (nbv >= off)
+ mempool_free(page++, &pblk->page_bio_pool);
}
}
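
The pblk change stops assuming one freed page per bio_vec: after merging, a single bio_vec can cover several PBLK_EXPOSED_PAGE_SIZE chunks, so the function now counts chunks across the whole bio and skips the first 'off' of them. A counting illustration (a 4 KiB exposed-page size is an assumption):

#include <stdio.h>

#define EXPOSED_PAGE_SIZE 4096U	/* assumed value of PBLK_EXPOSED_PAGE_SIZE */

int main(void)
{
	unsigned int bv_len[] = { 16384, 8192 };	/* two merged bio_vecs */
	unsigned int off = 2, nbv = 0, freed = 0, i, e;

	for (i = 0; i < 2; i++)
		for (e = 0; e < bv_len[i]; e += EXPOSED_PAGE_SIZE, nbv++)
			if (nbv >= off)
				freed++;

	/* 6 exposed pages in total, the last 4 of them are freed */
	printf("freed %u of %u exposed pages\n", freed, nbv);
	return 0;
}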

diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index f8986effcb50..6f776823b9ba 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
struct bucket *b;
long r;

+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+ return -1;
+
/* fastpath */
if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
fifo_pop(&ca->free[reserve], r))
@@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
{
int i;

+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
lockdep_assert_held(&c->bucket_lock);
BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index fdf75352e16a..e30a983a68cd 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -726,8 +726,6 @@ struct cache_set {

#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
-
- DECLARE_HEAP(struct btree *, flush_btree);
};

struct bbio {
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index c25097968319..4d93f07f63e5 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)

WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

+ /*
+ * Read-ahead requests on a degrading and recovering md raid
+ * (e.g. raid6) device may be failed immediately by the md raid
+ * code, which is not a real hardware media failure. So we should
+ * not count failed REQ_RAHEAD bios towards dc->io_errors.
+ */
+ if (bio->bi_opf & REQ_RAHEAD) {
+ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+ dc->backing_dev_name);
+ return;
+ }
+
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
pr_err("%s: IO error on backing device, unrecoverable",
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6c94fa007796..d19dc6777a81 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -390,12 +390,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
}

/* Journalling */
-#define journal_max_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
-#define journal_min_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))

static void btree_flush_write(struct cache_set *c)
{
@@ -403,35 +397,25 @@ static void btree_flush_write(struct cache_set *c)
* Try to find the btree node that references the oldest journal
* entry; best is our current candidate and is locked if non-NULL:
*/
- struct btree *b;
- int i;
+ struct btree *b, *best;
+ unsigned int i;

atomic_long_inc(&c->flush_write);
-
retry:
- spin_lock(&c->journal.lock);
- if (heap_empty(&c->flush_btree)) {
- for_each_cached_btree(b, c, i)
- if (btree_current_write(b)->journal) {
- if (!heap_full(&c->flush_btree))
- heap_add(&c->flush_btree, b,
- journal_max_cmp);
- else if (journal_max_cmp(b,
- heap_peek(&c->flush_btree))) {
- c->flush_btree.data[0] = b;
- heap_sift(&c->flush_btree, 0,
- journal_max_cmp);
- }
+ best = NULL;
+
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+ best = b;
+ else if (journal_pin_cmp(c,
+ btree_current_write(best)->journal,
+ btree_current_write(b)->journal)) {
+ best = b;
}
+ }

- for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
- heap_sift(&c->flush_btree, i, journal_min_cmp);
- }
-
- b = NULL;
- heap_pop(&c->flush_btree, b, journal_min_cmp);
- spin_unlock(&c->journal.lock);
-
+ b = best;
if (b) {
mutex_lock(&b->write_lock);
if (!btree_current_write(b)->journal) {
@@ -810,6 +794,10 @@ atomic_t *bch_journal(struct cache_set *c,
struct journal_write *w;
atomic_t *ret;

+ /* No journaling if CACHE_SET_IO_DISABLE set already */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
if (!CACHE_SYNC(&c->sb))
return NULL;

@@ -854,7 +842,6 @@ void bch_journal_free(struct cache_set *c)
free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
free_fifo(&c->journal.pin);
- free_heap(&c->flush_btree);
}

int bch_journal_alloc(struct cache_set *c)
@@ -869,8 +856,7 @@ int bch_journal_alloc(struct cache_set *c)
j->w[0].c = c;
j->w[1].c = c;

- if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
- !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
!(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
!(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
return -ENOMEM;
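
With the flush_btree heap gone, btree_flush_write() simply scans every cached btree node once and keeps the candidate whose pending write pins the oldest journal entry. The pattern, reduced to its core (types and the ordering convention are illustrative, a lower index meaning an older pin):

struct node { unsigned long journal_pin; };	/* 0 == nothing pinned */

static struct node *pick_oldest(struct node *nodes, unsigned int n)
{
	struct node *best = NULL;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!nodes[i].journal_pin)
			continue;		/* no journal entry to flush */
		if (!best || nodes[i].journal_pin < best->journal_pin)
			best = &nodes[i];	/* oldest pin wins */
	}
	return best;
}
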
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e489d2459569..e10323d50549 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1186,18 +1186,16 @@ static void cached_dev_free(struct closure *cl)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

- mutex_lock(&bch_register_lock);
-
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);

if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
- if (dc->writeback_write_wq)
- destroy_workqueue(dc->writeback_write_wq);
if (!IS_ERR_OR_NULL(dc->status_update_thread))
kthread_stop(dc->status_update_thread);

+ mutex_lock(&bch_register_lock);
+
if (atomic_read(&dc->running))
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
@@ -1431,8 +1429,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)

bool bch_cached_dev_error(struct cached_dev *dc)
{
- struct cache_set *c;
-
if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return false;

@@ -1443,21 +1439,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
pr_err("stop %s: too many IO errors on backing device %s\n",
dc->disk.disk->disk_name, dc->backing_dev_name);

- /*
- * If the cached device is still attached to a cache set,
- * even dc->io_disable is true and no more I/O requests
- * accepted, cache device internal I/O (writeback scan or
- * garbage collection) may still prevent bcache device from
- * being stopped. So here CACHE_SET_IO_DISABLE should be
- * set to c->flags too, to make the internal I/O to cache
- * device rejected and stopped immediately.
- * If c is NULL, that means the bcache device is not attached
- * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
- */
- c = dc->disk.c;
- if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_info("CACHE_SET_IO_DISABLE already set");
-
bcache_device_stop(&dc->disk);
return true;
}
@@ -1557,7 +1538,7 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);

- if (c->gc_thread)
+ if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);

if (!IS_ERR_OR_NULL(c->root))
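
The gc_thread check is widened because kthread pointers can also hold an ERR_PTR() left over from a failed kthread_create()/kthread_run(); a plain NULL test would still hand that bad pointer to kthread_stop(). The usual pattern, sketched with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *example_thread;

static int example_start(int (*fn)(void *), void *data)
{
	example_thread = kthread_run(fn, data, "example_gc");
	return IS_ERR(example_thread) ? PTR_ERR(example_thread) : 0;
}

static void example_stop(void)
{
	/* Covers both "never started" (NULL) and "failed to start" (ERR_PTR). */
	if (!IS_ERR_OR_NULL(example_thread))
		kthread_stop(example_thread);
}
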
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index eb42dcf52277..3ae271f95ffe 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -180,7 +180,7 @@ SHOW(__bch_cached_dev)
var_print(writeback_percent);
sysfs_hprint(writeback_rate,
wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
- sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
+ sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
sysfs_printf(io_error_limit, "%i", dc->error_limit);
sysfs_printf(io_disable, "%i", dc->io_disable);
var_print(writeback_rate_update_seconds);
@@ -464,7 +464,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
- &sysfs_errors,
+ &sysfs_io_errors,
&sysfs_io_error_limit,
&sysfs_io_disable,
&sysfs_dirty_data,
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 00aab6abcfe4..b1f5b7aea872 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -113,8 +113,6 @@ do { \

#define heap_full(h) ((h)->used == (h)->size)

-#define heap_empty(h) ((h)->used == 0)
-
#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 73f0efac2b9f..e9ffcea1ca50 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -735,6 +735,10 @@ static int bch_writeback_thread(void *arg)
}
}

+ if (dc->writeback_write_wq) {
+ flush_workqueue(dc->writeback_write_wq);
+ destroy_workqueue(dc->writeback_write_wq);
+ }
cached_dev_put(dc);
wait_for_kthread_stop();

@@ -830,6 +834,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
"bcache_writeback");
if (IS_ERR(dc->writeback_thread)) {
cached_dev_put(dc);
+ destroy_workqueue(dc->writeback_write_wq);
return PTR_ERR(dc->writeback_thread);
}
dc->writeback_running = true;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 1ecef76225a1..76fb431a7712 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1602,9 +1602,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long freed;

c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_FS)
- dm_bufio_lock(c);
- else if (!dm_bufio_trylock(c))
+ if (!dm_bufio_trylock(c))
return SHRINK_STOP;

freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index d8334cd45d7c..4cdde7a02e94 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1593,30 +1593,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
return zone;
}

-/*
- * Activate a zone (increment its reference count).
- */
-void dmz_activate_zone(struct dm_zone *zone)
-{
- set_bit(DMZ_ACTIVE, &zone->flags);
- atomic_inc(&zone->refcount);
-}
-
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * and clears the active state of the zone once the count reaches 0,
- * indicating that all BIOs to the zone have completed. Returns
- * true if the zone was deactivated.
- */
-void dmz_deactivate_zone(struct dm_zone *zone)
-{
- if (atomic_dec_and_test(&zone->refcount)) {
- WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
- clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
- smp_mb__after_atomic();
- }
-}
-
/*
* Get the zone mapping a chunk, if the chunk is mapped already.
* If no mapping exist and the operation is WRITE, a zone is
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 12419f0bfe78..ed8de49c9a08 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -115,7 +115,6 @@ enum {
DMZ_BUF,

/* Zone internal state */
- DMZ_ACTIVE,
DMZ_RECLAIM,
DMZ_SEQ_WRITE_ERR,
};
@@ -128,7 +127,6 @@ enum {
#define dmz_is_empty(z) ((z)->wp_block == 0)
#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
-#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags)
#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)

@@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);

-void dmz_activate_zone(struct dm_zone *zone);
-void dmz_deactivate_zone(struct dm_zone *zone);
+/*
+ * Activate a zone (increment its reference count).
+ */
+static inline void dmz_activate_zone(struct dm_zone *zone)
+{
+ atomic_inc(&zone->refcount);
+}
+
+/*
+ * Deactivate a zone. This decrements the zone reference counter; a
+ * count of 0 indicates that all BIOs to the zone have completed.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+ return atomic_read(&zone->refcount);
+}

int dmz_lock_zone_reclaim(struct dm_zone *zone);
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
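
With the DMZ_ACTIVE flag gone, the zone refcount alone expresses activity: a zone is active exactly while at least one BIO holds a reference, which is what the new inline dmz_is_active() reports. A usage sketch of the pairing (the caller below is illustrative, not dm-zoned code):

static void example_bio_lifetime(struct dm_zone *zone)
{
	dmz_activate_zone(zone);	/* refcount > 0 => dmz_is_active() is true */
	/* ... BIO in flight against the zone ... */
	dmz_deactivate_zone(zone);	/* completion drops the reference */
}
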
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a4d2f552c8ab..05e288cba143 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7674,7 +7674,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
- int err = -EEXIST;
+ int ret, err = -EEXIST;
int disk;
struct disk_info *p;
int first = 0;
@@ -7689,7 +7689,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* The array is in readonly mode if journal is missing, so no
* write requests running. We should be safe
*/
- log_init(conf, rdev, false);
+ ret = log_init(conf, rdev, false);
+ if (ret)
+ return ret;
+
+ ret = r5l_start(conf->log);
+ if (ret)
+ return ret;
+
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 9c163f658aaf..fea87ec80e55 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -207,6 +207,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

+ /* Did it wrap around? */
+ if (size < vb->planes[plane].length)
+ goto free;
+
mem_priv = call_ptr_memop(vb, alloc,
q->alloc_devs[plane] ? : q->dev,
q->dma_attrs, size, q->dma_dir, q->gfp_flags);
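
The new videobuf2 check catches PAGE_ALIGN() wrapping around: with a plane length near ULONG_MAX the aligned size overflows to a small value (or zero), ends up below the requested length, and the buffer is rejected instead of being under-allocated. A standalone demonstration (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long length = ~0UL - 100;		/* absurd plane length */
	unsigned long size = PAGE_ALIGN(length);	/* wraps around to 0 */

	printf("length=%lu size=%lu reject=%d\n", length, size, size < length);
	return 0;
}
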
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 270c3162fdcb..61670a1af713 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
gfp_t gfp_flags)
{
unsigned int last_page = 0;
- int size = buf->size;
+ unsigned long size = buf->size;

while (size > 0) {
struct page *pages;
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index b233b7be0b84..e6aaf4973aef 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -75,8 +75,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };

-#define _R 4
-#define _P 32
+#define _R_VAL 4
+#define _P_VAL 32
#define _ri 4000000

// setup register 0
@@ -91,14 +91,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
else
reg1[1] = 0x0c;

- if (_P == 64)
+ if (_P_VAL == 64)
reg1[1] |= 0x40;
if (c->frequency >= 1525000)
reg1[1] |= 0x80;

// register 2
- reg2[1] = (_R >> 8) & 0x03;
- reg2[2] = _R;
+ reg2[1] = (_R_VAL >> 8) & 0x03;
+ reg2[2] = _R_VAL;
if (c->frequency < 1455000)
reg2[1] |= 0x1c;
else if (c->frequency < 1630000)
@@ -110,18 +110,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
* The N divisor ratio (note: c->frequency is in kHz, but we
* need it in Hz)
*/
- prediv = (c->frequency * _R) / (_ri / 1000);
- div = prediv / _P;
+ prediv = (c->frequency * _R_VAL) / (_ri / 1000);
+ div = prediv / _P_VAL;
reg1[1] |= (div >> 9) & 0x03;
reg1[2] = div >> 1;
reg1[3] = (div << 7);
- priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
+ priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;

// Finally, calculate and store the value for A
- reg1[3] |= (prediv - (div*_P)) & 0x7f;
+ reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;

-#undef _R
-#undef _P
+#undef _R_VAL
+#undef _P_VAL
#undef _ri

if (fe->ops.i2c_gate_ctrl)
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index a64fca82e0c4..55a3a2dee2de 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
new file mode 100644
index 000000000000..2ad6bdf1a9fc
--- /dev/null
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -0,0 +1,1997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices ADV7511 HDMI Transmitter Device Driver
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
+ * Devices ADV7511 DRM driver (config fragment CONFIG_DRM_I2C_ADV7511).
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/hdmi.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/i2c/adv7511.h>
+#include <media/cec.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL v2");
+
+#define MASK_ADV7511_EDID_RDY_INT 0x04
+#define MASK_ADV7511_MSEN_INT 0x40
+#define MASK_ADV7511_HPD_INT 0x80
+
+#define MASK_ADV7511_HPD_DETECT 0x40
+#define MASK_ADV7511_MSEN_DETECT 0x20
+#define MASK_ADV7511_EDID_RDY 0x10
+
+#define EDID_MAX_RETRIES (8)
+#define EDID_DELAY 250
+#define EDID_MAX_SEGM 8
+
+#define ADV7511_MAX_WIDTH 1920
+#define ADV7511_MAX_HEIGHT 1200
+#define ADV7511_MIN_PIXELCLOCK 20000000
+#define ADV7511_MAX_PIXELCLOCK 225000000
+
+#define ADV7511_MAX_ADDRS (3)
+
+/*
+**********************************************************************
+*
+* Arrays with configuration parameters for the ADV7511
+*
+**********************************************************************
+*/
+
+struct i2c_reg_value {
+ unsigned char reg;
+ unsigned char value;
+};
+
+struct adv7511_state_edid {
+ /* total number of blocks */
+ u32 blocks;
+ /* Number of segments read */
+ u32 segments;
+ u8 data[EDID_MAX_SEGM * 256];
+ /* Number of EDID read retries left */
+ unsigned read_retries;
+ bool complete;
+};
+
+struct adv7511_state {
+ struct adv7511_platform_data pdata;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler hdl;
+ int chip_revision;
+ u8 i2c_edid_addr;
+ u8 i2c_pktmem_addr;
+ u8 i2c_cec_addr;
+
+ struct i2c_client *i2c_cec;
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV7511_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
+
+ /* Is the adv7511 powered on? */
+ bool power_on;
+ /* Did we receive hotplug and rx-sense signals? */
+ bool have_monitor;
+ bool enabled_irq;
+ /* timings from s_dv_timings */
+ struct v4l2_dv_timings dv_timings;
+ u32 fmt_code;
+ u32 colorspace;
+ u32 ycbcr_enc;
+ u32 quantization;
+ u32 xfer_func;
+ u32 content_type;
+ /* controls */
+ struct v4l2_ctrl *hdmi_mode_ctrl;
+ struct v4l2_ctrl *hotplug_ctrl;
+ struct v4l2_ctrl *rx_sense_ctrl;
+ struct v4l2_ctrl *have_edid0_ctrl;
+ struct v4l2_ctrl *rgb_quantization_range_ctrl;
+ struct v4l2_ctrl *content_type_ctrl;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_pktmem;
+ struct adv7511_state_edid edid;
+ /* Running counter of the number of detected EDIDs (for debugging) */
+ unsigned edid_detect_counter;
+ struct workqueue_struct *work_queue;
+ struct delayed_work edid_handler; /* work entry */
+};
+
+static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
+static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
+static void adv7511_setup(struct v4l2_subdev *sd);
+static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+
+
+static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
+ ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
+};
+
+static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7511_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
+}
+
+/* ------------------------ I2C ----------------------------------------------- */
+
+static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
+ u8 command, bool check)
+{
+ union i2c_smbus_data data;
+
+ if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_BYTE_DATA, &data))
+ return data.byte;
+ if (check)
+ v4l_err(client, "error reading %02x, %02x\n",
+ client->addr, command);
+ return -1;
+}
+
+static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
+{
+ int i;
+ for (i = 0; i < 3; i++) {
+ int ret = adv_smbus_read_byte_data_check(client, command, true);
+ if (ret >= 0) {
+ if (i)
+ v4l_err(client, "read ok after %d retries\n", i);
+ return ret;
+ }
+ }
+ v4l_err(client, "read failed\n");
+ return -1;
+}
+
+static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_read_byte_data(client, reg);
+}
+
+static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+ v4l2_err(sd, "%s: i2c write error\n", __func__);
+ return ret;
+}
+
+/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
+ and then the value-mask (to be OR-ed). */
+static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
+{
+ adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
+}
+
+static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
+ u8 command, unsigned length, u8 *values)
+{
+ union i2c_smbus_data data;
+ int ret;
+
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+ data.block[0] = length;
+
+ ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_I2C_BLOCK_DATA, &data);
+ memcpy(values, data.block + 1, length);
+ return ret;
+}
+
+static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int i;
+ int err = 0;
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
+ err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
+ I2C_SMBUS_BLOCK_MAX, buf + i);
+ if (err)
+ v4l2_err(sd, "%s: i2c read error\n", __func__);
+}
+
+static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ return i2c_smbus_read_byte_data(state->i2c_cec, reg);
+}
+
+static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+ v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
+ return ret;
+}
+
+static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
+ u8 val)
+{
+ return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
+}
+
+static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
+}
+
+static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+ v4l2_err(sd, "%s: i2c write error\n", __func__);
+ return ret;
+}
+
+/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
+ and then the value-mask (to be OR-ed). */
+static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
+{
+ adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
+}
+
+static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
+{
+ return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
+}
+
+static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
+{
+ return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
+}
+
+static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
+{
+ adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
+}
+
+static void adv7511_csc_coeff(struct v4l2_subdev *sd,
+ u16 A1, u16 A2, u16 A3, u16 A4,
+ u16 B1, u16 B2, u16 B3, u16 B4,
+ u16 C1, u16 C2, u16 C3, u16 C4)
+{
+ /* A */
+ adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
+ adv7511_wr(sd, 0x19, A1);
+ adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
+ adv7511_wr(sd, 0x1B, A2);
+ adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
+ adv7511_wr(sd, 0x1d, A3);
+ adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
+ adv7511_wr(sd, 0x1f, A4);
+
+ /* B */
+ adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
+ adv7511_wr(sd, 0x21, B1);
+ adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
+ adv7511_wr(sd, 0x23, B2);
+ adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
+ adv7511_wr(sd, 0x25, B3);
+ adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
+ adv7511_wr(sd, 0x27, B4);
+
+ /* C */
+ adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
+ adv7511_wr(sd, 0x29, C1);
+ adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
+ adv7511_wr(sd, 0x2B, C2);
+ adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
+ adv7511_wr(sd, 0x2D, C3);
+ adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
+ adv7511_wr(sd, 0x2F, C4);
+}
+
+static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
+{
+ if (enable) {
+ u8 csc_mode = 0;
+ adv7511_csc_conversion_mode(sd, csc_mode);
+ adv7511_csc_coeff(sd,
+ 4096-564, 0, 0, 256,
+ 0, 4096-564, 0, 256,
+ 0, 0, 4096-564, 256);
+ /* enable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+ /* AVI infoframe: Limited range RGB (16-235) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+ } else {
+ /* disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* AVI infoframe: Full range RGB (0-255) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
+ }
+}
+
+static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ /* Only makes sense for RGB formats */
+ if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
+ /* so just keep quantization */
+ adv7511_csc_rgb_full2limit(sd, false);
+ return;
+ }
+
+ switch (ctrl->val) {
+ case V4L2_DV_RGB_RANGE_AUTO:
+ /* automatic */
+ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+ /* CE format, RGB limited range (16-235) */
+ adv7511_csc_rgb_full2limit(sd, true);
+ } else {
+ /* not CE format, RGB full range (0-255) */
+ adv7511_csc_rgb_full2limit(sd, false);
+ }
+ break;
+ case V4L2_DV_RGB_RANGE_LIMITED:
+ /* RGB limited range (16-235) */
+ adv7511_csc_rgb_full2limit(sd, true);
+ break;
+ case V4L2_DV_RGB_RANGE_FULL:
+ /* RGB full range (0-255) */
+ adv7511_csc_rgb_full2limit(sd, false);
+ break;
+ }
+}
+
+/* ------------------------------ CTRL OPS ------------------------------ */
+
+static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+
+ if (state->hdmi_mode_ctrl == ctrl) {
+ /* Set HDMI or DVI-D */
+ adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+ return 0;
+ }
+ if (state->rgb_quantization_range_ctrl == ctrl) {
+ adv7511_set_rgb_quantization_mode(sd, ctrl);
+ return 0;
+ }
+ if (state->content_type_ctrl == ctrl) {
+ u8 itc, cn;
+
+ state->content_type = ctrl->val;
+ itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
+ adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
+ adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
+ .s_ctrl = adv7511_s_ctrl,
+};
+
+/* ---------------------------- CORE OPS ------------------------------------------- */
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static void adv7511_inv_register(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_info(sd, "0x000-0x0ff: Main Map\n");
+ if (state->i2c_cec)
+ v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
+}
+
+static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ reg->size = 1;
+ switch (reg->reg >> 8) {
+ case 0:
+ reg->val = adv7511_rd(sd, reg->reg & 0xff);
+ break;
+ case 1:
+ if (state->i2c_cec) {
+ reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
+ break;
+ }
+ /* fall through */
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7511_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ switch (reg->reg >> 8) {
+ case 0:
+ adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
+ break;
+ case 1:
+ if (state->i2c_cec) {
+ adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ break;
+ }
+ /* fall through */
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7511_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+#endif
+
+struct adv7511_cfg_read_infoframe {
+ const char *desc;
+ u8 present_reg;
+ u8 present_mask;
+ u8 header[3];
+ u16 payload_addr;
+};
+
+static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
+{
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ return 256 - csum;
+}
+
+static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ union hdmi_infoframe frame;
+ u8 buffer[32];
+ u8 len;
+ int i;
+
+ if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
+ v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
+ return;
+ }
+
+ memcpy(buffer, cri->header, sizeof(cri->header));
+
+ len = buffer[2];
+
+ if (len + 4 > sizeof(buffer)) {
+ v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
+ return;
+ }
+
+ if (cri->payload_addr >= 0x100) {
+ for (i = 0; i < len; i++)
+ buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
+ } else {
+ for (i = 0; i < len; i++)
+ buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
+ }
+ buffer[3] = 0;
+ buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
+
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+ v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
+ return;
+ }
+
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+}
+
+static void adv7511_log_infoframes(struct v4l2_subdev *sd)
+{
+ static const struct adv7511_cfg_read_infoframe cri[] = {
+ { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
+ { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
+ { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cri); i++)
+ log_infoframe(sd, &cri[i]);
+}
+
+static int adv7511_log_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_state_edid *edid = &state->edid;
+ int i;
+
+ static const char * const states[] = {
+ "in reset",
+ "reading EDID",
+ "idle",
+ "initializing HDCP",
+ "HDCP enabled",
+ "initializing HDCP repeater",
+ "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
+ };
+ static const char * const errors[] = {
+ "no error",
+ "bad receiver BKSV",
+ "Ri mismatch",
+ "Pj mismatch",
+ "i2c error",
+ "timed out",
+ "max repeater cascade exceeded",
+ "hash check failed",
+ "too many devices",
+ "9", "A", "B", "C", "D", "E", "F"
+ };
+
+ v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
+ v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
+ (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
+ (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
+ edid->segments ? "found" : "no",
+ edid->blocks);
+ v4l2_info(sd, "%s output %s\n",
+ (adv7511_rd(sd, 0xaf) & 0x02) ?
+ "HDMI" : "DVI-D",
+ (adv7511_rd(sd, 0xa1) & 0x3c) ?
+ "disabled" : "enabled");
+ v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
+ states[adv7511_rd(sd, 0xc8) & 0xf],
+ errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
+ adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
+ v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+ if (adv7511_rd(sd, 0xaf) & 0x02) {
+ /* HDMI only */
+ u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
+ u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
+ adv7511_rd(sd, 0x02) << 8 |
+ adv7511_rd(sd, 0x03);
+ u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
+ u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
+ u32 CTS;
+
+ if (manual_cts)
+ CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
+ adv7511_rd(sd, 0x08) << 8 |
+ adv7511_rd(sd, 0x09);
+ else
+ CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
+ adv7511_rd(sd, 0x05) << 8 |
+ adv7511_rd(sd, 0x06);
+ v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
+ manual_cts ? "manual" : "automatic", N, CTS);
+ v4l2_info(sd, "VIC: detected %d, sent %d\n",
+ vic_detect, vic_sent);
+ adv7511_log_infoframes(sd);
+ }
+ if (state->dv_timings.type == V4L2_DV_BT_656_1120)
+ v4l2_print_dv_timings(sd->name, "timings: ",
+ &state->dv_timings, false);
+ else
+ v4l2_info(sd, "no timings set\n");
+ v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
+
+ if (state->i2c_cec == NULL)
+ return 0;
+
+ v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
+
+ v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
+ "enabled" : "disabled");
+ if (state->cec_enabled_adap) {
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (is_valid)
+ v4l2_info(sd, "CEC Logical Address: 0x%x\n",
+ state->cec_addr[i]);
+ }
+ }
+ v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
+ return 0;
+}
+
+/* Power up/down adv7511 */
+static int adv7511_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ const int retries = 20;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
+
+ state->power_on = on;
+
+ if (!on) {
+ /* Power down */
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+ return true;
+ }
+
+ /* Power up */
+ /* The adv7511 does not always come up immediately.
+ Retry multiple times. */
+ for (i = 0; i < retries; i++) {
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
+ if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
+ break;
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+ msleep(10);
+ }
+ if (i == retries) {
+ v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
+ adv7511_s_power(sd, 0);
+ return false;
+ }
+ if (i > 1)
+ v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
+
+ /* Reserved registers that must be set */
+ adv7511_wr(sd, 0x98, 0x03);
+ adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
+ adv7511_wr(sd, 0x9c, 0x30);
+ adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
+ adv7511_wr(sd, 0xa2, 0xa4);
+ adv7511_wr(sd, 0xa3, 0xa4);
+ adv7511_wr(sd, 0xe0, 0xd0);
+ adv7511_wr(sd, 0xf9, 0x00);
+
+ adv7511_wr(sd, 0x43, state->i2c_edid_addr);
+ adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
+
+ /* Set number of attempts to read the EDID */
+ adv7511_wr(sd, 0xc9, 0xf);
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv7511_state *state = cec_get_drvdata(adap);
+ struct v4l2_subdev *sd = &state->sd;
+
+ if (state->i2c_cec == NULL)
+ return -EIO;
+
+ if (!state->cec_enabled_adap && enable) {
+ /* power up cec section */
+ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
+ /* legacy mode and clear all rx buffers */
+ adv7511_cec_write(sd, 0x4a, 0x00);
+ adv7511_cec_write(sd, 0x4a, 0x07);
+ adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready 1 */
+ if (state->enabled_irq)
+ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
+ } else if (state->cec_enabled_adap && !enable) {
+ if (state->enabled_irq)
+ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
+ /* disable address mask 1-3 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
+ /* power down cec section */
+ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
+ state->cec_valid_addrs = 0;
+ }
+ state->cec_enabled_adap = enable;
+ return 0;
+}
+
+static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv7511_state *state = cec_get_drvdata(adap);
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i, free_idx = ADV7511_MAX_ADDRS;
+
+ if (!state->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
+ state->cec_valid_addrs = 0;
+ return 0;
+ }
+
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && state->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV7511_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV7511_MAX_ADDRS)
+ return -ENXIO;
+ }
+ state->cec_addr[i] = addr;
+ state->cec_valid_addrs |= 1 << i;
+
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
+ /* set address for mask 0 */
+ adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
+ /* set address for mask 1 */
+ adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
+ /* set address for mask 2 */
+ adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv7511_state *state = cec_get_drvdata(adap);
+ struct v4l2_subdev *sd = &state->sd;
+ u8 len = msg->len;
+ unsigned int i;
+
+ v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
+
+ if (len > 16) {
+ v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
+ return -EINVAL;
+ }
+
+ /*
+ * The number of retries is the number of attempts - 1, but retry
+ * at least once. It's not clear if a value of 0 is allowed, so
+ * let's do at least one retry.
+ */
+ adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
+
+ /* clear cec tx irq status */
+ adv7511_wr(sd, 0x97, 0x38);
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ adv7511_cec_write(sd, i, msg->msg[i]);
+
+ /* set length (data + header) */
+ adv7511_cec_write(sd, 0x10, len);
+ /* start transmit, enable tx */
+ adv7511_cec_write(sd, 0x11, 0x01);
+ return 0;
+}
+
+static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
+ return;
+ }
+
+ if (tx_raw_status & 0x10) {
+ v4l2_dbg(1, debug, sd,
+ "%s: tx raw: arbitration lost\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ return;
+ }
+ if (tx_raw_status & 0x08) {
+ u8 status;
+ u8 nack_cnt;
+ u8 low_drive_cnt;
+
+ v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ cec_transmit_done(state->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, 0);
+ return;
+ }
+ if (tx_raw_status & 0x20) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ return;
+ }
+}
+
+static const struct cec_adap_ops adv7511_cec_adap_ops = {
+ .adap_enable = adv7511_cec_adap_enable,
+ .adap_log_addr = adv7511_cec_adap_log_addr,
+ .adap_transmit = adv7511_cec_adap_transmit,
+};
+#endif
+
+/* Enable interrupts */
+static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
+ u8 irqs_rd;
+ int retries = 100;
+
+ v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
+
+ if (state->enabled_irq == enable)
+ return;
+ state->enabled_irq = enable;
+
+ /* The datasheet says that the EDID ready interrupt should be
+ disabled if there is no hotplug. */
+ if (!enable)
+ irqs = 0;
+ else if (adv7511_have_hotplug(sd))
+ irqs |= MASK_ADV7511_EDID_RDY_INT;
+
+ /*
+ * This i2c write can fail (approx. 1 in 1000 writes). But it
+ * is essential that this register is correct, so retry it
+ * multiple times.
+ *
+ * Note that the i2c write does not report an error, but the readback
+ * clearly shows the wrong value.
+ */
+ do {
+ adv7511_wr(sd, 0x94, irqs);
+ irqs_rd = adv7511_rd(sd, 0x94);
+ } while (retries-- && irqs_rd != irqs);
+
+ if (irqs_rd != irqs)
+ v4l2_err(sd, "Could not set interrupts: hw failure?\n");
+
+ adv7511_wr_and_or(sd, 0x95, 0xc0,
+ (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
+}
+
+/* Interrupt handler */
+static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+ u8 irq_status;
+ u8 cec_irq;
+
+ /* disable interrupts to prevent a race condition */
+ adv7511_set_isr(sd, false);
+ irq_status = adv7511_rd(sd, 0x96);
+ cec_irq = adv7511_rd(sd, 0x97);
+ /* clear detected interrupts */
+ adv7511_wr(sd, 0x96, irq_status);
+ adv7511_wr(sd, 0x97, cec_irq);
+
+ v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
+ irq_status, cec_irq);
+
+ if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
+ adv7511_check_monitor_present_status(sd);
+ if (irq_status & MASK_ADV7511_EDID_RDY_INT)
+ adv7511_check_edid_status(sd);
+
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+ if (cec_irq & 0x38)
+ adv_cec_tx_raw_status(sd, cec_irq);
+
+ if (cec_irq & 1) {
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct cec_msg msg;
+
+ msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
+
+ v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
+ msg.len);
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (msg.len) {
+ u8 i;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
+
+ adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
+ adv7511_cec_write(sd, 0x4a, 1);
+ cec_received_msg(state->cec_adap, &msg);
+ }
+ }
+#endif
+
+ /* enable interrupts */
+ adv7511_set_isr(sd, true);
+
+ if (handled)
+ *handled = true;
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops adv7511_core_ops = {
+ .log_status = adv7511_log_status,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = adv7511_g_register,
+ .s_register = adv7511_s_register,
+#endif
+ .s_power = adv7511_s_power,
+ .interrupt_service_routine = adv7511_isr,
+};
+
+/* ------------------------------ VIDEO OPS ------------------------------ */
+
+/* Enable/disable adv7511 output */
+static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+ adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
+ if (enable) {
+ adv7511_check_monitor_present_status(sd);
+ } else {
+ adv7511_s_power(sd, 0);
+ state->have_monitor = false;
+ }
+ return 0;
+}
+
+static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 fps;
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ /* quick sanity check */
+ if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
+ return -EINVAL;
+
+ /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
+ if the format is one of the CEA or DMT timings. */
+ v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
+
+ /* save timings */
+ state->dv_timings = *timings;
+
+ /* set h/vsync polarities */
+ adv7511_wr_and_or(sd, 0x17, 0x9f,
+ ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
+ ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
+
+ fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
+ switch (fps) {
+ case 24:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
+ break;
+ case 25:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
+ break;
+ case 30:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
+ break;
+ default:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
+ break;
+ }
+
+ /* update quantization range based on new dv_timings */
+ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
+
+ return 0;
+}
+
+static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (!timings)
+ return -EINVAL;
+
+ *timings = state->dv_timings;
+
+ return 0;
+}
+
+static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ if (timings->pad != 0)
+ return -EINVAL;
+
+ return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
+}
+
+static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (cap->pad != 0)
+ return -EINVAL;
+
+ *cap = adv7511_timings_cap;
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops adv7511_video_ops = {
+ .s_stream = adv7511_s_stream,
+ .s_dv_timings = adv7511_s_dv_timings,
+ .g_dv_timings = adv7511_g_dv_timings,
+};
+
+/* ------------------------------ AUDIO OPS ------------------------------ */
+static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
+{
+ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+
+ if (enable)
+ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
+ else
+ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
+
+ return 0;
+}
+
+static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+{
+ u32 N;
+
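+ /* N values per sampling rate, as recommended by the HDMI specification for audio clock regeneration. */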
+ switch (freq) {
+ case 32000: N = 4096; break;
+ case 44100: N = 6272; break;
+ case 48000: N = 6144; break;
+ case 88200: N = 12544; break;
+ case 96000: N = 12288; break;
+ case 176400: N = 25088; break;
+ case 192000: N = 24576; break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set N (used with CTS to regenerate the audio clock) */
+ adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
+ adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
+ adv7511_wr(sd, 0x03, N & 0xff);
+
+ return 0;
+}
+
+static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+{
+ u32 i2s_sf;
+
+ switch (freq) {
+ case 32000: i2s_sf = 0x30; break;
+ case 44100: i2s_sf = 0x00; break;
+ case 48000: i2s_sf = 0x20; break;
+ case 88200: i2s_sf = 0x80; break;
+ case 96000: i2s_sf = 0xa0; break;
+ case 176400: i2s_sf = 0xc0; break;
+ case 192000: i2s_sf = 0xe0; break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set the sampling frequency for I2S audio */
+ adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
+
+ return 0;
+}
+
+static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
+{
+ /* Only 2 channels are in use for this application */
+ adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
+ /* Speaker mapping */
+ adv7511_wr(sd, 0x76, 0x00);
+
+ /* 16 bit audio word length */
+ adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
+ .s_stream = adv7511_s_audio_stream,
+ .s_clock_freq = adv7511_s_clock_freq,
+ .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
+ .s_routing = adv7511_s_routing,
+};
+
+/* ---------------------------- PAD OPS ------------------------------------- */
+
+static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
+ if (edid->pad != 0)
+ return -EINVAL;
+
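+ /* A request for zero blocks is a query for the total number of EDID blocks. */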
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = state->edid.segments * 2;
+ return 0;
+ }
+
+ if (state->edid.segments == 0)
+ return -ENODATA;
+
+ if (edid->start_block >= state->edid.segments * 2)
+ return -EINVAL;
+
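+ /* Clamp the request to the number of blocks that were actually read. */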
+ if (edid->start_block + edid->blocks > state->edid.segments * 2)
+ edid->blocks = state->edid.segments * 2 - edid->start_block;
+
+ memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
+ 128 * edid->blocks);
+
+ return 0;
+}
+
+static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad != 0)
+ return -EINVAL;
+
+ switch (code->index) {
+ case 0:
+ code->code = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case 1:
+ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ break;
+ case 2:
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void adv7511_fill_format(struct adv7511_state *state,
+ struct v4l2_mbus_framefmt *format)
+{
+ format->width = state->dv_timings.bt.width;
+ format->height = state->dv_timings.bt.height;
+ format->field = V4L2_FIELD_NONE;
+}
+
+static int adv7511_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ memset(&format->format, 0, sizeof(format->format));
+ adv7511_fill_format(state, &format->format);
+
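+ /* TRY formats live in the pad config, ACTIVE formats in the driver state. */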
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ format->format.code = fmt->code;
+ format->format.colorspace = fmt->colorspace;
+ format->format.ycbcr_enc = fmt->ycbcr_enc;
+ format->format.quantization = fmt->quantization;
+ format->format.xfer_func = fmt->xfer_func;
+ } else {
+ format->format.code = state->fmt_code;
+ format->format.colorspace = state->colorspace;
+ format->format.ycbcr_enc = state->ycbcr_enc;
+ format->format.quantization = state->quantization;
+ format->format.xfer_func = state->xfer_func;
+ }
+
+ return 0;
+}
+
+static int adv7511_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ /*
+ * Bitfield namings come from the CEA-861-F standard, table 8 "Auxiliary
+ * Video Information (AVI) InfoFrame Format"
+ *
+ * c = Colorimetry
+ * ec = Extended Colorimetry
+ * y = RGB or YCbCr
+ * q = RGB Quantization Range
+ * yq = YCC Quantization Range
+ */
+ u8 c = HDMI_COLORIMETRY_NONE;
+ u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ u8 y = HDMI_COLORSPACE_RGB;
+ u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
+
+ if (format->pad != 0)
+ return -EINVAL;
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511_fill_format(state, &format->format);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ fmt->code = format->format.code;
+ fmt->colorspace = format->format.colorspace;
+ fmt->ycbcr_enc = format->format.ycbcr_enc;
+ fmt->quantization = format->format.quantization;
+ fmt->xfer_func = format->format.xfer_func;
+ return 0;
+ }
+
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
+ y = HDMI_COLORSPACE_YUV422;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
+ y = HDMI_COLORSPACE_YUV422;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ default:
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
+ adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
+ break;
+ }
+ state->fmt_code = format->format.code;
+ state->colorspace = format->format.colorspace;
+ state->ycbcr_enc = format->format.ycbcr_enc;
+ state->quantization = format->format.quantization;
+ state->xfer_func = format->format.xfer_func;
+
+ switch (format->format.colorspace) {
+ case V4L2_COLORSPACE_OPRGB:
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
+ HDMI_EXTENDED_COLORIMETRY_OPRGB;
+ break;
+ case V4L2_COLORSPACE_SMPTE170M:
+ c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ }
+ break;
+ case V4L2_COLORSPACE_REC709:
+ c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
+ c = HDMI_COLORIMETRY_EXTENDED;
+ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+ }
+ break;
+ case V4L2_COLORSPACE_SRGB:
+ c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
+ ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+ break;
+ case V4L2_COLORSPACE_BT2020:
+ c = HDMI_COLORIMETRY_EXTENDED;
+ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
+ ec = 5; /* Not yet available in hdmi.h */
+ else
+ ec = 6; /* Not yet available in hdmi.h */
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * CEA-861-F says that for RGB formats the YCC range must match the
+ * RGB range, although sources should ignore the YCC range.
+ *
+ * The RGB quantization range shouldn't be non-zero if the EDID doesn't
+ * have the Q bit set in the Video Capabilities Data Block, however this
+ * isn't checked at the moment. The assumption is that the application
+ * knows the EDID and can detect this.
+ *
+ * The same is true for the YCC quantization range: non-standard YCC
+ * quantization ranges should only be sent if the EDID has the YQ bit
+ * set in the Video Capabilities Data Block.
+ */
+ switch (format->format.quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
+ break;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ break;
+ }
+
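+ /* Program the AVI InfoFrame Y, C, EC, Q, YQ, ITC and CN fields (see the comment above). */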
+ adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
+ adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
+ adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
+ adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
+ adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
+ adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
+ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
+ .get_edid = adv7511_get_edid,
+ .enum_mbus_code = adv7511_enum_mbus_code,
+ .get_fmt = adv7511_get_fmt,
+ .set_fmt = adv7511_set_fmt,
+ .enum_dv_timings = adv7511_enum_dv_timings,
+ .dv_timings_cap = adv7511_dv_timings_cap,
+};
+
+/* --------------------- SUBDEV OPS --------------------------------------- */
+
+static const struct v4l2_subdev_ops adv7511_ops = {
+ .core = &adv7511_core_ops,
+ .pad = &adv7511_pad_ops,
+ .video = &adv7511_video_ops,
+ .audio = &adv7511_audio_ops,
+};
+
+/* ----------------------------------------------------------------------- */
+static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
+{
+ if (debug >= lvl) {
+ int i, j;
+ v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
+ for (i = 0; i < 256; i += 16) {
+ u8 b[128];
+ u8 *bp = b;
+ if (i == 128)
+ v4l2_dbg(lvl, debug, sd, "\n");
+ for (j = i; j < i + 16; j++) {
+ sprintf(bp, "0x%02x, ", buf[j]);
+ bp += 6;
+ }
+ bp[0] = '\0';
+ v4l2_dbg(lvl, debug, sd, "%s\n", b);
+ }
+ }
+}
+
+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_edid_detect ed;
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ ed.phys_addr = CEC_PHYS_ADDR_INVALID;
+ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
+static void adv7511_edid_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ struct v4l2_subdev *sd = &state->sd;
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (adv7511_check_edid_status(sd)) {
+ /* Return if we received the EDID. */
+ return;
+ }
+
+ if (adv7511_have_hotplug(sd)) {
+ /* We must retry reading the EDID several times; it is possible
+ * that the EDID initially couldn't be read due to i2c errors
+ * (DVI connectors are particularly prone to this problem). */
+ if (state->edid.read_retries) {
+ state->edid.read_retries--;
+ v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
+ state->have_monitor = false;
+ adv7511_s_power(sd, false);
+ adv7511_s_power(sd, true);
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ return;
+ }
+ }
+
+ /* We failed to read the EDID, so send an event for this. */
+ adv7511_notify_no_edid(sd);
+ v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+}
+
+static void adv7511_audio_setup(struct v4l2_subdev *sd)
+{
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ adv7511_s_i2s_clock_freq(sd, 48000);
+ adv7511_s_clock_freq(sd, 48000);
+ adv7511_s_routing(sd, 0, 0, 0);
+}
+
+/* Configure hdmi transmitter. */
+static void adv7511_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* Input format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+ /* Output format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+ /* Disable pixel repetition */
+ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+ /* Disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* Output format: RGB 4:4:4, Active Format Information is valid,
+ * underscanned */
+ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+ /* AVI Info frame packet enable, Audio Info frame disable */
+ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+ /* Colorimetry, Active format aspect ratio: same as picture. */
+ adv7511_wr(sd, 0x56, 0xa8);
+ /* No encryption */
+ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+
+ /* Positive clk edge capture for input video clock */
+ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+
+ adv7511_audio_setup(sd);
+
+ v4l2_ctrl_handler_setup(&state->hdl);
+}
+
+static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
+{
+ struct adv7511_monitor_detect mdt;
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ mdt.present = state->have_monitor;
+ v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
+}
+
+static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ /* read hotplug and rx-sense state */
+ u8 status = adv7511_rd(sd, 0x42);
+
+ v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
+ __func__,
+ status,
+ status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
+ status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
+
+ /* update read only ctrls */
+ v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+
+ if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+ if (!state->have_monitor) {
+ v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
+ state->have_monitor = true;
+ adv7511_set_isr(sd, true);
+ if (!adv7511_s_power(sd, true)) {
+ v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
+ return;
+ }
+ adv7511_setup(sd);
+ adv7511_notify_monitor_detect(sd);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ }
+ } else if (status & MASK_ADV7511_HPD_DETECT) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
+ if (state->have_monitor) {
+ v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
+ state->have_monitor = false;
+ adv7511_notify_monitor_detect(sd);
+ }
+ adv7511_s_power(sd, false);
+ memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ adv7511_notify_no_edid(sd);
+ }
+}
+
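+ /* A 128-byte EDID block is valid when its bytes sum to 0 modulo 256. */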
+static bool edid_block_verify_crc(u8 *edid_block)
+{
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < 128; i++)
+ sum += edid_block[i];
+ return sum == 0;
+}
+
+static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u32 blocks = state->edid.blocks;
+ u8 *data = state->edid.data;
+
+ if (!edid_block_verify_crc(&data[segment * 256]))
+ return false;
+ if ((segment + 1) * 2 <= blocks)
+ return edid_block_verify_crc(&data[segment * 256 + 128]);
+ return true;
+}
+
+static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+{
+ static const u8 hdmi_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+ };
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u8 *data = state->edid.data;
+
+ if (segment != 0)
+ return true;
+ return !memcmp(data, hdmi_header, sizeof(hdmi_header));
+}
+
+static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u8 edidRdy = adv7511_rd(sd, 0xc5);
+
+ v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
+ __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+
+ if (state->edid.complete)
+ return true;
+
+ if (edidRdy & MASK_ADV7511_EDID_RDY) {
+ int segment = adv7511_rd(sd, 0xc4);
+ struct adv7511_edid_detect ed;
+
+ if (segment >= EDID_MAX_SEGM) {
+ v4l2_err(sd, "edid segment number too big\n");
+ return false;
+ }
+ v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
+ adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
+ adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
+ if (segment == 0) {
+ state->edid.blocks = state->edid.data[0x7e] + 1;
+ v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
+ }
+ if (!edid_verify_crc(sd, segment) ||
+ !edid_verify_header(sd, segment)) {
+ /* edid crc error, force reread of edid segment */
+ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
+ state->have_monitor = false;
+ adv7511_s_power(sd, false);
+ adv7511_s_power(sd, true);
+ return false;
+ }
+ /* one more segment read ok */
+ state->edid.segments = segment + 1;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
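+ /* Byte 0x7e of block 0 holds the number of extension blocks; each segment holds two blocks. */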
+ if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ /* Request next EDID segment */
+ v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+ adv7511_wr(sd, 0xc9, 0xf);
+ adv7511_wr(sd, 0xc4, state->edid.segments);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ return false;
+ }
+
+ v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
+ state->edid.complete = true;
+ ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
+ state->edid.segments * 256,
+ NULL);
+ /* Report the EDID once all segments have been read,
+ * but report it only for segment 0.
+ */
+ ed.present = true;
+ ed.segment = 0;
+ state->edid_detect_counter++;
+ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ return ed.present;
+ }
+
+ return false;
+}
+
+static int adv7511_registered(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int err;
+
+ err = cec_register_adapter(state->cec_adap, &client->dev);
+ if (err)
+ cec_delete_adapter(state->cec_adap);
+ return err;
+}
+
+static void adv7511_unregistered(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ cec_unregister_adapter(state->cec_adap);
+}
+
+static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
+ .registered = adv7511_registered,
+ .unregistered = adv7511_unregistered,
+};
+
+/* ----------------------------------------------------------------------- */
+/* Setup ADV7511 */
+static void adv7511_init_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_state_edid *edid = &state->edid;
+ u32 cec_clk = state->pdata.cec_clk;
+ u8 ratio;
+
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* clear all interrupts */
+ adv7511_wr(sd, 0x96, 0xff);
+ adv7511_wr(sd, 0x97, 0xff);
+ /*
+ * Stop HPD from resetting a lot of registers.
+ * It might leave the chip in a partly un-initialized state,
+ * in particular with regard to hotplug bounces.
+ */
+ adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
+ memset(edid, 0, sizeof(struct adv7511_state_edid));
+ state->have_monitor = false;
+ adv7511_set_isr(sd, false);
+ adv7511_s_stream(sd, false);
+ adv7511_s_audio_stream(sd, false);
+
+ if (state->i2c_cec == NULL)
+ return;
+
+ v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
+
+ /* cec soft reset */
+ adv7511_cec_write(sd, 0x50, 0x01);
+ adv7511_cec_write(sd, 0x50, 0x00);
+
+ /* legacy mode */
+ adv7511_cec_write(sd, 0x4a, 0x00);
+ adv7511_cec_write(sd, 0x4a, 0x07);
+
+ if (cec_clk % 750000 != 0)
+ v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
+ __func__, cec_clk);
+
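+ /* The CEC clock divider is programmed in units of 750 kHz. */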
+ ratio = (cec_clk / 750000) - 1;
+ adv7511_cec_write(sd, 0x4e, ratio << 2);
+}
+
+static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct adv7511_state *state;
+ struct adv7511_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_subdev *sd;
+ u8 chip_id[2];
+ int err = -EIO;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ /* Platform data */
+ if (!pdata) {
+ v4l_err(client, "No platform data!\n");
+ return -ENODEV;
+ }
+ memcpy(&state->pdata, pdata, sizeof(state->pdata));
+ state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+ state->colorspace = V4L2_COLORSPACE_SRGB;
+
+ sd = &state->sd;
+
+ v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
+ client->addr << 1);
+
+ v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+ sd->internal_ops = &adv7511_int_ops;
+
+ hdl = &state->hdl;
+ v4l2_ctrl_handler_init(hdl, 10);
+ /* add in ascending ID order */
+ state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
+ 0, V4L2_DV_TX_MODE_DVI_D);
+ state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
+ state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
+ state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
+ state->rgb_quantization_range_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ state->content_type_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
+ 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ err = hdl->error;
+ goto err_hdl;
+ }
+ state->pad.flags = MEDIA_PAD_FL_SINK;
+ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
+ err = media_entity_pads_init(&sd->entity, 1, &state->pad);
+ if (err)
+ goto err_hdl;
+
+ /* EDID and CEC i2c addr */
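+ /* pdata holds 7-bit i2c addresses; the chip registers expect the 8-bit (shifted) form, while i2c_new_dummy() below takes the 7-bit address again. */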
+ state->i2c_edid_addr = state->pdata.i2c_edid << 1;
+ state->i2c_cec_addr = state->pdata.i2c_cec << 1;
+ state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
+
+ state->chip_revision = adv7511_rd(sd, 0x0);
+ chip_id[0] = adv7511_rd(sd, 0xf5);
+ chip_id[1] = adv7511_rd(sd, 0xf6);
+ if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
+ v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
+ chip_id[1]);
+ err = -EIO;
+ goto err_entity;
+ }
+
+ state->i2c_edid = i2c_new_dummy(client->adapter,
+ state->i2c_edid_addr >> 1);
+ if (state->i2c_edid == NULL) {
+ v4l2_err(sd, "failed to register edid i2c client\n");
+ err = -ENOMEM;
+ goto err_entity;
+ }
+
+ adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
+ if (state->pdata.cec_clk < 3000000 ||
+ state->pdata.cec_clk > 100000000) {
+ v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
+ __func__, state->pdata.cec_clk);
+ state->pdata.cec_clk = 0;
+ }
+
+ if (state->pdata.cec_clk) {
+ state->i2c_cec = i2c_new_dummy(client->adapter,
+ state->i2c_cec_addr >> 1);
+ if (state->i2c_cec == NULL) {
+ v4l2_err(sd, "failed to register cec i2c client\n");
+ err = -ENOMEM;
+ goto err_unreg_edid;
+ }
+ adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
+ } else {
+ adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
+ }
+
+ state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
+ if (state->i2c_pktmem == NULL) {
+ v4l2_err(sd, "failed to register pktmem i2c client\n");
+ err = -ENOMEM;
+ goto err_unreg_cec;
+ }
+
+ state->work_queue = create_singlethread_workqueue(sd->name);
+ if (state->work_queue == NULL) {
+ v4l2_err(sd, "could not create workqueue\n");
+ err = -ENOMEM;
+ goto err_unreg_pktmem;
+ }
+
+ INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+
+ adv7511_init_setup(sd);
+
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+ state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
+ state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
+ ADV7511_MAX_ADDRS);
+ err = PTR_ERR_OR_ZERO(state->cec_adap);
+ if (err) {
+ destroy_workqueue(state->work_queue);
+ goto err_unreg_pktmem;
+ }
+#endif
+
+ adv7511_set_isr(sd, true);
+ adv7511_check_monitor_present_status(sd);
+
+ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+ return 0;
+
+err_unreg_pktmem:
+ i2c_unregister_device(state->i2c_pktmem);
+err_unreg_cec:
+ if (state->i2c_cec)
+ i2c_unregister_device(state->i2c_cec);
+err_unreg_edid:
+ i2c_unregister_device(state->i2c_edid);
+err_entity:
+ media_entity_cleanup(&sd->entity);
+err_hdl:
+ v4l2_ctrl_handler_free(&state->hdl);
+ return err;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int adv7511_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ state->chip_revision = -1;
+
+ v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+
+ adv7511_set_isr(sd, false);
+ adv7511_init_setup(sd);
+ cancel_delayed_work(&state->edid_handler);
+ i2c_unregister_device(state->i2c_edid);
+ if (state->i2c_cec)
+ i2c_unregister_device(state->i2c_cec);
+ i2c_unregister_device(state->i2c_pktmem);
+ destroy_workqueue(state->work_queue);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static const struct i2c_device_id adv7511_id[] = {
+ { "adv7511", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_id);
+
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .name = "adv7511",
+ },
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .id_table = adv7511_id,
+};
+
+module_i2c_driver(adv7511_driver);
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
deleted file mode 100644
index cec5ebb1c9e6..000000000000
--- a/drivers/media/i2c/adv7511.c
+++ /dev/null
@@ -1,1992 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog Devices ADV7511 HDMI Transmitter Device Driver
- *
- * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/hdmi.h>
-#include <linux/v4l2-dv-timings.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-dv-timings.h>
-#include <media/i2c/adv7511.h>
-#include <media/cec.h>
-
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-2)");
-
-MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
-MODULE_AUTHOR("Hans Verkuil");
-MODULE_LICENSE("GPL v2");
-
-#define MASK_ADV7511_EDID_RDY_INT 0x04
-#define MASK_ADV7511_MSEN_INT 0x40
-#define MASK_ADV7511_HPD_INT 0x80
-
-#define MASK_ADV7511_HPD_DETECT 0x40
-#define MASK_ADV7511_MSEN_DETECT 0x20
-#define MASK_ADV7511_EDID_RDY 0x10
-
-#define EDID_MAX_RETRIES (8)
-#define EDID_DELAY 250
-#define EDID_MAX_SEGM 8
-
-#define ADV7511_MAX_WIDTH 1920
-#define ADV7511_MAX_HEIGHT 1200
-#define ADV7511_MIN_PIXELCLOCK 20000000
-#define ADV7511_MAX_PIXELCLOCK 225000000
-
-#define ADV7511_MAX_ADDRS (3)
-
-/*
-**********************************************************************
-*
-* Arrays with configuration parameters for the ADV7511
-*
-**********************************************************************
-*/
-
-struct i2c_reg_value {
- unsigned char reg;
- unsigned char value;
-};
-
-struct adv7511_state_edid {
- /* total number of blocks */
- u32 blocks;
- /* Number of segments read */
- u32 segments;
- u8 data[EDID_MAX_SEGM * 256];
- /* Number of EDID read retries left */
- unsigned read_retries;
- bool complete;
-};
-
-struct adv7511_state {
- struct adv7511_platform_data pdata;
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_ctrl_handler hdl;
- int chip_revision;
- u8 i2c_edid_addr;
- u8 i2c_pktmem_addr;
- u8 i2c_cec_addr;
-
- struct i2c_client *i2c_cec;
- struct cec_adapter *cec_adap;
- u8 cec_addr[ADV7511_MAX_ADDRS];
- u8 cec_valid_addrs;
- bool cec_enabled_adap;
-
- /* Is the adv7511 powered on? */
- bool power_on;
- /* Did we receive hotplug and rx-sense signals? */
- bool have_monitor;
- bool enabled_irq;
- /* timings from s_dv_timings */
- struct v4l2_dv_timings dv_timings;
- u32 fmt_code;
- u32 colorspace;
- u32 ycbcr_enc;
- u32 quantization;
- u32 xfer_func;
- u32 content_type;
- /* controls */
- struct v4l2_ctrl *hdmi_mode_ctrl;
- struct v4l2_ctrl *hotplug_ctrl;
- struct v4l2_ctrl *rx_sense_ctrl;
- struct v4l2_ctrl *have_edid0_ctrl;
- struct v4l2_ctrl *rgb_quantization_range_ctrl;
- struct v4l2_ctrl *content_type_ctrl;
- struct i2c_client *i2c_edid;
- struct i2c_client *i2c_pktmem;
- struct adv7511_state_edid edid;
- /* Running counter of the number of detected EDIDs (for debugging) */
- unsigned edid_detect_counter;
- struct workqueue_struct *work_queue;
- struct delayed_work edid_handler; /* work entry */
-};
-
-static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
-static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
-static void adv7511_setup(struct v4l2_subdev *sd);
-static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
-static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
-
-
-static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
- .type = V4L2_DV_BT_656_1120,
- /* keep this initialization for compatibility with GCC < 4.4.6 */
- .reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
- ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
- V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
- V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
- V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
- V4L2_DV_BT_CAP_CUSTOM)
-};
-
-static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct adv7511_state, sd);
-}
-
-static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
-{
- return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
-}
-
-/* ------------------------ I2C ----------------------------------------------- */
-
-static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
- u8 command, bool check)
-{
- union i2c_smbus_data data;
-
- if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_READ, command,
- I2C_SMBUS_BYTE_DATA, &data))
- return data.byte;
- if (check)
- v4l_err(client, "error reading %02x, %02x\n",
- client->addr, command);
- return -1;
-}
-
-static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
-{
- int i;
- for (i = 0; i < 3; i++) {
- int ret = adv_smbus_read_byte_data_check(client, command, true);
- if (ret >= 0) {
- if (i)
- v4l_err(client, "read ok after %d retries\n", i);
- return ret;
- }
- }
- v4l_err(client, "read failed\n");
- return -1;
-}
-
-static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return adv_smbus_read_byte_data(client, reg);
-}
-
-static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- int i;
-
- for (i = 0; i < 3; i++) {
- ret = i2c_smbus_write_byte_data(client, reg, val);
- if (ret == 0)
- return 0;
- }
- v4l2_err(sd, "%s: i2c write error\n", __func__);
- return ret;
-}
-
-/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
- and then the value-mask (to be OR-ed). */
-static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
-{
- adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
-}
-
-static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
- u8 command, unsigned length, u8 *values)
-{
- union i2c_smbus_data data;
- int ret;
-
- if (length > I2C_SMBUS_BLOCK_MAX)
- length = I2C_SMBUS_BLOCK_MAX;
- data.block[0] = length;
-
- ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_READ, command,
- I2C_SMBUS_I2C_BLOCK_DATA, &data);
- memcpy(values, data.block + 1, length);
- return ret;
-}
-
-static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- int i;
- int err = 0;
-
- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
-
- for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
- err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
- I2C_SMBUS_BLOCK_MAX, buf + i);
- if (err)
- v4l2_err(sd, "%s: i2c read error\n", __func__);
-}
-
-static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- return i2c_smbus_read_byte_data(state->i2c_cec, reg);
-}
-
-static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- int ret;
- int i;
-
- for (i = 0; i < 3; i++) {
- ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
- if (ret == 0)
- return 0;
- }
- v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
- return ret;
-}
-
-static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
- u8 val)
-{
- return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
-}
-
-static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
-}
-
-static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- int ret;
- int i;
-
- for (i = 0; i < 3; i++) {
- ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
- if (ret == 0)
- return 0;
- }
- v4l2_err(sd, "%s: i2c write error\n", __func__);
- return ret;
-}
-
-/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
- and then the value-mask (to be OR-ed). */
-static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
-{
- adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
-}
-
-static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
-{
- return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
-}
-
-static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
-{
- return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
-}
-
-static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
-{
- adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
-}
-
-static void adv7511_csc_coeff(struct v4l2_subdev *sd,
- u16 A1, u16 A2, u16 A3, u16 A4,
- u16 B1, u16 B2, u16 B3, u16 B4,
- u16 C1, u16 C2, u16 C3, u16 C4)
-{
- /* A */
- adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
- adv7511_wr(sd, 0x19, A1);
- adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
- adv7511_wr(sd, 0x1B, A2);
- adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
- adv7511_wr(sd, 0x1d, A3);
- adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
- adv7511_wr(sd, 0x1f, A4);
-
- /* B */
- adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
- adv7511_wr(sd, 0x21, B1);
- adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
- adv7511_wr(sd, 0x23, B2);
- adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
- adv7511_wr(sd, 0x25, B3);
- adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
- adv7511_wr(sd, 0x27, B4);
-
- /* C */
- adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
- adv7511_wr(sd, 0x29, C1);
- adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
- adv7511_wr(sd, 0x2B, C2);
- adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
- adv7511_wr(sd, 0x2D, C3);
- adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
- adv7511_wr(sd, 0x2F, C4);
-}
-
-static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
-{
- if (enable) {
- u8 csc_mode = 0;
- adv7511_csc_conversion_mode(sd, csc_mode);
- adv7511_csc_coeff(sd,
- 4096-564, 0, 0, 256,
- 0, 4096-564, 0, 256,
- 0, 0, 4096-564, 256);
- /* enable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
- /* AVI infoframe: Limited range RGB (16-235) */
- adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
- } else {
- /* disable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
- /* AVI infoframe: Full range RGB (0-255) */
- adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
- }
-}
-
-static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- /* Only makes sense for RGB formats */
- if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
- /* so just keep quantization */
- adv7511_csc_rgb_full2limit(sd, false);
- return;
- }
-
- switch (ctrl->val) {
- case V4L2_DV_RGB_RANGE_AUTO:
- /* automatic */
- if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
- /* CE format, RGB limited range (16-235) */
- adv7511_csc_rgb_full2limit(sd, true);
- } else {
- /* not CE format, RGB full range (0-255) */
- adv7511_csc_rgb_full2limit(sd, false);
- }
- break;
- case V4L2_DV_RGB_RANGE_LIMITED:
- /* RGB limited range (16-235) */
- adv7511_csc_rgb_full2limit(sd, true);
- break;
- case V4L2_DV_RGB_RANGE_FULL:
- /* RGB full range (0-255) */
- adv7511_csc_rgb_full2limit(sd, false);
- break;
- }
-}
-
-/* ------------------------------ CTRL OPS ------------------------------ */
-
-static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd = to_sd(ctrl);
- struct adv7511_state *state = get_adv7511_state(sd);
-
- v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
-
- if (state->hdmi_mode_ctrl == ctrl) {
- /* Set HDMI or DVI-D */
- adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
- return 0;
- }
- if (state->rgb_quantization_range_ctrl == ctrl) {
- adv7511_set_rgb_quantization_mode(sd, ctrl);
- return 0;
- }
- if (state->content_type_ctrl == ctrl) {
- u8 itc, cn;
-
- state->content_type = ctrl->val;
- itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
- cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
- adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
- adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
- .s_ctrl = adv7511_s_ctrl,
-};
-
-/* ---------------------------- CORE OPS ------------------------------------------- */
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static void adv7511_inv_register(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- v4l2_info(sd, "0x000-0x0ff: Main Map\n");
- if (state->i2c_cec)
- v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
-}
-
-static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- reg->size = 1;
- switch (reg->reg >> 8) {
- case 0:
- reg->val = adv7511_rd(sd, reg->reg & 0xff);
- break;
- case 1:
- if (state->i2c_cec) {
- reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
- break;
- }
- /* fall through */
- default:
- v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
- adv7511_inv_register(sd);
- break;
- }
- return 0;
-}
-
-static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- switch (reg->reg >> 8) {
- case 0:
- adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
- break;
- case 1:
- if (state->i2c_cec) {
- adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
- break;
- }
- /* fall through */
- default:
- v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
- adv7511_inv_register(sd);
- break;
- }
- return 0;
-}
-#endif
-
-struct adv7511_cfg_read_infoframe {
- const char *desc;
- u8 present_reg;
- u8 present_mask;
- u8 header[3];
- u16 payload_addr;
-};
-
-static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
-{
- u8 csum = 0;
- size_t i;
-
- /* compute checksum */
- for (i = 0; i < size; i++)
- csum += ptr[i];
-
- return 256 - csum;
-}
-
-static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct device *dev = &client->dev;
- union hdmi_infoframe frame;
- u8 buffer[32];
- u8 len;
- int i;
-
- if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
- v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
- return;
- }
-
- memcpy(buffer, cri->header, sizeof(cri->header));
-
- len = buffer[2];
-
- if (len + 4 > sizeof(buffer)) {
- v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
- return;
- }
-
- if (cri->payload_addr >= 0x100) {
- for (i = 0; i < len; i++)
- buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
- } else {
- for (i = 0; i < len; i++)
- buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
- }
- buffer[3] = 0;
- buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
-
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
- v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
- return;
- }
-
- hdmi_infoframe_log(KERN_INFO, dev, &frame);
-}
-
-static void adv7511_log_infoframes(struct v4l2_subdev *sd)
-{
- static const struct adv7511_cfg_read_infoframe cri[] = {
- { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
- { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
- { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cri); i++)
- log_infoframe(sd, &cri[i]);
-}
-
-static int adv7511_log_status(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- struct adv7511_state_edid *edid = &state->edid;
- int i;
-
- static const char * const states[] = {
- "in reset",
- "reading EDID",
- "idle",
- "initializing HDCP",
- "HDCP enabled",
- "initializing HDCP repeater",
- "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
- };
- static const char * const errors[] = {
- "no error",
- "bad receiver BKSV",
- "Ri mismatch",
- "Pj mismatch",
- "i2c error",
- "timed out",
- "max repeater cascade exceeded",
- "hash check failed",
- "too many devices",
- "9", "A", "B", "C", "D", "E", "F"
- };
-
- v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
- v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
- (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
- (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
- edid->segments ? "found" : "no",
- edid->blocks);
- v4l2_info(sd, "%s output %s\n",
- (adv7511_rd(sd, 0xaf) & 0x02) ?
- "HDMI" : "DVI-D",
- (adv7511_rd(sd, 0xa1) & 0x3c) ?
- "disabled" : "enabled");
- v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
- states[adv7511_rd(sd, 0xc8) & 0xf],
- errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
- adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
- v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
- if (adv7511_rd(sd, 0xaf) & 0x02) {
- /* HDMI only */
- u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
- u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
- adv7511_rd(sd, 0x02) << 8 |
- adv7511_rd(sd, 0x03);
- u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
- u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
- u32 CTS;
-
- if (manual_cts)
- CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
- adv7511_rd(sd, 0x08) << 8 |
- adv7511_rd(sd, 0x09);
- else
- CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
- adv7511_rd(sd, 0x05) << 8 |
- adv7511_rd(sd, 0x06);
- v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
- manual_cts ? "manual" : "automatic", N, CTS);
- v4l2_info(sd, "VIC: detected %d, sent %d\n",
- vic_detect, vic_sent);
- adv7511_log_infoframes(sd);
- }
- if (state->dv_timings.type == V4L2_DV_BT_656_1120)
- v4l2_print_dv_timings(sd->name, "timings: ",
- &state->dv_timings, false);
- else
- v4l2_info(sd, "no timings set\n");
- v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
-
- if (state->i2c_cec == NULL)
- return 0;
-
- v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
-
- v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
- "enabled" : "disabled");
- if (state->cec_enabled_adap) {
- for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
- bool is_valid = state->cec_valid_addrs & (1 << i);
-
- if (is_valid)
- v4l2_info(sd, "CEC Logical Address: 0x%x\n",
- state->cec_addr[i]);
- }
- }
- v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
- return 0;
-}
-
-/* Power up/down adv7511 */
-static int adv7511_s_power(struct v4l2_subdev *sd, int on)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- const int retries = 20;
- int i;
-
- v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
-
- state->power_on = on;
-
- if (!on) {
- /* Power down */
- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
- return true;
- }
-
- /* Power up */
- /* The adv7511 does not always come up immediately.
- Retry multiple times. */
- for (i = 0; i < retries; i++) {
- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
- if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
- break;
- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
- msleep(10);
- }
- if (i == retries) {
- v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
- adv7511_s_power(sd, 0);
- return false;
- }
- if (i > 1)
- v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
-
- /* Reserved registers that must be set */
- adv7511_wr(sd, 0x98, 0x03);
- adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
- adv7511_wr(sd, 0x9c, 0x30);
- adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
- adv7511_wr(sd, 0xa2, 0xa4);
- adv7511_wr(sd, 0xa3, 0xa4);
- adv7511_wr(sd, 0xe0, 0xd0);
- adv7511_wr(sd, 0xf9, 0x00);
-
- adv7511_wr(sd, 0x43, state->i2c_edid_addr);
- adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
-
- /* Set number of attempts to read the EDID */
- adv7511_wr(sd, 0xc9, 0xf);
- return true;
-}
-
-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
- struct adv7511_state *state = cec_get_drvdata(adap);
- struct v4l2_subdev *sd = &state->sd;
-
- if (state->i2c_cec == NULL)
- return -EIO;
-
- if (!state->cec_enabled_adap && enable) {
- /* power up cec section */
- adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
- /* legacy mode and clear all rx buffers */
- adv7511_cec_write(sd, 0x4a, 0x00);
- adv7511_cec_write(sd, 0x4a, 0x07);
- adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
- /* enabled irqs: */
- /* tx: ready */
- /* tx: arbitration lost */
- /* tx: retry timeout */
- /* rx: ready 1 */
- if (state->enabled_irq)
- adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
- } else if (state->cec_enabled_adap && !enable) {
- if (state->enabled_irq)
- adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
- /* disable address mask 1-3 */
- adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
- /* power down cec section */
- adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
- state->cec_valid_addrs = 0;
- }
- state->cec_enabled_adap = enable;
- return 0;
-}
-
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
-{
- struct adv7511_state *state = cec_get_drvdata(adap);
- struct v4l2_subdev *sd = &state->sd;
- unsigned int i, free_idx = ADV7511_MAX_ADDRS;
-
- if (!state->cec_enabled_adap)
- return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
-
- if (addr == CEC_LOG_ADDR_INVALID) {
- adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
- state->cec_valid_addrs = 0;
- return 0;
- }
-
- for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
- bool is_valid = state->cec_valid_addrs & (1 << i);
-
- if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
- free_idx = i;
- if (is_valid && state->cec_addr[i] == addr)
- return 0;
- }
- if (i == ADV7511_MAX_ADDRS) {
- i = free_idx;
- if (i == ADV7511_MAX_ADDRS)
- return -ENXIO;
- }
- state->cec_addr[i] = addr;
- state->cec_valid_addrs |= 1 << i;
-
- switch (i) {
- case 0:
- /* enable address mask 0 */
- adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
- /* set address for mask 0 */
- adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
- break;
- case 1:
- /* enable address mask 1 */
- adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
- /* set address for mask 1 */
- adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
- break;
- case 2:
- /* enable address mask 2 */
- adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
- /* set address for mask 1 */
- adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
- break;
- }
- return 0;
-}
-
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
-{
- struct adv7511_state *state = cec_get_drvdata(adap);
- struct v4l2_subdev *sd = &state->sd;
- u8 len = msg->len;
- unsigned int i;
-
- v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
-
- if (len > 16) {
- v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
- return -EINVAL;
- }
-
- /*
- * The number of retries is the number of attempts - 1, but retry
- * at least once. It's not clear if a value of 0 is allowed, so
- * let's do at least one retry.
- */
- adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
-
- /* clear cec tx irq status */
- adv7511_wr(sd, 0x97, 0x38);
-
- /* write data */
- for (i = 0; i < len; i++)
- adv7511_cec_write(sd, i, msg->msg[i]);
-
- /* set length (data + header) */
- adv7511_cec_write(sd, 0x10, len);
- /* start transmit, enable tx */
- adv7511_cec_write(sd, 0x11, 0x01);
- return 0;
-}
-
-static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
- v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
- return;
- }
-
- if (tx_raw_status & 0x10) {
- v4l2_dbg(1, debug, sd,
- "%s: tx raw: arbitration lost\n", __func__);
- cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
- 1, 0, 0, 0);
- return;
- }
- if (tx_raw_status & 0x08) {
- u8 status;
- u8 nack_cnt;
- u8 low_drive_cnt;
-
- v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
- /*
- * We set this status bit since this hardware performs
- * retransmissions.
- */
- status = CEC_TX_STATUS_MAX_RETRIES;
- nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
- if (nack_cnt)
- status |= CEC_TX_STATUS_NACK;
- low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
- if (low_drive_cnt)
- status |= CEC_TX_STATUS_LOW_DRIVE;
- cec_transmit_done(state->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, 0);
- return;
- }
- if (tx_raw_status & 0x20) {
- v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
- cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
- return;
- }
-}
-
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-#endif
-
-/* Enable interrupts */
-static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
- u8 irqs_rd;
- int retries = 100;
-
- v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
-
- if (state->enabled_irq == enable)
- return;
- state->enabled_irq = enable;
-
- /* The datasheet says that the EDID ready interrupt should be
- disabled if there is no hotplug. */
- if (!enable)
- irqs = 0;
- else if (adv7511_have_hotplug(sd))
- irqs |= MASK_ADV7511_EDID_RDY_INT;
-
- /*
- * This i2c write can fail (approx. 1 in 1000 writes). But it
- * is essential that this register is correct, so retry it
- * multiple times.
- *
- * Note that the i2c write does not report an error, but the readback
- * clearly shows the wrong value.
- */
- do {
- adv7511_wr(sd, 0x94, irqs);
- irqs_rd = adv7511_rd(sd, 0x94);
- } while (retries-- && irqs_rd != irqs);
-
- if (irqs_rd != irqs)
- v4l2_err(sd, "Could not set interrupts: hw failure?\n");
-
- adv7511_wr_and_or(sd, 0x95, 0xc0,
- (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
-}
-
-/* Interrupt handler */
-static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
-{
- u8 irq_status;
- u8 cec_irq;
-
- /* disable interrupts to prevent a race condition */
- adv7511_set_isr(sd, false);
- irq_status = adv7511_rd(sd, 0x96);
- cec_irq = adv7511_rd(sd, 0x97);
- /* clear detected interrupts */
- adv7511_wr(sd, 0x96, irq_status);
- adv7511_wr(sd, 0x97, cec_irq);
-
- v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
- irq_status, cec_irq);
-
- if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
- adv7511_check_monitor_present_status(sd);
- if (irq_status & MASK_ADV7511_EDID_RDY_INT)
- adv7511_check_edid_status(sd);
-
-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
- if (cec_irq & 0x38)
- adv_cec_tx_raw_status(sd, cec_irq);
-
- if (cec_irq & 1) {
- struct adv7511_state *state = get_adv7511_state(sd);
- struct cec_msg msg;
-
- msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
-
- v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
- msg.len);
-
- if (msg.len > 16)
- msg.len = 16;
-
- if (msg.len) {
- u8 i;
-
- for (i = 0; i < msg.len; i++)
- msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
-
- adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
- adv7511_cec_write(sd, 0x4a, 1);
- cec_received_msg(state->cec_adap, &msg);
- }
- }
-#endif
-
- /* enable interrupts */
- adv7511_set_isr(sd, true);
-
- if (handled)
- *handled = true;
- return 0;
-}
-
-static const struct v4l2_subdev_core_ops adv7511_core_ops = {
- .log_status = adv7511_log_status,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = adv7511_g_register,
- .s_register = adv7511_s_register,
-#endif
- .s_power = adv7511_s_power,
- .interrupt_service_routine = adv7511_isr,
-};
-
-/* ------------------------------ VIDEO OPS ------------------------------ */
-
-/* Enable/disable adv7511 output */
-static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
- adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
- if (enable) {
- adv7511_check_monitor_present_status(sd);
- } else {
- adv7511_s_power(sd, 0);
- state->have_monitor = false;
- }
- return 0;
-}
-
-static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- struct v4l2_bt_timings *bt = &timings->bt;
- u32 fps;
-
- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
-
- /* quick sanity check */
- if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
- return -EINVAL;
-
- /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
- if the format is one of the CEA or DMT timings. */
- v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
-
- /* save timings */
- state->dv_timings = *timings;
-
- /* set h/vsync polarities */
- adv7511_wr_and_or(sd, 0x17, 0x9f,
- ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
- ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
-
- fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
- switch (fps) {
- case 24:
- adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
- break;
- case 25:
- adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
- break;
- case 30:
- adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
- break;
- default:
- adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
- break;
- }
-
- /* update quantization range based on new dv_timings */
- adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
-
- return 0;
-}
-
-static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
-
- if (!timings)
- return -EINVAL;
-
- *timings = state->dv_timings;
-
- return 0;
-}
-
-static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_enum_dv_timings *timings)
-{
- if (timings->pad != 0)
- return -EINVAL;
-
- return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
-}
-
-static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
- struct v4l2_dv_timings_cap *cap)
-{
- if (cap->pad != 0)
- return -EINVAL;
-
- *cap = adv7511_timings_cap;
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops adv7511_video_ops = {
- .s_stream = adv7511_s_stream,
- .s_dv_timings = adv7511_s_dv_timings,
- .g_dv_timings = adv7511_g_dv_timings,
-};
-
-/* ------------------------------ AUDIO OPS ------------------------------ */
-static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
-{
- v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
-
- if (enable)
- adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
- else
- adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
-
- return 0;
-}
-
-static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
-{
- u32 N;
-
- switch (freq) {
- case 32000: N = 4096; break;
- case 44100: N = 6272; break;
- case 48000: N = 6144; break;
- case 88200: N = 12544; break;
- case 96000: N = 12288; break;
- case 176400: N = 25088; break;
- case 192000: N = 24576; break;
- default:
- return -EINVAL;
- }
-
- /* Set N (used with CTS to regenerate the audio clock) */
- adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
- adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
- adv7511_wr(sd, 0x03, N & 0xff);
-
- return 0;
-}
-
-static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
-{
- u32 i2s_sf;
-
- switch (freq) {
- case 32000: i2s_sf = 0x30; break;
- case 44100: i2s_sf = 0x00; break;
- case 48000: i2s_sf = 0x20; break;
- case 88200: i2s_sf = 0x80; break;
- case 96000: i2s_sf = 0xa0; break;
- case 176400: i2s_sf = 0xc0; break;
- case 192000: i2s_sf = 0xe0; break;
- default:
- return -EINVAL;
- }
-
- /* Set sampling frequency for I2S audio to 48 kHz */
- adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
-
- return 0;
-}
-
-static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
-{
- /* Only 2 channels in use for application */
- adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
- /* Speaker mapping */
- adv7511_wr(sd, 0x76, 0x00);
-
- /* 16 bit audio word length */
- adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
-
- return 0;
-}
-
-static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
- .s_stream = adv7511_s_audio_stream,
- .s_clock_freq = adv7511_s_clock_freq,
- .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
- .s_routing = adv7511_s_routing,
-};
-
-/* ---------------------------- PAD OPS ------------------------------------- */
-
-static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- memset(edid->reserved, 0, sizeof(edid->reserved));
-
- if (edid->pad != 0)
- return -EINVAL;
-
- if (edid->start_block == 0 && edid->blocks == 0) {
- edid->blocks = state->edid.segments * 2;
- return 0;
- }
-
- if (state->edid.segments == 0)
- return -ENODATA;
-
- if (edid->start_block >= state->edid.segments * 2)
- return -EINVAL;
-
- if (edid->start_block + edid->blocks > state->edid.segments * 2)
- edid->blocks = state->edid.segments * 2 - edid->start_block;
-
- memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
- 128 * edid->blocks);
-
- return 0;
-}
-
-static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad != 0)
- return -EINVAL;
-
- switch (code->index) {
- case 0:
- code->code = MEDIA_BUS_FMT_RGB888_1X24;
- break;
- case 1:
- code->code = MEDIA_BUS_FMT_YUYV8_1X16;
- break;
- case 2:
- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void adv7511_fill_format(struct adv7511_state *state,
- struct v4l2_mbus_framefmt *format)
-{
- format->width = state->dv_timings.bt.width;
- format->height = state->dv_timings.bt.height;
- format->field = V4L2_FIELD_NONE;
-}
-
-static int adv7511_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- if (format->pad != 0)
- return -EINVAL;
-
- memset(&format->format, 0, sizeof(format->format));
- adv7511_fill_format(state, &format->format);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *fmt;
-
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
- format->format.code = fmt->code;
- format->format.colorspace = fmt->colorspace;
- format->format.ycbcr_enc = fmt->ycbcr_enc;
- format->format.quantization = fmt->quantization;
- format->format.xfer_func = fmt->xfer_func;
- } else {
- format->format.code = state->fmt_code;
- format->format.colorspace = state->colorspace;
- format->format.ycbcr_enc = state->ycbcr_enc;
- format->format.quantization = state->quantization;
- format->format.xfer_func = state->xfer_func;
- }
-
- return 0;
-}
-
-static int adv7511_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- /*
- * Bitfield namings come the CEA-861-F standard, table 8 "Auxiliary
- * Video Information (AVI) InfoFrame Format"
- *
- * c = Colorimetry
- * ec = Extended Colorimetry
- * y = RGB or YCbCr
- * q = RGB Quantization Range
- * yq = YCC Quantization Range
- */
- u8 c = HDMI_COLORIMETRY_NONE;
- u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- u8 y = HDMI_COLORSPACE_RGB;
- u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
- u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
- u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
- u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
-
- if (format->pad != 0)
- return -EINVAL;
- switch (format->format.code) {
- case MEDIA_BUS_FMT_UYVY8_1X16:
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_RGB888_1X24:
- break;
- default:
- return -EINVAL;
- }
-
- adv7511_fill_format(state, &format->format);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *fmt;
-
- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
- fmt->code = format->format.code;
- fmt->colorspace = format->format.colorspace;
- fmt->ycbcr_enc = format->format.ycbcr_enc;
- fmt->quantization = format->format.quantization;
- fmt->xfer_func = format->format.xfer_func;
- return 0;
- }
-
- switch (format->format.code) {
- case MEDIA_BUS_FMT_UYVY8_1X16:
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
- adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
- y = HDMI_COLORSPACE_YUV422;
- break;
- case MEDIA_BUS_FMT_YUYV8_1X16:
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
- adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
- y = HDMI_COLORSPACE_YUV422;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- default:
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
- adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
- break;
- }
- state->fmt_code = format->format.code;
- state->colorspace = format->format.colorspace;
- state->ycbcr_enc = format->format.ycbcr_enc;
- state->quantization = format->format.quantization;
- state->xfer_func = format->format.xfer_func;
-
- switch (format->format.colorspace) {
- case V4L2_COLORSPACE_OPRGB:
- c = HDMI_COLORIMETRY_EXTENDED;
- ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
- HDMI_EXTENDED_COLORIMETRY_OPRGB;
- break;
- case V4L2_COLORSPACE_SMPTE170M:
- c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
- c = HDMI_COLORIMETRY_EXTENDED;
- ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- }
- break;
- case V4L2_COLORSPACE_REC709:
- c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
- c = HDMI_COLORIMETRY_EXTENDED;
- ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
- }
- break;
- case V4L2_COLORSPACE_SRGB:
- c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
- ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
- HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
- break;
- case V4L2_COLORSPACE_BT2020:
- c = HDMI_COLORIMETRY_EXTENDED;
- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
- ec = 5; /* Not yet available in hdmi.h */
- else
- ec = 6; /* Not yet available in hdmi.h */
- break;
- default:
- break;
- }
-
- /*
- * CEA-861-F says that for RGB formats the YCC range must match the
- * RGB range, although sources should ignore the YCC range.
- *
- * The RGB quantization range shouldn't be non-zero if the EDID doesn't
- * have the Q bit set in the Video Capabilities Data Block, however this
- * isn't checked at the moment. The assumption is that the application
- * knows the EDID and can detect this.
- *
- * The same is true for the YCC quantization range: non-standard YCC
- * quantization ranges should only be sent if the EDID has the YQ bit
- * set in the Video Capabilities Data Block.
- */
- switch (format->format.quantization) {
- case V4L2_QUANTIZATION_FULL_RANGE:
- q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
- HDMI_QUANTIZATION_RANGE_FULL;
- yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
- break;
- case V4L2_QUANTIZATION_LIM_RANGE:
- q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
- HDMI_QUANTIZATION_RANGE_LIMITED;
- yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
- break;
- }
-
- adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
- adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
- adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
- adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
- adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
- adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
- adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
-
- return 0;
-}
-
-static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
- .get_edid = adv7511_get_edid,
- .enum_mbus_code = adv7511_enum_mbus_code,
- .get_fmt = adv7511_get_fmt,
- .set_fmt = adv7511_set_fmt,
- .enum_dv_timings = adv7511_enum_dv_timings,
- .dv_timings_cap = adv7511_dv_timings_cap,
-};
-
-/* --------------------- SUBDEV OPS --------------------------------------- */
-
-static const struct v4l2_subdev_ops adv7511_ops = {
- .core = &adv7511_core_ops,
- .pad = &adv7511_pad_ops,
- .video = &adv7511_video_ops,
- .audio = &adv7511_audio_ops,
-};
-
-/* ----------------------------------------------------------------------- */
-static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
-{
- if (debug >= lvl) {
- int i, j;
- v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
- for (i = 0; i < 256; i += 16) {
- u8 b[128];
- u8 *bp = b;
- if (i == 128)
- v4l2_dbg(lvl, debug, sd, "\n");
- for (j = i; j < i + 16; j++) {
- sprintf(bp, "0x%02x, ", buf[j]);
- bp += 6;
- }
- bp[0] = '\0';
- v4l2_dbg(lvl, debug, sd, "%s\n", b);
- }
- }
-}
-
-static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- struct adv7511_edid_detect ed;
-
- /* We failed to read the EDID, so send an event for this. */
- ed.present = false;
- ed.segment = adv7511_rd(sd, 0xc4);
- ed.phys_addr = CEC_PHYS_ADDR_INVALID;
- cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
-}
-
-static void adv7511_edid_handler(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
- struct v4l2_subdev *sd = &state->sd;
-
- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
-
- if (adv7511_check_edid_status(sd)) {
- /* Return if we received the EDID. */
- return;
- }
-
- if (adv7511_have_hotplug(sd)) {
- /* We must retry reading the EDID several times, it is possible
- * that initially the EDID couldn't be read due to i2c errors
- * (DVI connectors are particularly prone to this problem). */
- if (state->edid.read_retries) {
- state->edid.read_retries--;
- v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
- state->have_monitor = false;
- adv7511_s_power(sd, false);
- adv7511_s_power(sd, true);
- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
- return;
- }
- }
-
- /* We failed to read the EDID, so send an event for this. */
- adv7511_notify_no_edid(sd);
- v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
-}
-
-static void adv7511_audio_setup(struct v4l2_subdev *sd)
-{
- v4l2_dbg(1, debug, sd, "%s\n", __func__);
-
- adv7511_s_i2s_clock_freq(sd, 48000);
- adv7511_s_clock_freq(sd, 48000);
- adv7511_s_routing(sd, 0, 0, 0);
-}
-
-/* Configure hdmi transmitter. */
-static void adv7511_setup(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- v4l2_dbg(1, debug, sd, "%s\n", __func__);
-
- /* Input format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
- /* Output format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
- /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
- adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
- /* Disable pixel repetition */
- adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
- /* Disable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
- /* Output format: RGB 4:4:4, Active Format Information is valid,
- * underscanned */
- adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
- /* AVI Info frame packet enable, Audio Info frame disable */
- adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
- /* Colorimetry, Active format aspect ratio: same as picure. */
- adv7511_wr(sd, 0x56, 0xa8);
- /* No encryption */
- adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
-
- /* Positive clk edge capture for input video clock */
- adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
-
- adv7511_audio_setup(sd);
-
- v4l2_ctrl_handler_setup(&state->hdl);
-}
-
-static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
-{
- struct adv7511_monitor_detect mdt;
- struct adv7511_state *state = get_adv7511_state(sd);
-
- mdt.present = state->have_monitor;
- v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
-}
-
-static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- /* read hotplug and rx-sense state */
- u8 status = adv7511_rd(sd, 0x42);
-
- v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
- __func__,
- status,
- status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
- status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
-
- /* update read only ctrls */
- v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
- v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
-
- if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
- v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
- if (!state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
- state->have_monitor = true;
- adv7511_set_isr(sd, true);
- if (!adv7511_s_power(sd, true)) {
- v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
- return;
- }
- adv7511_setup(sd);
- adv7511_notify_monitor_detect(sd);
- state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
- }
- } else if (status & MASK_ADV7511_HPD_DETECT) {
- v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
- state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
- } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
- v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
- if (state->have_monitor) {
- v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
- state->have_monitor = false;
- adv7511_notify_monitor_detect(sd);
- }
- adv7511_s_power(sd, false);
- memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
- adv7511_notify_no_edid(sd);
- }
-}
-
-static bool edid_block_verify_crc(u8 *edid_block)
-{
- u8 sum = 0;
- int i;
-
- for (i = 0; i < 128; i++)
- sum += edid_block[i];
- return sum == 0;
-}
-
-static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- u32 blocks = state->edid.blocks;
- u8 *data = state->edid.data;
-
- if (!edid_block_verify_crc(&data[segment * 256]))
- return false;
- if ((segment + 1) * 2 <= blocks)
- return edid_block_verify_crc(&data[segment * 256 + 128]);
- return true;
-}
-
-static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
-{
- static const u8 hdmi_header[] = {
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
- };
- struct adv7511_state *state = get_adv7511_state(sd);
- u8 *data = state->edid.data;
-
- if (segment != 0)
- return true;
- return !memcmp(data, hdmi_header, sizeof(hdmi_header));
-}
-
-static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- u8 edidRdy = adv7511_rd(sd, 0xc5);
-
- v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
- __func__, EDID_MAX_RETRIES - state->edid.read_retries);
-
- if (state->edid.complete)
- return true;
-
- if (edidRdy & MASK_ADV7511_EDID_RDY) {
- int segment = adv7511_rd(sd, 0xc4);
- struct adv7511_edid_detect ed;
-
- if (segment >= EDID_MAX_SEGM) {
- v4l2_err(sd, "edid segment number too big\n");
- return false;
- }
- v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
- adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
- adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
- if (segment == 0) {
- state->edid.blocks = state->edid.data[0x7e] + 1;
- v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
- }
- if (!edid_verify_crc(sd, segment) ||
- !edid_verify_header(sd, segment)) {
- /* edid crc error, force reread of edid segment */
- v4l2_err(sd, "%s: edid crc or header error\n", __func__);
- state->have_monitor = false;
- adv7511_s_power(sd, false);
- adv7511_s_power(sd, true);
- return false;
- }
- /* one more segment read ok */
- state->edid.segments = segment + 1;
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
- if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
- /* Request next EDID segment */
- v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
- adv7511_wr(sd, 0xc9, 0xf);
- adv7511_wr(sd, 0xc4, state->edid.segments);
- state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
- return false;
- }
-
- v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
- state->edid.complete = true;
- ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
- state->edid.segments * 256,
- NULL);
- /* report when we have all segments
- but report only for segment 0
- */
- ed.present = true;
- ed.segment = 0;
- state->edid_detect_counter++;
- cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
- return ed.present;
- }
-
- return false;
-}
-
-static int adv7511_registered(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int err;
-
- err = cec_register_adapter(state->cec_adap, &client->dev);
- if (err)
- cec_delete_adapter(state->cec_adap);
- return err;
-}
-
-static void adv7511_unregistered(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
-
- cec_unregister_adapter(state->cec_adap);
-}
-
-static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
- .registered = adv7511_registered,
- .unregistered = adv7511_unregistered,
-};
-
-/* ----------------------------------------------------------------------- */
-/* Setup ADV7511 */
-static void adv7511_init_setup(struct v4l2_subdev *sd)
-{
- struct adv7511_state *state = get_adv7511_state(sd);
- struct adv7511_state_edid *edid = &state->edid;
- u32 cec_clk = state->pdata.cec_clk;
- u8 ratio;
-
- v4l2_dbg(1, debug, sd, "%s\n", __func__);
-
- /* clear all interrupts */
- adv7511_wr(sd, 0x96, 0xff);
- adv7511_wr(sd, 0x97, 0xff);
- /*
- * Stop HPD from resetting a lot of registers.
- * It might leave the chip in a partly un-initialized state,
- * in particular with regards to hotplug bounces.
- */
- adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
- memset(edid, 0, sizeof(struct adv7511_state_edid));
- state->have_monitor = false;
- adv7511_set_isr(sd, false);
- adv7511_s_stream(sd, false);
- adv7511_s_audio_stream(sd, false);
-
- if (state->i2c_cec == NULL)
- return;
-
- v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
-
- /* cec soft reset */
- adv7511_cec_write(sd, 0x50, 0x01);
- adv7511_cec_write(sd, 0x50, 0x00);
-
- /* legacy mode */
- adv7511_cec_write(sd, 0x4a, 0x00);
- adv7511_cec_write(sd, 0x4a, 0x07);
-
- if (cec_clk % 750000 != 0)
- v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
- __func__, cec_clk);
-
- ratio = (cec_clk / 750000) - 1;
- adv7511_cec_write(sd, 0x4e, ratio << 2);
-}
-
-static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
- struct adv7511_state *state;
- struct adv7511_platform_data *pdata = client->dev.platform_data;
- struct v4l2_ctrl_handler *hdl;
- struct v4l2_subdev *sd;
- u8 chip_id[2];
- int err = -EIO;
-
- /* Check if the adapter supports the needed features */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
- state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- /* Platform data */
- if (!pdata) {
- v4l_err(client, "No platform data!\n");
- return -ENODEV;
- }
- memcpy(&state->pdata, pdata, sizeof(state->pdata));
- state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
- state->colorspace = V4L2_COLORSPACE_SRGB;
-
- sd = &state->sd;
-
- v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
- client->addr << 1);
-
- v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
- sd->internal_ops = &adv7511_int_ops;
-
- hdl = &state->hdl;
- v4l2_ctrl_handler_init(hdl, 10);
- /* add in ascending ID order */
- state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
- V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
- 0, V4L2_DV_TX_MODE_DVI_D);
- state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
- V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
- state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
- V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
- state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
- V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
- state->rgb_quantization_range_ctrl =
- v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
- V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
- 0, V4L2_DV_RGB_RANGE_AUTO);
- state->content_type_ctrl =
- v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
- V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
- 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
- sd->ctrl_handler = hdl;
- if (hdl->error) {
- err = hdl->error;
- goto err_hdl;
- }
- state->pad.flags = MEDIA_PAD_FL_SINK;
- sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
- err = media_entity_pads_init(&sd->entity, 1, &state->pad);
- if (err)
- goto err_hdl;
-
- /* EDID and CEC i2c addr */
- state->i2c_edid_addr = state->pdata.i2c_edid << 1;
- state->i2c_cec_addr = state->pdata.i2c_cec << 1;
- state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
-
- state->chip_revision = adv7511_rd(sd, 0x0);
- chip_id[0] = adv7511_rd(sd, 0xf5);
- chip_id[1] = adv7511_rd(sd, 0xf6);
- if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
- v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
- chip_id[1]);
- err = -EIO;
- goto err_entity;
- }
-
- state->i2c_edid = i2c_new_dummy(client->adapter,
- state->i2c_edid_addr >> 1);
- if (state->i2c_edid == NULL) {
- v4l2_err(sd, "failed to register edid i2c client\n");
- err = -ENOMEM;
- goto err_entity;
- }
-
- adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
- if (state->pdata.cec_clk < 3000000 ||
- state->pdata.cec_clk > 100000000) {
- v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
- __func__, state->pdata.cec_clk);
- state->pdata.cec_clk = 0;
- }
-
- if (state->pdata.cec_clk) {
- state->i2c_cec = i2c_new_dummy(client->adapter,
- state->i2c_cec_addr >> 1);
- if (state->i2c_cec == NULL) {
- v4l2_err(sd, "failed to register cec i2c client\n");
- err = -ENOMEM;
- goto err_unreg_edid;
- }
- adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
- } else {
- adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
- }
-
- state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
- if (state->i2c_pktmem == NULL) {
- v4l2_err(sd, "failed to register pktmem i2c client\n");
- err = -ENOMEM;
- goto err_unreg_cec;
- }
-
- state->work_queue = create_singlethread_workqueue(sd->name);
- if (state->work_queue == NULL) {
- v4l2_err(sd, "could not create workqueue\n");
- err = -ENOMEM;
- goto err_unreg_pktmem;
- }
-
- INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
-
- adv7511_init_setup(sd);
-
-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
- state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
- ADV7511_MAX_ADDRS);
- err = PTR_ERR_OR_ZERO(state->cec_adap);
- if (err) {
- destroy_workqueue(state->work_queue);
- goto err_unreg_pktmem;
- }
-#endif
-
- adv7511_set_isr(sd, true);
- adv7511_check_monitor_present_status(sd);
-
- v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
- client->addr << 1, client->adapter->name);
- return 0;
-
-err_unreg_pktmem:
- i2c_unregister_device(state->i2c_pktmem);
-err_unreg_cec:
- if (state->i2c_cec)
- i2c_unregister_device(state->i2c_cec);
-err_unreg_edid:
- i2c_unregister_device(state->i2c_edid);
-err_entity:
- media_entity_cleanup(&sd->entity);
-err_hdl:
- v4l2_ctrl_handler_free(&state->hdl);
- return err;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static int adv7511_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct adv7511_state *state = get_adv7511_state(sd);
-
- state->chip_revision = -1;
-
- v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
- client->addr << 1, client->adapter->name);
-
- adv7511_set_isr(sd, false);
- adv7511_init_setup(sd);
- cancel_delayed_work(&state->edid_handler);
- i2c_unregister_device(state->i2c_edid);
- if (state->i2c_cec)
- i2c_unregister_device(state->i2c_cec);
- i2c_unregister_device(state->i2c_pktmem);
- destroy_workqueue(state->work_queue);
- v4l2_device_unregister_subdev(sd);
- media_entity_cleanup(&sd->entity);
- v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static const struct i2c_device_id adv7511_id[] = {
- { "adv7511", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adv7511_id);
-
-static struct i2c_driver adv7511_driver = {
- .driver = {
- .name = "adv7511",
- },
- .probe = adv7511_probe,
- .remove = adv7511_remove,
- .id_table = adv7511_id,
-};
-
-module_i2c_driver(adv7511_driver);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 5168bb5880c4..3a543e435e61 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1248,9 +1248,11 @@ static int mt9m111_probe(struct i2c_client *client,
if (!mt9m111)
return -ENOMEM;

- ret = mt9m111_probe_fw(client, mt9m111);
- if (ret)
- return ret;
+ if (dev_fwnode(&client->dev)) {
+ ret = mt9m111_probe_fw(client, mt9m111);
+ if (ret)
+ return ret;
+ }

mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index dfece91ce96b..8207e7cf9923 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -761,7 +761,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,

fsize++;
}
-
+ if (i >= ARRAY_SIZE(ov7740_framesizes)) {
+ fsize = &ov7740_framesizes[0];
+ fmt->width = fsize->width;
+ fmt->height = fsize->height;
+ }
if (ret_frmsize != NULL)
*ret_frmsize = fsize;

diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index b8ec88612df7..8e2a66493e62 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -502,6 +502,7 @@ static long media_device_enum_links32(struct media_device *mdev,
{
struct media_links_enum links;
compat_uptr_t pads_ptr, links_ptr;
+ int ret;

memset(&links, 0, sizeof(links));

@@ -513,7 +514,14 @@ static long media_device_enum_links32(struct media_device *mdev,
links.pads = compat_ptr(pads_ptr);
links.links = compat_ptr(links_ptr);

- return media_device_enum_links(mdev, &links);
+ ret = media_device_enum_links(mdev, &links);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(ulinks->reserved, links.reserved,
+ sizeof(ulinks->reserved)))
+ return -EFAULT;
+ return 0;
}

#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 05f25c9bb308..f5ad3cf207d3 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1122,16 +1122,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
return 0;
}

+static struct proc_dir_entry *saa7164_pe;
+
static int saa7164_proc_create(void)
{
- struct proc_dir_entry *pe;
-
- pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show);
- if (!pe)
+ saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
+ if (!saa7164_pe)
return -ENOMEM;

return 0;
}
+
+static void saa7164_proc_destroy(void)
+{
+ if (saa7164_pe)
+ remove_proc_entry("saa7164", NULL);
+}
+#else
+static int saa7164_proc_create(void) { return 0; }
+static void saa7164_proc_destroy(void) {}
#endif

static int saa7164_thread_function(void *data)
@@ -1503,19 +1512,21 @@ static struct pci_driver saa7164_pci_driver = {

static int __init saa7164_init(void)
{
- printk(KERN_INFO "saa7164 driver loaded\n");
+ int ret = pci_register_driver(&saa7164_pci_driver);
+
+ if (ret)
+ return ret;

-#ifdef CONFIG_PROC_FS
saa7164_proc_create();
-#endif
- return pci_register_driver(&saa7164_pci_driver);
+
+ pr_info("saa7164 driver loaded\n");
+
+ return 0;
}

static void __exit saa7164_fini(void)
{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("saa7164", NULL);
-#endif
+ saa7164_proc_destroy();
pci_unregister_driver(&saa7164_pci_driver);
}

diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 692e08ef38c0..668d8827e281 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1600,8 +1600,9 @@ static int aspeed_video_init(struct aspeed_video *video)
return -ENODEV;
}

- rc = devm_request_irq(dev, irq, aspeed_video_irq, IRQF_SHARED,
- DEVICE_NAME, video);
+ rc = devm_request_threaded_irq(dev, irq, NULL, aspeed_video_irq,
+ IRQF_ONESHOT | IRQF_SHARED, DEVICE_NAME,
+ video);
if (rc < 0) {
dev_err(dev, "Unable to request IRQ %d\n", irq);
return rc;
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index eaa86737fa04..cc1920e68bc1 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1743,6 +1743,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
return ret;
}
+ ctx->sequence_offset = ~0U;
ctx->initialized = 1;

/* Update kfifo out pointer from coda bitstream read pointer */
@@ -2150,12 +2151,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
else if (ctx->display_idx < 0)
ctx->hold = true;
} else if (decoded_idx == -2) {
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
/* no frame was decoded, we still return remaining buffers */
} else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
v4l2_err(&dev->v4l2_dev,
"decoded frame index out of range: %d\n", decoded_idx);
} else {
- val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+ if (ctx->sequence_offset == -1)
+ ctx->sequence_offset = val;
val -= ctx->sequence_offset;
spin_lock(&ctx->buffer_meta_lock);
if (!list_empty(&ctx->buffer_meta_list)) {
@@ -2308,7 +2314,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
if (ctx == NULL) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
- mutex_unlock(&dev->coda_mutex);
return IRQ_HANDLED;
}

diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index fa0b22fb7991..9bf2116ffc76 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1007,6 +1007,8 @@ static int coda_encoder_cmd(struct file *file, void *fh,
/* Set the stream-end flag on this context */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;

+ flush_work(&ctx->pic_run_work);
+
/* If there is no buffer in flight, wake up */
if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 6216b7ac6875..a20cb6fff2ec 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1384,6 +1384,14 @@ static int initialize_vpif(void)
return err;
}

+static inline void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1653,7 +1661,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- goto cleanup;
+ goto vpif_free;
}

while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1700,7 +1708,9 @@ static __init int vpif_probe(struct platform_device *pdev)
"registered sub device %s\n",
subdevdata->name);
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err)
+ goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
@@ -1719,6 +1729,8 @@ static __init int vpif_probe(struct platform_device *pdev)
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();
cleanup:
v4l2_async_notifier_cleanup(&vpif_obj.notifier);

diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 19cf6853411e..89a86c19579b 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -518,6 +518,11 @@ static int __init vpss_init(void)
return -EBUSY;

oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ return -ENOMEM;
+ }
+
writel(VPSS_CLK_CTRL_VENCCLKEN |
VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);

diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index f1b301810260..0a6411b877e9 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -200,7 +200,6 @@ struct mcam_vb_buffer {
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
- int dma_desc_nent; /* Number of mapped descriptors */
};

static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -608,9 +607,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
struct mcam_vb_buffer *buf;
+ struct sg_table *sg_table;

buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
+ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
/*
* Very Bad Not Good Things happen if you don't clear
* C1_DESC_ENA before making any descriptor changes.
@@ -618,7 +619,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
mcam_reg_write(cam, REG_DESC_LEN_Y,
- buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ sg_table->nents * sizeof(struct mcam_dma_desc));
mcam_reg_write(cam, REG_DESC_LEN_U, 0);
mcam_reg_write(cam, REG_DESC_LEN_V, 0);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 6cfa8021721e..f81449b400c4 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -87,11 +87,11 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,

ret = of_address_to_resource(node, 0, &r);
if (ret)
- return ret;
+ goto err_put_node;

ret = request_firmware(&mdt, fwname, dev);
if (ret < 0)
- return ret;
+ goto err_put_node;

fw_size = qcom_mdt_get_size(mdt);
if (fw_size < 0) {
@@ -125,6 +125,8 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
memunmap(mem_va);
err_release_fw:
release_firmware(mdt);
+err_put_node:
+ of_node_put(node);
return ret;
}

diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 6bda1eee9170..4f103be215d3 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
#define FD1_IP_H3_ES1 0x02010101
#define FD1_IP_M3W 0x02010202
#define FD1_IP_H3 0x02010203
+#define FD1_IP_M3N 0x02010204
+#define FD1_IP_E3 0x02010205

/* LUTs */
#define FD1_LUT_DIF_ADJ 0x1000
@@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev)
case FD1_IP_H3:
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
+ case FD1_IP_M3N:
+ dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ break;
+ case FD1_IP_E3:
+ dprintk(fdp1, "FDP1 Version R-Car E3\n");
+ break;
default:
dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
hw_version);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 9a53d3908b52..2504fe9761bf 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -527,7 +527,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
dev);
ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
dev);
- ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+ if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
+ ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
get_min_scratch_buf_size, dev);
if (ctx->img_width == 0 || ctx->img_height == 0)
ctx->state = MFCINST_ERROR;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index eb85cedc5ef3..5e080f32b0e8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -38,6 +38,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
for (i = 0; i < pm->num_clocks; i++) {
pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
if (IS_ERR(pm->clocks[i])) {
+ /* additional clocks are optional */
+ if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
+ pm->clocks[i] = NULL;
+ continue;
+ }
mfc_err("Failed to get clock: %s\n",
pm->clk_names[i]);
return PTR_ERR(pm->clocks[i]);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index dd47821fc661..240327d2a3ad 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1355,7 +1355,7 @@ static int vim2m_probe(struct platform_device *pdev)
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto error_m2m;
+ goto error_dev;
}

ret = media_device_register(&dev->mdev);
@@ -1369,11 +1369,11 @@ static int vim2m_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-error_m2m:
- v4l2_m2m_release(dev->m2m_dev);
#endif
error_dev:
video_unregister_device(&dev->vfd);
+ /* vim2m_device_release is called by video_unregister_device to release various objects */
+ return ret;
error_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
error_free:
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index ea869631a3f6..bbc16072ec16 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -130,12 +130,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vimc_cap_device *vcap = video_drvdata(file);
+ int ret;

/* Do not change the format while stream is on */
if (vb2_is_busy(&vcap->queue))
return -EBUSY;

- vimc_cap_try_fmt_vid_cap(file, priv, f);
+ ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;

dev_dbg(vcap->dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index e25fd4d4d280..a1eaea19a81c 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -550,6 +550,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)

/* Register with V4L2 subsystem as RADIO device */
if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
+ v4l2_device_unregister(&fmdev->v4l2_dev);
fmerr("Could not register video device\n");
return -ENOMEM;
}
@@ -563,6 +564,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
if (ret < 0) {
fmerr("(fmdev): Can't init ctrl handler\n");
v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
+ video_unregister_device(fmdev->radio_dev);
+ v4l2_device_unregister(&fmdev->v4l2_dev);
return -EBUSY;
}

diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index 66334e8d63ba..c58f2d38a458 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = {
{ .compatible = "ir-spi-led" },
{},
};
+MODULE_DEVICE_TABLE(of, ir_spi_of_match);

static struct spi_driver ir_spi_driver = {
.probe = ir_spi_probe,
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 99951e02a880..dd063a736df5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -287,12 +287,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
void dvb_usb_device_exit(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = "generic DVB-USB module";
+ const char *default_name = "generic DVB-USB module";
+ char name[40];

usb_set_intfdata(intf, NULL);
if (d != NULL && d->desc != NULL) {
- name = d->desc->name;
+ strscpy(name, d->desc->name, sizeof(name));
dvb_usb_exit(d);
+ } else {
+ strscpy(name, default_name, sizeof(name));
}
info("%s successfully deinitialized and disconnected.", name);

diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index e082086428a4..ae6609716347 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -439,7 +439,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
/* wait for the first buffer */
if (!(file->f_flags & O_NONBLOCK)) {
if (wait_event_interruptible(dev->wait_data,
- hdpvr_get_next_buffer(dev)))
+ !list_empty_careful(&dev->rec_buff_list)))
return -ERESTARTSYS;
}

@@ -465,10 +465,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
goto err;
}
if (!err) {
- v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
- "timeout: restart streaming\n");
+ v4l2_info(&dev->v4l2_dev,
+ "timeout: restart streaming\n");
+ mutex_lock(&dev->io_mutex);
hdpvr_stop_streaming(dev);
- msecs_to_jiffies(4000);
+ mutex_unlock(&dev->io_mutex);
+ /*
+ * The FW needs about 4 seconds after streaming
+ * stopped before it is ready to restart
+ * streaming.
+ */
+ msleep(4000);
err = hdpvr_start_streaming(dev);
if (err) {
ret = err;
@@ -1133,9 +1140,7 @@ static void hdpvr_device_release(struct video_device *vdev)
struct hdpvr_device *dev = video_get_drvdata(vdev);

hdpvr_delete(dev);
- mutex_lock(&dev->io_mutex);
flush_work(&dev->worker);
- mutex_unlock(&dev->io_mutex);

v4l2_device_unregister(&dev->v4l2_dev);
v4l2_ctrl_handler_free(&dev->hdl);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 14cff91b7aea..aa021498036a 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2350,7 +2350,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;

- cancel_work_sync(&dev->async_ctrl.work);
+ /* Can be uninitialized if we are aborting on probe error. */
+ if (dev->async_ctrl.work.func)
+ cancel_work_sync(&dev->async_ctrl.work);

/* Free controls and control mappings for all entities. */
list_for_each_entry(entity, &dev->entities, list) {
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 96fee8d5b865..cd2bc9ed0cd9 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -703,7 +703,8 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);

strscpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
- strscpy(cap->card, cam->udev->product, sizeof(cap->card));
+ if (cam->udev->product)
+ strscpy(cap->card, cam->udev->product, sizeof(cap->card));
strscpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 54d66dbc2a31..6b1469d11f58 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -2148,15 +2148,6 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (size_extra_req)
new_ref->p_req.p = &new_ref[1];

- if (ctrl->handler == hdl) {
- /* By default each control starts in a cluster of its own.
- new_ref->ctrl is basically a cluster array with one
- element, so that's perfect to use as the cluster pointer.
- But only do this for the handler that owns the control. */
- ctrl->cluster = &new_ref->ctrl;
- ctrl->ncontrols = 1;
- }
-
INIT_LIST_HEAD(&new_ref->node);

mutex_lock(hdl->lock);
@@ -2189,6 +2180,15 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
hdl->buckets[bucket] = new_ref;
if (ctrl_ref)
*ctrl_ref = new_ref;
+ if (ctrl->handler == hdl) {
+ /* By default each control starts in a cluster of its own.
+ * new_ref->ctrl is basically a cluster array with one
+ * element, so that's perfect to use as the cluster pointer.
+ * But only do this for the handler that owns the control.
+ */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }

unlock:
mutex_unlock(hdl->lock);
@@ -2365,16 +2365,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);

- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
qmenu = v4l2_ctrl_get_menu(cfg->id);
- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
- qmenu_int == NULL) {
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index d6c9ebd8d263..278a4b679b8f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -584,11 +584,14 @@ static int msm_init_cm_dll(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
int wait_cnt = 50;
- unsigned long flags;
+ unsigned long flags, xo_clk = 0;
u32 config;
const struct sdhci_msm_offset *msm_offset =
msm_host->offset;

+ if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
+ xo_clk = clk_get_rate(msm_host->xo_clk);
+
spin_lock_irqsave(&host->lock, flags);

/*
@@ -636,10 +639,10 @@ static int msm_init_cm_dll(struct sdhci_host *host)
config &= CORE_FLL_CYCLE_CNT;
if (config)
mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
- clk_get_rate(msm_host->xo_clk));
+ xo_clk);
else
mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
- clk_get_rate(msm_host->xo_clk));
+ xo_clk);

config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config_2);
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 2c0e09187773..d56f8bbb7556 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -508,7 +508,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *timings;
- u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
+ u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
+ u32 thold;

timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
@@ -544,11 +545,28 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
twh &= 0xf;

- twst = timings->tWP_min / 1000;
+ /* Calculate real WE#/RE# hold time in nanosecond */
+ thold = (twh + 1) * 1000000 / rate;
+ /* nanosecond to picosecond */
+ thold *= 1000;
+
+ /*
+ * WE# low level time should be expanded to meet WE# pulse time
+ * and WE# cycle time at the same time.
+ */
+ if (thold < timings->tWC_min)
+ twst = timings->tWC_min - thold;
+ twst = max(timings->tWP_min, twst) / 1000;
twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
twst &= 0xf;

- trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
+ /*
+ * RE# low level time should be expanded to meet RE# pulse time,
+ * RE# access time and RE# cycle time at the same time.
+ */
+ if (thold < timings->tRC_min)
+ trlt = timings->tRC_min - thold;
+ trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000;
trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
trlt &= 0xf;

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index fa87ae28cdfe..2fa37bbb8794 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -572,12 +572,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
if (ret == -EBADMSG) {
ecc_failed = true;
mtd->ecc_stats.failed++;
- ret = 0;
} else {
mtd->ecc_stats.corrected += ret;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
}

+ ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 59e919b92873..7b9a18e36a93 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3866,8 +3866,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ int slave_cnt;
u32 slave_id;

/* Start with the curr_active_slave that joined the bond as the
@@ -3876,23 +3876,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
- slave = rcu_dereference(bond->curr_active_slave);
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- } else {
- int slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph;

- if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ goto non_igmp;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
+ return NETDEV_TX_OK;
}
}

+non_igmp:
+ slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+ } else {
+ bond_tx_drop(bond_dev, skb);
+ }
return NETDEV_TX_OK;
}

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecb1bd7eb508..78a01880931c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3858,9 +3858,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (bp->ptp_tx_skb) {
- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ bp->eth_stats.ptp_skip_tx_ts++;
+ netdev_err_once(bp->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 59f227fcc68b..0e1b884a5344 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -182,7 +182,9 @@ static const struct {
{ STATS_OFFSET32(driver_filtered_tx_pkt),
4, false, "driver_filtered_tx_pkt" },
{ STATS_OFFSET32(eee_tx_lpi),
- 4, true, "Tx LPI entry count"}
+ 4, true, "Tx LPI entry count"},
+ { STATS_OFFSET32(ptp_skip_tx_ts),
+ 4, false, "ptp_skipped_tx_tstamp" },
};

#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 626b491f7674..7a075f1f1242 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15243,11 +15243,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
u32 val_seq;
u64 timestamp, ns;
struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
+
+ /* FW may take a while to complete timestamping; try a bit and if it's
+ * still not complete, it may indicate an error state - bail out then.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }

- /* Read Tx timestamp registers */
- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
- if (val_seq & 0x10000) {
+ if (!bail) {
/* There is a valid timestamp value */
timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15262,16 +15275,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(bp->ptp_tx_skb);
- bp->ptp_tx_skb = NULL;

DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
timestamp, ns);
} else {
- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&bp->ptp_task);
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
}
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
}

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
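
The reworked bnx2x_ptp_task() above replaces open-ended rescheduling with a bounded poll: up to ten reads of the SEQID register with an exponentially growing msleep() between them, after which the pending skb is freed either way. A compact sketch of that bounded-backoff loop, with a hypothetical timestamp_ready() standing in for the register read:

#include <stdbool.h>
#include <unistd.h>

/* Stand-in for the "valid timestamp" bit (bit 16 of the SEQID register). */
static bool timestamp_ready(void)
{
    return false;
}

/* Poll up to 10 times, sleeping 1 ms, 2 ms, 4 ms, ... between attempts. */
static bool wait_for_timestamp(void)
{
    for (int i = 0; i < 10; i++) {
        if (timestamp_ready())
            return true;
        usleep((1u << i) * 1000);   /* mirrors msleep(1 << i) */
    }
    return false;   /* give up: count it as a skipped timestamp */
}

int main(void)
{
    return wait_for_timestamp() ? 0 : 1;
}
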
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b2644ed13d06..d55e63692cf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
u32 driver_filtered_tx_pkt;
/* src: Clear-on-Read register; Will not survive PMF Migration */
u32 eee_tx_lpi;
+
+ /* PTP */
+ u32 ptp_skip_tx_ts;
};

struct bnx2x_eth_q_stats {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 30cafe4cdb6e..09557bf49bb0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5481,7 +5481,16 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)

static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+ int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
+ int cp = bp->cp_nr_rings;
+
+ if (!ulp_stat)
+ return cp;
+
+ if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
+ return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+
+ return cp + ulp_stat;
}

static bool bnxt_need_reserve_rings(struct bnxt *bp)
@@ -7373,11 +7382,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)

unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
- unsigned int stat;
-
- stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
- stat -= bp->cp_nr_rings;
- return stat;
+ return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}

int bnxt_get_avail_msix(struct bnxt *bp, int num)
@@ -10165,10 +10170,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
- bnxt_cleanup_pci(bp);
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -10730,6 +10735,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)

if (system_state == SYSTEM_POWER_OFF) {
bnxt_clear_int_mode(bp);
+ pci_disable_device(pdev);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 878ccce1dfcd..87a9c5716958 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
*/
if (!is_valid_ether_addr(iap)) {
/* Report it and use a random ethernet address instead */
- netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
eth_hw_addr_random(ndev);
- netdev_info(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
return;
}

diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 17ab4f4af6ad..0da814618565 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -247,6 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)

ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}

list_del(&ae_algo->node);
@@ -347,6 +348,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)

ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}

list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c7d310903319..ecf6ad5bdc2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4,6 +4,9 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -24,8 +27,7 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

-static void hns3_clear_all_ring(struct hnae3_handle *h);
-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
@@ -72,23 +74,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}

-/* This callback function is used to set affinity changes to the irq affinity
- * masks when the irq_set_affinity_notifier function is used.
- */
-static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hns3_enet_tqp_vector *tqp_vectors =
- container_of(notify, struct hns3_enet_tqp_vector,
- affinity_notify);
-
- tqp_vectors->affinity_mask = *mask;
-}
-
-static void hns3_nic_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -100,8 +85,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;

- /* clear the affinity notifier and affinity mask */
- irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+ /* clear the affinity mask */
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

/* release the irq resource */
@@ -154,12 +138,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}

- tqp_vectors->affinity_notify.notify =
- hns3_nic_irq_affinity_notify;
- tqp_vectors->affinity_notify.release =
- hns3_nic_irq_affinity_release;
- irq_set_affinity_notifier(tqp_vectors->vector_irq,
- &tqp_vectors->affinity_notify);
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);

@@ -333,6 +311,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -344,11 +356,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;

+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
- return ret;
+ goto free_rmap;
}

clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
@@ -377,7 +394,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
hns3_vector_disable(&priv->tqp_vector[j]);

hns3_nic_uninit_irq(priv);
-
+free_rmap:
+ hns3_free_rx_cpu_rmap(netdev);
return ret;
}

@@ -440,6 +458,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
return 0;
}

+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct netdev_queue *dev_queue;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring_data[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
+}
+
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -460,10 +492,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);

+ hns3_free_rx_cpu_rmap(netdev);
+
/* free irq resources */
hns3_nic_uninit_irq(priv);

- hns3_clear_all_ring(priv->ae_handle);
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(netdev))
+ hns3_clear_all_ring(priv->ae_handle, false);
+
+ hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
@@ -1476,12 +1517,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
- struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_knic_private_info *kinfo = &h->kinfo;
u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
+ struct hnae3_knic_private_info *kinfo;
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
+ struct hnae3_handle *h;

if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@@ -1493,6 +1534,9 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;

+ h = hns3_get_handle(netdev);
+ kinfo = &h->kinfo;
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1826,9 +1870,9 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;

- if (!ae_dev) {
+ if (!ae_dev || !ae_dev->ops) {
dev_err(&pdev->dev,
- "Can't recover - error happened during device init\n");
+ "Can't recover - error happened before device initialized\n");
return PCI_ERS_RESULT_NONE;
}

@@ -1847,6 +1891,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)

dev_info(dev, "requesting reset due to PCI error\n");

+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+
/* request the reset */
if (ae_dev->ops->reset_event) {
if (!ae_dev->override_pci_need_reset)
@@ -3198,8 +3245,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_notifier(tqp_vector->vector_irq,
- NULL);
irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
free_irq(tqp_vector->vector_irq, tqp_vector);
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3712,7 +3757,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)

hns3_del_all_fd_rules(netdev, true);

- hns3_force_clear_all_rx_ring(handle);
+ hns3_clear_all_ring(handle, true);

hns3_uninit_phy(netdev);

@@ -3884,40 +3929,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
}
}

-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
-{
- struct net_device *ndev = h->kinfo.netdev;
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hns3_enet_ring *ring;
- u32 i;
-
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_force_clear_rx_ring(ring);
- }
-}
-
-static void hns3_clear_all_ring(struct hnae3_handle *h)
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
u32 i;

for (i = 0; i < h->kinfo.num_tqps; i++) {
- struct netdev_queue *dev_queue;
struct hns3_enet_ring *ring;

ring = priv->ring_data[i].ring;
hns3_clear_tx_ring(ring);
- dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
- netdev_tx_reset_queue(dev_queue);

ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
/* Continue to clear other rings even if clearing some
* rings failed.
*/
- hns3_clear_rx_ring(ring);
+ if (force)
+ hns3_force_clear_rx_ring(ring);
+ else
+ hns3_clear_rx_ring(ring);
}
}

@@ -4120,7 +4151,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}

- hns3_force_clear_all_rx_ring(handle);
+ hns3_clear_all_ring(handle, true);
+ hns3_reset_tx_queue(priv->ae_handle);

hns3_nic_uninit_vector_data(priv);
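
In hns3_nic_net_up() above the rx cpu rmap setup is best-effort (a failure only produces a warning, since the device works without aRFS), while the later IRQ setup is fatal and unwinds the rmap through the new free_rmap label. A compact sketch of that two-tier error handling, with setup_rmap()/setup_irqs() as illustrative stand-ins:

#include <stdio.h>

static int setup_rmap(void)  { return 0; }   /* optional: aRFS steering */
static int setup_irqs(void)  { return -1; }  /* mandatory: fails for demo */
static void free_rmap(void)  { }

static int net_up(void)
{
    int ret;

    ret = setup_rmap();
    if (ret)                        /* optional step: warn and carry on */
        fprintf(stderr, "rmap setup failed (%d), continuing\n", ret);

    ret = setup_irqs();
    if (ret) {                      /* mandatory step: unwind and bail */
        fprintf(stderr, "irq setup failed (%d)\n", ret);
        free_rmap();
        return ret;
    }
    return 0;
}

int main(void)
{
    return net_up() ? 1 : 0;
}
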

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index ea94b5152963..cf20fa6768d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -241,11 +241,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)

skb_get(skb);
tx_ret = hns3_nic_net_xmit(skb, ndev);
- if (tx_ret == NETDEV_TX_OK)
+ if (tx_ret == NETDEV_TX_OK) {
good_cnt++;
- else
+ } else {
+ kfree_skb(skb);
netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
tx_ret);
+ }
}
if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6d4d5a470163..14d37c26196b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -550,8 +550,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;

for (i = 0; i < size; i++) {
- snprintf(buff, ETH_GSTRING_LEN,
- strs[i].desc);
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}

@@ -890,6 +889,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 48eda2c6fdae..71a6f7c734b6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -215,6 +215,13 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);

+ /* supported flag is Pause and Asym Pause, but default advertising
+ * should be rx on, tx on, so need clear Asym Pause in advertising
+ * flag
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
return 0;
}

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index a7bbb6d3091a..0d53062f7bb5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -54,7 +54,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u32 tick;

/* Calc tick */
- if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
+ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+ ir > HCLGE_ETHER_MAX_RATE)
return -EINVAL;

tick = tick_array[shaper_level];
@@ -1124,6 +1125,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
int ret;
u8 i;

+ if (vport->vport_id >= HNAE3_MAX_TC)
+ return -EINVAL;
+
ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8dd7fef863f6..d7a15d5b6b61 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2425,6 +2425,12 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}

+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ return ret;
+ }
+
dev_info(&hdev->pdev->dev, "Reset done\n");

return 0;
@@ -2504,9 +2510,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
* firmware makes sure broadcast packets can be accepted.
* For revision 0x21, default to enable broadcast promisc mode.
*/
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ goto err_config;
+ }

/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..2a261d849d5a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif

+ if (!size)
+ return;
+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);

@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;

+ if (!size)
+ return NULL;
+
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);

@@ -1290,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1299,7 +1305,10 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
unsigned int headlen;
struct sk_buff *skb;

+ if (!rx_buffer)
+ return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
@@ -1354,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1363,7 +1372,10 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;

+ if (!rx_buffer)
+ return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
@@ -1398,6 +1410,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer)
{
+ if (!rx_buffer)
+ return;
+
if (iavf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1511,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
* verified the descriptor has been written back.
*/
dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+ if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+ break;

size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
- if (!size)
- break;

iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1516,7 +1532,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- rx_buffer->pagecnt_bias++;
+ if (rx_buffer)
+ rx_buffer->pagecnt_bias++;
break;
}
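
The iavf receive loop above now breaks on the descriptor's DD (descriptor done) bit instead of inferring completion from a zero length, and the buffer helpers tolerate a zero size or NULL buffer. A small sketch of checking the done flag before trusting the rest of a descriptor; struct rx_desc and the bit positions are illustrative, not the iavf layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_desc {
    uint64_t qword;                 /* status bits + packet length */
};

#define DESC_DONE  (1ull << 0)      /* illustrative bit positions */
#define LEN_SHIFT  38
#define LEN_MASK   0x3fffull

static bool desc_complete(const struct rx_desc *d)
{
    return d->qword & DESC_DONE;    /* only then is the length valid */
}

static unsigned int desc_len(const struct rx_desc *d)
{
    return (d->qword >> LEN_SHIFT) & LEN_MASK;
}

int main(void)
{
    struct rx_desc d = { .qword = DESC_DONE | (64ull << LEN_SHIFT) };

    if (!desc_complete(&d))
        return 0;                   /* stop: hardware not done yet */
    printf("packet length %u\n", desc_len(&d));
    return 0;
}
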

diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 580d14b49fda..a725dc709632 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5687,6 +5687,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
*/
if (tx_ring->launchtime_enable) {
ts = ns_to_timespec64(first->skb->tstamp);
+ first->skb->tstamp = 0;
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index acba067cc15a..7c52ae8ac005 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3226,7 +3226,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
page_swap = true;
}

- if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+ !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
/* We have a SFP, but it does not support SFF-8472 */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index ff85ce5791a3..31629fc7e820 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -842,6 +842,9 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
struct ixgbe_ipsec *ipsec = adapter->ipsec;
int i;

+ if (!ipsec)
+ return;
+
/* search rx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
if (!ipsec->rx_tbl[i].used)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 214b01085718..6544c4539c0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -45,6 +45,7 @@
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_DDM_IMPLEMENTED 0x40
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c5dac6bd2be4..ee7857298361 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -64,7 +64,7 @@

struct orion_mdio_dev {
void __iomem *regs;
- struct clk *clk[3];
+ struct clk *clk[4];
/*
* If we have access to the error interrupt pin (which is
* somewhat misnamed as it not only reflects internal errors
@@ -321,6 +321,10 @@ static int orion_mdio_probe(struct platform_device *pdev)

for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
if (IS_ERR(dev->clk[i]))
break;
clk_prepare_enable(dev->clk[i]);
@@ -362,6 +366,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (dev->err_interrupt > 0)
writel(0, dev->regs + MVMDIO_ERR_INT_MASK);

+out_clk:
for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
if (IS_ERR(dev->clk[i]))
break;
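
The mvmdio probe change above distinguishes "clock not described" (stop iterating) from "clock provider not ready yet" (-EPROBE_DEFER), which has to be propagated so the driver core retries the probe later, after disabling the clocks already enabled. A sketch of that distinction, where get_clock() is an illustrative stand-in for of_clk_get() and -EAGAIN plays the role of -EPROBE_DEFER:

#include <errno.h>
#include <stdio.h>

#define NCLKS 4

/* Illustrative: <0 is an error; -EAGAIN means "try the probe again later". */
static int get_clock(int i)
{
    return (i < 2) ? 0 : -ENOENT;
}

static void put_clocks(int n) { (void)n; /* disable/release n clocks */ }

static int probe(void)
{
    int i, ret;

    for (i = 0; i < NCLKS; i++) {
        ret = get_clock(i);
        if (ret == -EAGAIN) {       /* provider not ready: unwind, retry later */
            put_clocks(i);
            return ret;
        }
        if (ret < 0)                /* clock simply not there: done */
            break;
    }
    return 0;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}
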
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index ae2240074d8e..5692c6087bbb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}

/* Set value */
- pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+ shift & MVPP2_PRS_SRAM_SHIFT_MASK;

/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8a67fd197b79..16ed6ebd31ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -950,7 +950,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->vport);
+ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
@@ -1068,7 +1068,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->vport);
+ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 866cdc86a3f2..08045fd69fad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3441,6 +3441,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
+ u64 db_phys_addr,
enum qed_pci_personality personality)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -3449,6 +3450,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;

if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
@@ -3544,7 +3546,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
cdev->regview,
- cdev->doorbells, personality);
+ cdev->doorbells,
+ cdev->db_phys_addr,
+ personality);
if (rc)
return rc;

@@ -3553,22 +3557,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the rest of the hwfns */
if (cdev->num_hwfns > 1) {
void __iomem *p_regview, *p_doorbell;
- u8 __iomem *addr;
+ u64 db_phys_addr;
+ u32 offset;

/* adjust bar offset for second engine */
- addr = cdev->regview +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_0) / 2;
- p_regview = addr;
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = cdev->regview + offset;

- addr = cdev->doorbells +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_1) / 2;
- p_doorbell = addr;
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+
+ p_doorbell = cdev->doorbells + offset;
+
+ db_phys_addr = cdev->db_phys_addr + offset;

/* prepare second hw function */
rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
- p_doorbell, personality);
+ p_doorbell, db_phys_addr,
+ personality);

/* in case of error, need to free the previously
* initiliazed hwfn 0.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ded556b7bab5..eeea8683d99b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2708,6 +2708,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
data.input.rx_num_desc = n_ooo_bufs * 2;
data.input.tx_num_desc = data.input.rx_num_desc;
data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
data.input.secondary_queue = true;
data.cbs = &cbs;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 7873d6dfd91f..13802b825d65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size));

- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size);

diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index cba5881b2746..a10ef700f16d 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,7 +1029,6 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int i;

dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
@@ -1040,19 +1039,6 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
if (!dring->desc)
goto err;

- if (id == NETSEC_RING_TX) {
- for (i = 0; i < DESC_NUM; i++) {
- struct netsec_de *de;
-
- de = dring->vaddr + (DESC_SZ * i);
- /* de->attr is not going to be accessed by the NIC
- * until netsec_set_tx_de() is called.
- * No need for a dma_wmb() here
- */
- de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
- }
- }
-
return 0;
err:
netsec_free_dring(priv, id);
@@ -1060,6 +1046,23 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
return -ENOMEM;
}

+static void netsec_setup_tx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ int i;
+
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_de *de;
+
+ de = dring->vaddr + (DESC_SZ * i);
+ /* de->attr is not going to be accessed by the NIC
+ * until netsec_set_tx_de() is called.
+ * No need for a dma_wmb() here
+ */
+ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ }
+}
+
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
@@ -1361,6 +1364,7 @@ static int netsec_netdev_open(struct net_device *ndev)

pm_runtime_get_sync(priv->dev);

+ netsec_setup_tx_dring(priv);
ret = netsec_setup_rx_dring(priv);
if (ret) {
netif_err(priv, probe, priv->ndev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 272b9ca66314..b069b3a2453b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -261,7 +261,7 @@ struct stmmac_safety_stats {
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 25
+#define STMMAC_TX_FRAMES 1

/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index ba124a4da793..8325e6499739 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -893,6 +893,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
* address. No need to mask it again.
*/
reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ } else {
+ /* For SoCs without internal PHY the PHY selection bit should be
+ * set to 0 (external PHY).
+ */
+ reg &= ~H3_EPHY_SELECT;
}

if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 0877bde6e860..21d131347e2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -216,6 +216,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg));
reg++;
}
+
+ while (reg <= perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}

#ifdef FRAME_FILTER_DEBUG
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 7e5d5db0d516..d0e6e1503581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -444,14 +444,20 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
* are required
*/
value |= GMAC_PACKET_FILTER_PR;
- } else if (!netdev_uc_empty(dev)) {
- int reg = 1;
+ } else {
struct netdev_hw_addr *ha;
+ int reg = 1;

netdev_for_each_uc_addr(ha, dev) {
dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
+
+ while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}

writel(value, ioaddr + GMAC_PACKET_FILTER);
@@ -469,8 +475,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (fc & FLOW_RX) {
pr_debug("\tReceive Flow-Control ON\n");
flow |= GMAC_RX_FLOW_CTRL_RFE;
- writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
}
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");

@@ -478,7 +485,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

for (queue = 0; queue < tx_cnt; queue++) {
- flow |= GMAC_TX_FLOW_CTRL_TFE;
+ flow = GMAC_TX_FLOW_CTRL_TFE;

if (duplex)
flow |=
@@ -486,6 +493,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,

writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
+ } else {
+ for (queue = 0; queue < tx_cnt; queue++)
+ writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
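
Both stmmac set_filter changes above keep walking past the last programmed address and zero every remaining perfect-filter slot, so MAC addresses removed from the list stop matching in hardware. The idea in a tiny sketch, with an array standing in for the filter registers:

#include <stdio.h>

#define MAX_FILTER_SLOTS 8

static unsigned long long filter[MAX_FILTER_SLOTS + 1]; /* slot 0 reserved */

static void program_filters(const unsigned long long *addrs, int n)
{
    int reg = 1;                        /* slot 0 holds the own MAC */

    for (int i = 0; i < n && reg <= MAX_FILTER_SLOTS; i++)
        filter[reg++] = addrs[i];       /* program the wanted entries */

    while (reg <= MAX_FILTER_SLOTS)
        filter[reg++] = 0;              /* wipe stale leftovers */
}

int main(void)
{
    unsigned long long macs[] = { 0x020102030405ull };

    program_filters(macs, 1);
    for (int i = 1; i <= MAX_FILTER_SLOTS; i++)
        printf("slot %d: %llx\n", i, filter[i]);
    return 0;
}
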

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a634054dcb11..f3735d0458eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2058,6 +2058,9 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];

+ if (status)
+ status |= handle_rx | handle_tx;
+
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
napi_schedule_irqoff(&ch->rx_napi);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4041c75997ba..38a8ef194e05 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -614,6 +614,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)

ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}

@@ -669,9 +673,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (axienet_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}

if (skb->ip_summed == CHECKSUM_PARTIAL) {
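
The axienet transmit path above uses the classic stop-queue/recheck dance: stop the queue, issue a full barrier that pairs with the one added in the completion handler, then re-test for free descriptors and wake the queue if the race was lost. A user-space analogue of that pairing with C11 fences; ring accounting, queue state and the xmit/completion split are all illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int free_slots = 0;
static atomic_bool queue_stopped = false;

/* Transmit side: called when the ring looks full. */
static bool stop_and_recheck(void)
{
    atomic_store(&queue_stopped, true);
    atomic_thread_fence(memory_order_seq_cst);  /* pairs with completion */

    if (atomic_load(&free_slots) == 0)
        return false;                           /* really full: back off */

    atomic_store(&queue_stopped, false);        /* space appeared: go on */
    return true;
}

/* Completion side: descriptors were just reclaimed. */
static void tx_done(int reclaimed)
{
    atomic_fetch_add(&free_slots, reclaimed);
    atomic_thread_fence(memory_order_seq_cst);  /* pairs with xmit side */
    atomic_store(&queue_stopped, false);        /* wake the queue */
}

int main(void)
{
    tx_done(4);
    return stop_and_recheck() ? 0 : 1;
}
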
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7a145172d503..d178d5bad7e4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -289,16 +289,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

-static void gtp_encap_destroy(struct sock *sk)
+static void __gtp_encap_destroy(struct sock *sk)
{
struct gtp_dev *gtp;

- gtp = rcu_dereference_sk_user_data(sk);
+ lock_sock(sk);
+ gtp = sk->sk_user_data;
if (gtp) {
+ if (gtp->sk0 == sk)
+ gtp->sk0 = NULL;
+ else
+ gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL);
sock_put(sk);
}
+ release_sock(sk);
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+ rtnl_lock();
+ __gtp_encap_destroy(sk);
+ rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
@@ -306,7 +319,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
if (!sk)
return;

- gtp_encap_destroy(sk);
+ __gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
@@ -800,7 +813,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
goto out_sock;
}

- if (rcu_dereference_sk_user_data(sock->sk)) {
+ lock_sock(sock->sk);
+ if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_sock;
}
@@ -816,6 +830,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_sock:
+ release_sock(sock->sk);
sockfd_put(sock);
return sk;
}
@@ -847,8 +862,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])

if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN)
+ if (role > GTP_ROLE_SGSN) {
+ if (sk0)
+ gtp_encap_disable_sock(sk0);
+ if (sk1u)
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
+ }
}

gtp->sk0 = sk0;
@@ -949,7 +969,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,

}

- pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
return -ENOMEM;

@@ -1038,6 +1058,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}

+ rtnl_lock();
rcu_read_lock();

gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
@@ -1062,6 +1083,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)

out_unlock:
rcu_read_unlock();
+ rtnl_unlock();
return err;
}

@@ -1363,9 +1385,9 @@ late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
- unregister_pernet_subsys(&gtp_net_ops);
genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops);
+ unregister_pernet_subsys(&gtp_net_ops);

pr_info("GTP module unloaded\n");
}
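
The gtp changes above move both the "is this socket already in use?" test and the detach path under lock_sock(), so concurrent configuration paths racing on the same UDP socket cannot both claim it or tear it down twice. The check-and-claim-under-a-lock shape in a user-space sketch (user_data plays the role of sk_user_data):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *user_data;                 /* plays the role of sk_user_data */

static int claim(void *owner)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    if (user_data)                      /* already attached elsewhere */
        ret = -1;
    else
        user_data = owner;              /* attach while still holding lock */
    pthread_mutex_unlock(&lock);
    return ret;
}

static void release(void *owner)
{
    pthread_mutex_lock(&lock);
    if (user_data == owner)             /* only the owner detaches it */
        user_data = NULL;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    int me;

    printf("first claim:  %d\n", claim(&me));
    printf("second claim: %d\n", claim(&me));
    release(&me);
    return 0;
}
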
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f6a6cc5bf118..e748aee82033 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -948,6 +948,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
{
int rc;

+ if (!dev)
+ return -EINVAL;
+
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -1290,6 +1293,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
struct device *d;
int rc;

+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 71812be0ac64..b6efd2d41dce 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -186,10 +186,11 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];

bool attached;
+ struct mutex st_mutex; /* Protects state */
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
- struct mutex sm_mutex;
+ struct mutex sm_mutex; /* Protects state machine */
unsigned char sm_mod_state;
unsigned char sm_dev_state;
unsigned short sm_state;
@@ -1719,6 +1720,7 @@ static void sfp_check_state(struct sfp *sfp)
{
unsigned int state, i, changed;

+ mutex_lock(&sfp->st_mutex);
state = sfp_get_state(sfp);
changed = state ^ sfp->state;
changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
@@ -1744,6 +1746,7 @@ static void sfp_check_state(struct sfp *sfp)
sfp_sm_event(sfp, state & SFP_F_LOS ?
SFP_E_LOS_HIGH : SFP_E_LOS_LOW);
rtnl_unlock();
+ mutex_unlock(&sfp->st_mutex);
}

static irqreturn_t sfp_irq(int irq, void *data)
@@ -1774,6 +1777,7 @@ static struct sfp *sfp_alloc(struct device *dev)
sfp->dev = dev;

mutex_init(&sfp->sm_mutex);
+ mutex_init(&sfp->st_mutex);
INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);

diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 3d93993e74da..2eca4168af2f 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -238,7 +238,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
int i;
unsigned long gpio_bits = dev->driver_info->data;

@@ -689,7 +689,7 @@ static int asix_resume(struct usb_interface *intf)
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, i;
- u8 buf[ETH_ALEN], chipcode = 0;
+ u8 buf[ETH_ALEN] = {0}, chipcode = 0;
u32 phyid;
struct asix_common_private *priv;

@@ -1073,7 +1073,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};

usbnet_get_endpoints(dev,intf);

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 38ecb66fb3e9..82c25f07261f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -806,6 +806,14 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
return f;
}

+static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
+ __be32 src_vni, struct vxlan_fdb *f)
+{
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
@@ -831,18 +839,13 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return rc;
}

- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-
*fdb = f;

return 0;
}

-static void vxlan_fdb_free(struct rcu_head *head)
+static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;

list_for_each_entry_safe(rd, nd, &f->remotes, list) {
@@ -852,6 +855,13 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f);
}

+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ __vxlan_fdb_free(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify)
{
@@ -979,6 +989,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0)
return rc;

+ vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@ -3573,12 +3584,17 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
if (err)
goto errout;

- /* notify default fdb entry */
if (f) {
+ vxlan_fdb_insert(vxlan, all_zeros_mac,
+ vxlan->default_dst.remote_vni, f);
+
+ /* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
- if (err)
- goto errout;
+ if (err) {
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ goto unregister;
+ }
}

list_add(&vxlan->next, &vn->vxlan_list);
@@ -3590,7 +3606,8 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
* destroy the entry by hand here.
*/
if (f)
- vxlan_fdb_destroy(vxlan, f, false, false);
+ __vxlan_fdb_free(f);
+unregister:
if (unregister)
unregister_netdevice(dev);
return err;
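
The vxlan change above separates allocating an FDB entry from publishing it in the hash table, so an entry that was never made visible can be freed directly via __vxlan_fdb_free() while a published one still goes through the RCU-deferred path. A compact sketch of that create/publish split with made-up names (the exact ordering relative to the notifier differs per call site in the driver):

#include <stdlib.h>

struct entry {
    struct entry *next;
    int key;
};

static struct entry *table;                 /* the "published" list */

static struct entry *entry_create(int key)  /* allocated, not yet visible */
{
    struct entry *e = calloc(1, sizeof(*e));

    if (e)
        e->key = key;
    return e;
}

static void entry_publish(struct entry *e)  /* make it visible to readers */
{
    e->next = table;
    table = e;
}

static int post_create_step(struct entry *e) { (void)e; return -1; }

int main(void)
{
    struct entry *e = entry_create(42);

    if (!e)
        return 1;

    if (post_create_step(e)) {
        free(e);            /* never published: a plain free suffices */
        return 1;
    }
    entry_publish(e);       /* published entries need deferred teardown */
    return 0;
}
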
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index c704ae371c4d..42931a669b02 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -663,6 +663,13 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,

mutex_lock(&ar->conf_mutex);

+ if (!arsta->tx_stats) {
+ ath10k_warn(ar, "failed to get tx stats");
+ mutex_unlock(&ar->conf_mutex);
+ kfree(buf);
+ return 0;
+ }
+
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1acc622d2183..f22840bbc389 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2277,7 +2277,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macro.
*/
- if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
tx_done.msdu_id, tx_done.status);
ath10k_txrx_tx_unref(htt, &tx_done);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index ad082b7d7643..b242085c3c16 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -158,7 +158,7 @@ const struct ath10k_hw_values qca6174_values = {
};

const struct ath10k_hw_values qca99x0_values = {
- .rtc_state_val_on = 5,
+ .rtc_state_val_on = 7,
.ce_count = 12,
.msi_assign_ce_max = 12,
.num_target_ce_config_wlan = 10,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 9c703d287333..b500fd427595 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1630,6 +1630,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;

+ /* For mesh, probe response and beacon share the same template */
+ if (ieee80211_vif_is_mesh(vif))
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -5588,8 +5592,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate, basic_rate_idx;
- int rateidx, ret = 0, hw_rate_code;
+ u8 rate, basic_rate_idx, rateidx;
+ int ret = 0, hw_rate_code, mcast_rate;
enum nl80211_band band;
const struct ieee80211_supported_band *sband;

@@ -5776,7 +5780,11 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath10k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
- rateidx = vif->bss_conf.mcast_rate[band] - 1;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;

if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 2c27f407a851..6e5f7ae00253 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2059,6 +2059,11 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)

ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
* memory is to reset the chip now.
@@ -2072,10 +2077,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
*/
ath10k_pci_safe_chip_reset(ar);

- ath10k_pci_irq_disable(ar);
- ath10k_pci_irq_sync(ar);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
ath10k_pci_flush(ar);

spin_lock_irqsave(&ar_pci->ps_lock, flags);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index a7bc2c70d076..8f8f717a23ee 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1002,6 +1002,7 @@ int ath10k_qmi_deinit(struct ath10k *ar)
qmi_handle_release(&qmi->qmi_hdl);
cancel_work_sync(&qmi->event_work);
destroy_workqueue(qmi->event_wq);
+ kfree(qmi);
ar_snoc->qmi = NULL;

return 0;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index fae56c67766f..28bdf0212538 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -602,6 +602,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len,
last_in_bundle,
last_in_bundle);
+ if (ret) {
+ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+ goto err;
+ }
}

ar_sdio->n_rx_pkts = i;
@@ -2077,6 +2081,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
cancel_work_sync(&ar_sdio->wr_async_work);
ath10k_core_unregister(ar);
ath10k_core_destroy(ar);
+
+ flush_workqueue(ar_sdio->workqueue);
+ destroy_workqueue(ar_sdio->workqueue);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index c5818d28f55a..4102df016931 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -150,6 +150,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;

+ if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
+ return NULL;
+
lockdep_assert_held(&ar->data_lock);

list_for_each_entry(peer, &ar->peers, list)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 582fb11f648a..02709fc99034 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -2840,8 +2840,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control))
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
buf_len += IEEE80211_CCMP_MIC_LEN;
+ }

buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
buf_len = round_up(buf_len, 4);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e1c40bb69932..12f57f9adbba 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4535,9 +4535,10 @@ enum wmi_10_4_stats_id {
};

enum wmi_tlv_stats_id {
- WMI_TLV_STAT_PDEV = BIT(0),
- WMI_TLV_STAT_VDEV = BIT(1),
- WMI_TLV_STAT_PEER = BIT(2),
+ WMI_TLV_STAT_PEER = BIT(0),
+ WMI_TLV_STAT_AP = BIT(1),
+ WMI_TLV_STAT_PDEV = BIT(2),
+ WMI_TLV_STAT_VDEV = BIT(3),
WMI_TLV_STAT_PEER_EXTD = BIT(10),
};

diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 68854c45d0a4..9ab6aa9ded5c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1176,6 +1176,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;

ev = (struct wmi_pstream_timeout_event *) datap;
+ if (ev->traffic_class >= WMM_NUM_AC) {
+ ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+ return -EINVAL;
+ }

/*
* When the pstream (fat pipe == AC) timesout, it means there were
@@ -1517,6 +1521,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;

reply = (struct wmi_cac_event *) datap;
+ if (reply->ac >= WMM_NUM_AC) {
+ ath6kl_err("invalid AC: %d\n", reply->ac);
+ return -EINVAL;
+ }

if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2633,7 +2641,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
u16 active_tsids = 0;
int ret;

- if (traffic_class > 3) {
+ if (traffic_class >= WMM_NUM_AC) {
ath6kl_err("invalid traffic class: %d\n", traffic_class);
return -EINVAL;
}
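
All three ath6kl changes above validate an index taken from a firmware event (traffic_class / ac) against WMM_NUM_AC before it is used to address per-AC state. The guard in isolation, with NUM_AC and the array as illustrative stand-ins:

#include <stdio.h>

#define NUM_AC 4

static int per_ac_state[NUM_AC];

/* Reject out-of-range indices coming from an untrusted event payload. */
static int handle_event(unsigned int ac)
{
    if (ac >= NUM_AC) {
        fprintf(stderr, "invalid AC: %u\n", ac);
        return -1;
    }
    per_ac_state[ac]++;
    return 0;
}

int main(void)
{
    handle_event(2);    /* accepted */
    handle_event(7);    /* rejected */
    return 0;
}
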
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8581d917635a..b6773d613f0c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
/* Chip Revisions */
/******************/

-static void ath9k_hw_read_revisions(struct ath_hw *ah)
+static bool ath9k_hw_read_revisions(struct ath_hw *ah)
{
+ u32 srev;
u32 val;

if (ah->get_mac_revision)
@@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
- return;
+ return true;
case AR9300_DEVID_AR9340:
ah->hw_version.macVersion = AR_SREV_VERSION_9340;
- return;
+ return true;
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
- return;
+ return true;
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
- return;
+ return true;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
- return;
+ return true;
}

- val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+ srev = REG_READ(ah, AR_SREV);
+
+ if (srev == -EIO) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to read SREV register");
+ return false;
+ }
+
+ val = srev & AR_SREV_ID;

if (val == 0xFF) {
- val = REG_READ(ah, AR_SREV);
+ val = srev;
ah->hw_version.macVersion =
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
@@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
ah->is_pciexpress = true;
}
+
+ return true;
}

/************************************/
@@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;

- ath9k_hw_read_revisions(ah);
+ if (!ath9k_hw_read_revisions(ah)) {
+ ath_err(common, "Could not read hardware revisions");
+ return -EOPNOTSUPP;
+ }

switch (ah->hw_version.macVersion) {
case AR_SREV_VERSION_5416_PCI:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4e97f7f3b2a3..06e660858766 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -815,6 +815,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
+ bool is_phyerr;

/*
* Discard corrupt descriptors which are marked in
@@ -827,8 +828,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,

/*
* Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
*/
- if (rx_stats->rs_datalen < 10) {
+ is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
+ if (!rx_stats->rs_datalen ||
+ (rx_stats->rs_datalen < 10 && !is_phyerr)) {
RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b17e1ca40995..3be0aeedb9b5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -668,7 +668,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_tx_count_airtime(struct ath_softc *sc,
struct ieee80211_sta *sta,
struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ u8 tid)
{
u32 airtime = 0;
int i;
@@ -679,7 +680,7 @@ static void ath_tx_count_airtime(struct ath_softc *sc,
airtime += rate_dur * bf->rates[i].count;
}

- ieee80211_sta_register_airtime(sta, ts->tid, airtime, 0);
+ ieee80211_sta_register_airtime(sta, tid, airtime, 0);
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
@@ -709,7 +710,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
- ath_tx_count_airtime(sc, sta, bf, ts);
+ ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index d52b31b45df7..a274eb0d1968 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 3f5bd177d55f..b00a13d6d530 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;

+ wil6210_mask_irq_rx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);

if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx(wil);
return IRQ_NONE;
}

- wil6210_mask_irq_rx(wil);
-
/* RX_DONE and RX_HTRSH interrupts are the same if interrupt
* moderation is not used. Interrupt moderation may cause RX
* buffer overflow while RX_DONE is delayed. The required
@@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_INT_GEN_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;

+ wil6210_mask_irq_rx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);

if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx_edma(wil);
return IRQ_NONE;
}

- wil6210_mask_irq_rx_edma(wil);
-
if (likely(isr & BIT_RX_STATUS_IRQ)) {
wil_dbg_irq(wil, "RX status ring\n");
isr &= ~BIT_RX_STATUS_IRQ;
@@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_INT_GEN_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;

+ wil6210_mask_irq_tx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);

if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx_edma(wil);
return IRQ_NONE;
}

- wil6210_mask_irq_tx_edma(wil);
-
if (likely(isr & BIT_TX_STATUS_IRQ)) {
wil_dbg_irq(wil, "TX status ring\n");
isr &= ~BIT_TX_STATUS_IRQ;
@@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;

+ wil6210_mask_irq_tx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);

if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx(wil);
return IRQ_NONE;
}

- wil6210_mask_irq_tx(wil);
-
if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
@@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
+
+ wil6210_mask_irq_misc(wil, false);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));

trace_wil6210_irq_misc(isr);
wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);

if (!isr) {
wil_err(wil, "spurious IRQ: MISC\n");
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_NONE;
}

- wil6210_mask_irq_misc(wil, false);
-
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
u32 ucode_assert_code =
@@ -580,7 +595,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
/* no need to handle HALP ICRs until next vote */
wil->halp.handle_icr = false;
wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
- wil6210_mask_halp(wil);
+ wil6210_mask_irq_misc(wil, true);
complete(&wil->halp.comp);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ccfd1404458..d74837cce67f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -750,6 +750,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_HELD] = "GRO_HELD",
[GRO_NORMAL] = "GRO_NORMAL",
[GRO_DROP] = "GRO_DROP",
+ [GRO_CONSUMED] = "GRO_CONSUMED",
};

wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 63116f4b62c7..de52e532c105 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -3211,7 +3211,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id &&
wil->reply_mid == mid) {
- WARN_ON(wil->reply_buf);
+ if (wil->reply_buf) {
+ /* event received while wmi_call is waiting
+ * with a buffer. Such an event should be handled
+ * in the wmi_recv_cmd function. Handling the event
+ * here means a previous wmi_call timed out.
+ * Drop the event and do not handle it.
+ */
+ wil_err(wil,
+ "Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
+ id, eventid2name(id));
+ return;
+ }

wmi_evt_call_handler(vif, id, evt_data,
len - sizeof(*wmi));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index d7380016f1c0..c30f626b1602 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -2146,8 +2146,6 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
fwrt->fw->dbg.dest_tlv) {
- /* wait before we collect the data till the DBGC stop */
- udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index a199056234d3..97fcd57e17d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -297,7 +297,10 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
}

iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
+ /* wait for the DBGC to finish writing the internal buffer to DRAM to
+ * avoid halting the HW while writing
+ */
+ usleep_range(700, 1000);
iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans->dbg_rec_on = false;
@@ -327,7 +330,6 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
} else {
iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
- udelay(100);
iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index ff85d69c2a8c..557ee47bffd8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
.len = { 0, },
};
struct iwl_rx_packet *pkt;
+ int ret;

if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
@@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
else
cmd.id = SHARED_MEM_CFG;

- if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
+ ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
+
+ if (ret) {
+ WARN(ret != -ERFKILL,
+ "Could not send the SMEM command: %d\n", ret);
return;
+ }

pkt = cmd.resp_pkt;
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index e539bc94eff7..426c90bb13b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -335,6 +335,7 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105100)
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
+#define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)

diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 153717587aeb..559f6df1a74d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -419,6 +419,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)

lockdep_assert_held(&mvm->mutex);

+ mvm->rfkill_safe_init_done = false;
+
iwl_init_notification_wait(&mvm->notif_wait,
&init_wait,
init_complete,
@@ -537,8 +539,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)

lockdep_assert_held(&mvm->mutex);

- if (WARN_ON_ONCE(mvm->rfkill_safe_init_done))
- return 0;
+ mvm->rfkill_safe_init_done = false;

iwl_init_notification_wait(&mvm->notif_wait,
&calib_wait,
@@ -1108,10 +1109,13 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)

iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);

+ mvm->rfkill_safe_init_done = false;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
return ret;

+ mvm->rfkill_safe_init_done = true;
+
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);

return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4ddf620c267d..5caadaef707d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -207,6 +207,12 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
},
};

+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!iwl_mvm_is_d0i3_supported(mvm))
@@ -2535,7 +2541,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret;
+ int ret, i;

/*
* iwl_mvm_mac_ctxt_add() might read directly from the device
@@ -2609,6 +2615,20 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;

+ /* send all the early keys to the device now */
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i];
+
+ if (!key)
+ continue;
+
+ mvmvif->ap_early_keys[i] = NULL;
+
+ ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ if (ret)
+ goto out_quota_failed;
+ }
+
if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
iwl_mvm_vif_set_low_latency(mvmvif, true,
LOW_LATENCY_VIF_TYPE);
@@ -3378,11 +3398,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_key_pn *ptk_pn;
int keyidx = key->keyidx;
- int ret;
+ int ret, i;
u8 key_offset;

if (iwlwifi_mod_params.swcrypto) {
@@ -3455,6 +3476,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
}
+
+ if (!mvmvif->ap_ibss_active) {
+ for (i = 0;
+ i < ARRAY_SIZE(mvmvif->ap_early_keys);
+ i++) {
+ if (!mvmvif->ap_early_keys[i]) {
+ mvmvif->ap_early_keys[i] = key;
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
+ ret = -ENOSPC;
+
+ break;
+ }
}

/* During FW restart, in order to restore the state as it was,
@@ -3523,6 +3560,18 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,

break;
case DISABLE_KEY:
+ ret = -ENOENT;
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ if (mvmvif->ap_early_keys[i] == key) {
+ mvmvif->ap_early_keys[i] = NULL;
+ ret = 0;
+ }
+ }
+
+ /* found in pending list - don't do anything else */
+ if (ret == 0)
+ break;
+
if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
ret = 0;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b698d55ace1b..e1f90a4ae14f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -498,6 +498,9 @@ struct iwl_mvm_vif {
netdev_features_t features;

struct iwl_probe_resp_data __rcu *probe_resp_data;
+
+ /* we can only have 2 GTK + 2 IGTK active at a time */
+ struct ieee80211_key_conf *ap_early_keys[4];
};

static inline struct iwl_mvm_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0c2aabc842f9..96f8d38ea321 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -726,6 +726,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)

memcpy(&info, skb->cb, sizeof(info));

+ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+ return -1;
+
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;

diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 1e36459948db..504c535309d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -168,7 +168,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,

memcpy(iml_img, trans->iml, trans->iml_len);

- iwl_enable_interrupts(trans);
+ iwl_enable_fw_load_int_ctx_info(trans);

/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_ADDR,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 9274e317cc77..eeb349134056 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -222,7 +222,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,

trans_pcie->ctxt_info = ctxt_info;

- iwl_enable_interrupts(trans);
+ iwl_enable_fw_load_int_ctx_info(trans);

/* Configure debug, if exists */
if (iwl_pcie_dbg_on(trans))
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 2afce5c41322..98128024b95e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -894,6 +894,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}

+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
+
+ if (!trans_pcie->msix_enabled) {
+ /*
+ * When we receive the ALIVE interrupt, the ISR will call
+ * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
+ * interrupt (which is not really needed anymore) but also the
+ * RX interrupt which will allow us to receive the ALIVE
+ * notification (which is Rx) and continue the flow.
+ */
+ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_enable_hw_int_msk_msix(trans,
+ MSIX_HW_INT_CAUSES_REG_ALIVE);
+ /*
+ * Leave all the FH causes enabled to get the ALIVE
+ * notification.
+ */
+ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
+ }
+}
+
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 12f02aaf923e..8c124debf53a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1832,26 +1832,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
goto out;
}

- if (iwl_have_debug_level(IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(trans,
- "Scheduler finished to transmit the frame/frames.\n");
- isr_stats->sch++;
- }
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
+ isr_stats->sch++;
+ }

- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(trans, "Alive interrupt\n");
- isr_stats->alive++;
- if (trans->cfg->gen2) {
- /*
- * We can restock, since firmware configured
- * the RFH
- */
- iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
- }
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /*
+ * We can restock, since firmware configured
+ * the RFH
+ */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
+
+ handled |= CSR_INT_BIT_ALIVE;
}

/* Safely ignore these bits for debug checks below */
@@ -1970,6 +1970,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans);
spin_unlock(&trans_pcie->irq_lock);

out:
@@ -2113,10 +2116,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}

- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
- inta_fh,
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_fh, trans_pcie->fh_mask,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if (inta_fh & ~trans_pcie->fh_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta_fh & ~trans_pcie->fh_mask);
+ }
+
+ inta_fh &= trans_pcie->fh_mask;

if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
@@ -2156,11 +2167,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}

/* After checking FH register check HW register */
- if (iwl_have_debug_level(IWL_DL_ISR))
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
IWL_DEBUG_ISR(trans,
- "ISR inta_hw 0x%08x, enabled 0x%08x\n",
- inta_hw,
+ "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_hw, trans_pcie->hw_mask,
iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+ if (inta_hw & ~trans_pcie->hw_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt 0x%08x\n",
+ inta_hw & ~trans_pcie->hw_mask);
+ }
+
+ inta_hw &= trans_pcie->hw_mask;

/* Alive notification via Rx interrupt will do the real work */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 9c203ca75de9..8e0b40c4f4a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -272,6 +272,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
* paging memory cannot be freed included since FW will still use it
*/
iwl_pcie_ctxt_info_free(trans);
+
+ /*
+ * Re-enable all the interrupts, including the RF-Kill one, now that
+ * the firmware is alive.
+ */
+ iwl_enable_interrupts(trans);
+ mutex_lock(&trans_pcie->mutex);
+ iwl_pcie_check_hw_rf_kill(trans);
+ mutex_unlock(&trans_pcie->mutex);
}

int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 80695584e406..8ccfe0226818 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3562,9 +3562,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
+ (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index f7edeffb2b19..401444f36402 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb)
struct mt7601u_rx_queue *q = &dev->rx_q;
unsigned long flags;

- spin_lock_irqsave(&dev->rx_lock, flags);
+ /* do not schedule rx tasklet if urb has been unlinked
+ * or the device has been removed
+ */
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }

- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->rx_lock, flags);
if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
goto out;

@@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb)
struct sk_buff *skb;
unsigned long flags;

- spin_lock_irqsave(&dev->tx_lock, flags);
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }

- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->tx_lock, flags);
if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
goto out;

skb = q->e[q->start].skb;
+ q->e[q->start].skb = NULL;
trace_mt_tx_dma_done(dev, skb);

__skb_queue_tail(&dev->tx_skb_done, skb);
@@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- int next = dev->rx_q.end;

- spin_unlock_irqrestore(&dev->rx_lock, flags);
- usb_poison_urb(dev->rx_q.e[next].urb);
- spin_lock_irqsave(&dev->rx_lock, flags);
- }
-
- spin_unlock_irqrestore(&dev->rx_lock, flags);
+ for (i = 0; i < dev->rx_q.entries; i++)
+ usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
@@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
int i;

- WARN_ON(q->used);
-
for (i = 0; i < q->entries; i++) {
usb_poison_urb(q->e[i].urb);
+ if (q->e[i].skb)
+ mt7601u_tx_status(q->dev, q->e[i].skb);
usb_free_urb(q->e[i].urb);
}
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 3600e911a63e..4d81c45722fb 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;

- spin_lock(&dev->mac_lock);
+ spin_lock_bh(&dev->mac_lock);
ieee80211_tx_status(dev->hw, skb);
- spin_unlock(&dev->mac_lock);
+ spin_unlock_bh(&dev->mac_lock);
}

static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 086aad22743d..139bcdb42536 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -367,14 +367,9 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

- if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;

- /*
- * Report the frame as DMA done
- */
- rt2x00lib_dmadone(entry);
-
/*
* Check if the received data is simply too small
* to be actually valid, or if the urb is signaling
@@ -383,6 +378,11 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
if (urb->actual_length < entry->queue->desc_size || urb->status)
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

+ /*
+ * Report the frame as DMA done
+ */
+ rt2x00lib_dmadone(entry);
+
/*
* Schedule the delayed work for reading the RX status
* from the device.
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index e24fda5e9087..34d68dbf4b4c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1064,13 +1064,13 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->cfg->ops->read_eeprom_info(hw);
err = _rtl_usb_init(hw);
if (err)
- goto error_out;
+ goto error_out2;
rtl_usb_init_sw(hw);
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
pr_err("Can't allocate sw for mac80211\n");
- goto error_out;
+ goto error_out2;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
@@ -1091,6 +1091,7 @@ int rtl_usb_probe(struct usb_interface *intf,

error_out:
rtl_deinit_core(hw);
+error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 0453f49dc708..326f02ffca81 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -126,7 +126,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index dde9853453d3..e901e3a3b04c 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -36,6 +36,7 @@ struct nd_pfn_sb {
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
__le32 align;
+ /* minor-version-3 guarantee the padding and flags are zero */
u8 padding[4000];
__le64 checksum;
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index d271bd731af7..f0e918186504 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -420,6 +420,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
return 0;
}

+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails, and a coherent info-block
+ * otherwise.
+ */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -565,7 +574,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
@@ -702,7 +711,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
u64 checksum;
int rc;

- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;

@@ -711,11 +720,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = DAX_SIG;
else
sig = PFN_SIG;
+
rc = nd_pfn_validate(nd_pfn, sig);
if (rc != -ENODEV)
return rc;

/* no info block, do init */;
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
@@ -768,7 +780,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(2);
+ pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->start_pad = cpu_to_le32(start_pad);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3a390b2c7540..cbbdd3dae5a1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3341,6 +3341,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
return;

nvme_fault_inject_fini(ns);
+
+ mutex_lock(&ns->ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ns->ctrl->subsys->lock);
+ synchronize_rcu(); /* guarantee not available in head->list */
+ nvme_mpath_clear_current_path(ns);
+ synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
@@ -3348,16 +3356,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_integrity_unregister(ns->disk);
}

- mutex_lock(&ns->ctrl->subsys->lock);
- list_del_rcu(&ns->siblings);
- nvme_mpath_clear_current_path(ns);
- mutex_unlock(&ns->ctrl->subsys->lock);
-
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);

- synchronize_srcu(&ns->head->srcu);
nvme_mpath_check_last_path(ns);
nvme_put_ns(ns);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 693f2a856200..914eea2ea557 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2085,6 +2085,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, this_p_queues;
+ unsigned int nr_cpus = num_possible_cpus();

/*
* Poll queues don't need interrupts, but we need at least one IO
@@ -2095,7 +2096,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- irq_queues = nr_io_queues - this_p_queues + 1;
+ if (nr_cpus < nr_io_queues - this_p_queues)
+ irq_queues = nr_cpus + 1;
+ else
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;

@@ -2504,11 +2508,13 @@ static void nvme_reset_work(struct work_struct *work)
struct nvme_dev *dev =
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
- int result = -ENODEV;
+ int result;
enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;

- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
+ if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+ result = -ENODEV;
goto out;
+ }

/*
* If we're called to reset a live controller first shut it down before
@@ -2545,6 +2551,7 @@ static void nvme_reset_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(dev->ctrl.device,
"failed to mark controller CONNECTING\n");
+ result = -EBUSY;
goto out;
}

@@ -2605,6 +2612,7 @@ static void nvme_reset_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
dev_warn(dev->ctrl.device,
"failed to mark controller state %d\n", new_state);
+ result = -ENODEV;
goto out;
}

diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0420f7e8ad5b..f5d565804e52 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -631,7 +631,7 @@ static int _set_opp_custom(const struct opp_table *opp_table,

data->old_opp.rate = old_freq;
size = sizeof(*old_supply) * opp_table->regulator_count;
- if (IS_ERR(old_supply))
+ if (!old_supply)
memset(data->old_opp.supplies, 0, size);
else
memcpy(data->old_opp.supplies, old_supply, size);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index a7f703556790..e292801fff7f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(100);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 82acd6155adf..40b625458afa 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
static void hv_eject_device_work(struct work_struct *work)
{
struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
struct pci_dev *pdev;
unsigned long flags;
@@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work)
} ctxt;

hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;

WARN_ON(hpdev->state != hv_pcichild_ejecting);

@@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work)
* because hbus->pci_bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
- wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work)
pci_unlock_rescan_remove();
}

- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);

if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
@@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct work_struct *work)
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);

@@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct work_struct *work)
/* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
- put_hvpcibus(hpdev->hbus);
+ /* hpdev has been freed. Do not use it any more. */
+
+ put_hvpcibus(hbus);
}

/**
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 766f5779db92..b1e923cdef30 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -999,15 +999,10 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+ * Mandatory power management transition delays are
+ * handled in the PCIe portdrv resume hooks.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -2057,6 +2052,13 @@ static void pci_pme_list_scan(struct work_struct *work)
*/
if (bridge && bridge->current_state != PCI_D0)
continue;
+ /*
+ * If the device is in D3cold it should not be
+ * polled either.
+ */
+ if (pme_dev->dev->current_state == PCI_D3cold)
+ continue;
+
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
@@ -4581,14 +4583,16 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)

return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
{
int timeout = 1000;
bool ret;
@@ -4625,13 +4629,25 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}

+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9cb99380c61e..59802b3def4b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -493,6 +493,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
u32 service);

+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..308c3e0c4a34 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -378,6 +379,67 @@ static int pm_iter(struct device *dev, void *data)
return 0;
}

+static int get_downstream_delay(struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (!pdev->imm_ready)
+ min_delay = 0;
+ else if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
+/*
+ * wait_for_downstream_link - Wait for downstream link to establish
+ * @pdev: PCIe port whose downstream link is waited for
+ *
+ * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
+ * access to the downstream component is permitted.
+ *
+ * This blocks PCI core resume of the hierarchy below this port until the
+ * link is trained. Should be called before resuming port services to
+ * prevent pciehp from starting to tear down the hierarchy too soon.
+ */
+static void wait_for_downstream_link(struct pci_dev *pdev)
+{
+ int delay;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ if (pci_dev_is_disconnected(pdev))
+ return;
+
+ if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
+ !pdev->bridge_d3)
+ return;
+
+ delay = get_downstream_delay(pdev->subordinate);
+ if (!delay)
+ return;
+
+ dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
+
+ /*
+ * If the downstream port does not support speeds greater than 5 GT/s,
+ * we need to wait 100 ms. For higher speeds (gen3) we need to wait
+ * first for the data link layer to become active.
+ */
+ if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
+ msleep(delay);
+ else
+ pcie_wait_for_link_delay(pdev, true, delay);
+}
+
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@@ -391,6 +453,8 @@ int pcie_port_device_suspend(struct device *dev)
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}

@@ -421,6 +485,8 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index f85d6b7a1984..5d2b2c02cbbe 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -369,7 +369,9 @@ static int pfn_set(void *data, u64 val)
{
*(u64 *)data = val;

- return cec_add_elem(val);
+ cec_add_elem(val);
+
+ return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 4d7fe4819c1c..4e95e3d0fcd5 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -299,6 +299,8 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"da9211-enable");
+ if (IS_ERR(pdata->gpiod_ren[n]))
+ pdata->gpiod_ren[n] = NULL;
n++;
}

diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 134c62db36c5..8812c2c3cfc2 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
@@ -821,9 +821,12 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s2mps11-regulator");
- if (IS_ERR(gpio[reg])) {
+ if (PTR_ERR(gpio[reg]) == -ENOENT)
+ gpio[reg] = NULL;
+ else if (IS_ERR(gpio[reg])) {
dev_err(&pdev->dev, "Failed to get control GPIO for %d/%s\n",
reg, rdata[reg].name);
+ gpio[reg] = NULL;
continue;
}
if (gpio[reg])
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index bb9d1a083299..6ca27e9d5ef7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -574,7 +574,9 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
- if (IS_ERR(rdata->ext_control_gpiod))
+ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
+ rdata->ext_control_gpiod = NULL;
+ else if (IS_ERR(rdata->ext_control_gpiod))
return PTR_ERR(rdata->ext_control_gpiod);

rdata->id = i;
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 0614551796a1..f6466db57900 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -381,11 +381,12 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
"dcdc-ext-control-gpios", 0,
gflags,
"tps65090");
- if (IS_ERR(rpdata->gpiod))
- return ERR_CAST(rpdata->gpiod);
- if (!rpdata->gpiod)
+ if (PTR_ERR(rpdata->gpiod) == -ENOENT) {
dev_err(&pdev->dev,
"could not find DCDC external control GPIO\n");
+ rpdata->gpiod = NULL;
+ } else if (IS_ERR(rpdata->gpiod))
+ return ERR_CAST(rpdata->gpiod);
}

if (of_property_read_u32(tps65090_matches[idx].of_node,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9537e656e927..06b94b2ee199 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -738,6 +738,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)

switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index d94496ee6883..296bbc3c4606 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
+#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
@@ -741,6 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,

static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
+ const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
int req_id = req->req_id;
@@ -757,8 +759,20 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
return -EIO;
}

+ /*
+ * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
+ * ONLY TOUCH SYNC req AGAIN ON req->completion.
+ *
+ * The request might complete and be freed concurrently at any point
+ * now. This is not protected by the QDIO-lock (req_q_lock). So any
+ * uncontrolled access after this might result in a use-after-free bug.
+ * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
+ * when it is completed via req->completion, is it safe to use req
+ * again.
+ */
+
/* Don't increase for unsolicited status */
- if (!zfcp_fsf_req_is_status_read_buffer(req))
+ if (!is_srb)
adapter->fsf_req_seq_no++;
adapter->req_no++;

@@ -805,6 +819,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
retval = zfcp_fsf_req_send(req);
if (retval)
goto failed_req_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */

goto out;

@@ -914,8 +929,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
req->qtcb->bottom.support.req_handle = (u64) old_req_id;

zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }

out_error_free:
zfcp_fsf_req_free(req);
@@ -1098,6 +1115,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */

goto out;

@@ -1198,6 +1216,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */

goto out;

@@ -1243,6 +1262,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1279,8 +1299,10 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }

zfcp_fsf_req_free(req);
return retval;
@@ -1330,6 +1352,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1372,8 +1395,10 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);

- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }

zfcp_fsf_req_free(req);

@@ -1493,6 +1518,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1557,6 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1600,6 +1627,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;

spin_lock_irq(&qdio->req_q_lock);
@@ -1622,14 +1650,17 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;

+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
return retval;
}

@@ -1655,6 +1686,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;

spin_lock_irq(&qdio->req_q_lock);
@@ -1677,14 +1709,17 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
req->data = wka_port;
req->qtcb->header.port_handle = wka_port->handle;

+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
return retval;
}

@@ -1776,6 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1899,6 +1935,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -1987,6 +2024,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
@@ -2299,6 +2337,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */

goto out;

@@ -2373,8 +2412,10 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }

zfcp_fsf_req_free(req);
req = NULL;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 01c23d27f290..b1a663d67aea 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -710,6 +710,8 @@ static void NCR5380_main(struct work_struct *work)
NCR5380_information_transfer(instance);
done = 0;
}
+ if (!hostdata->connected)
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
spin_unlock_irq(&hostdata->lock);
if (!done)
cond_resched();
@@ -1111,8 +1113,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_reselect(instance);
- if (!hostdata->connected)
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
goto out;
}
@@ -1120,7 +1120,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
if (err < 0) {
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);

/* Can't touch cmd if it has been reclaimed by the scsi ML */
if (!hostdata->selecting)
@@ -1158,7 +1157,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
goto out;
}
if (!hostdata->selecting) {
@@ -1764,10 +1762,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
scmd_printk(KERN_INFO, cmd,
"switching to slow handshake\n");
cmd->device->borken = 1;
- sink = 1;
- do_abort(instance);
- cmd->result = DID_ERROR << 16;
- /* XXX - need to source or sink data here, as appropriate */
+ do_reset(instance);
+ bus_reset_cleanup(instance);
}
} else {
/* Transfer a small chunk so that the
@@ -1827,9 +1823,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);

- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
@@ -1861,8 +1854,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);

- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1965,7 +1956,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return;
}
msgout = NOP;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index efca509b92b0..5935fd6d1a05 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -235,7 +235,7 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256

/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
-#define NCR5380_REG_POLL_TIME 15
+#define NCR5380_REG_POLL_TIME 10

static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 8b4b5b1a13d7..27364b71e833 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -3,6 +3,8 @@
*
* Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
*
+ * Copyright 2019 Finn Thain
+ *
* derived in part from:
*/
/*
@@ -11,6 +13,7 @@
* Copyright 1995, Russell King
*/

+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -52,7 +55,7 @@ static int setup_cmd_per_lun = -1;
module_param(setup_cmd_per_lun, int, 0);
static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
-static int setup_use_pdma = -1;
+static int setup_use_pdma = 512;
module_param(setup_use_pdma, int, 0);
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
@@ -89,101 +92,217 @@ static int __init mac_scsi_setup(char *str)
__setup("mac5380=", mac_scsi_setup);
#endif /* !MODULE */

-/* Pseudo DMA asm originally by Ove Edlund */
-
-#define CP_IO_TO_MEM(s,d,n) \
-__asm__ __volatile__ \
- (" cmp.w #4,%2\n" \
- " bls 8f\n" \
- " move.w %1,%%d0\n" \
- " neg.b %%d0\n" \
- " and.w #3,%%d0\n" \
- " sub.w %%d0,%2\n" \
- " bra 2f\n" \
- " 1: move.b (%0),(%1)+\n" \
- " 2: dbf %%d0,1b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #5,%%d0\n" \
- " bra 4f\n" \
- " 3: move.l (%0),(%1)+\n" \
- "31: move.l (%0),(%1)+\n" \
- "32: move.l (%0),(%1)+\n" \
- "33: move.l (%0),(%1)+\n" \
- "34: move.l (%0),(%1)+\n" \
- "35: move.l (%0),(%1)+\n" \
- "36: move.l (%0),(%1)+\n" \
- "37: move.l (%0),(%1)+\n" \
- " 4: dbf %%d0,3b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #2,%%d0\n" \
- " and.w #7,%%d0\n" \
- " bra 6f\n" \
- " 5: move.l (%0),(%1)+\n" \
- " 6: dbf %%d0,5b\n" \
- " and.w #3,%2\n" \
- " bra 8f\n" \
- " 7: move.b (%0),(%1)+\n" \
- " 8: dbf %2,7b\n" \
- " moveq.l #0, %2\n" \
- " 9: \n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "91: moveq.l #1, %2\n" \
- " jra 9b\n" \
- "94: moveq.l #4, %2\n" \
- " jra 9b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,91b\n" \
- " .long 3b,94b\n" \
- " .long 31b,94b\n" \
- " .long 32b,94b\n" \
- " .long 33b,94b\n" \
- " .long 34b,94b\n" \
- " .long 35b,94b\n" \
- " .long 36b,94b\n" \
- " .long 37b,94b\n" \
- " .long 5b,94b\n" \
- " .long 7b,91b\n" \
- ".previous" \
- : "=a"(s), "=a"(d), "=d"(n) \
- : "0"(s), "1"(d), "2"(n) \
- : "d0")
+/*
+ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
+ * specify the number of bytes between the delays expected from a SCSI target.
+ * This allows the operating system to "prevent bus errors when a target fails
+ * to deliver the next byte within the processor bus error timeout period."
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+ * and simply retry. That assumption probably depends on target behaviour but
+ * seems to hold up okay. The NOP provides synchronization: without it the
+ * fault can sometimes occur after the program counter has moved past the
+ * offending instruction. Post-increment addressing can't be used.
+ */
+
+#define MOVE_BYTE(operands) \
+ asm volatile ( \
+ "1: moveb " operands " \n" \
+ "11: nop \n" \
+ " addq #1,%0 \n" \
+ " subq #1,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #1, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 11b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+/*
+ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
+ * the residual byte count would be uncertain. In that situation the MOVE_WORD
+ * macro clears n in the fixup section to abort the transfer.
+ */
+
+#define MOVE_WORD(operands) \
+ asm volatile ( \
+ "1: movew " operands " \n" \
+ "11: nop \n" \
+ " subq #2,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #0, %1 \n" \
+ " movel #2, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 11b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MOVE_16_WORDS(operands) \
+ asm volatile ( \
+ "1: movew " operands " \n" \
+ "2: movew " operands " \n" \
+ "3: movew " operands " \n" \
+ "4: movew " operands " \n" \
+ "5: movew " operands " \n" \
+ "6: movew " operands " \n" \
+ "7: movew " operands " \n" \
+ "8: movew " operands " \n" \
+ "9: movew " operands " \n" \
+ "10: movew " operands " \n" \
+ "11: movew " operands " \n" \
+ "12: movew " operands " \n" \
+ "13: movew " operands " \n" \
+ "14: movew " operands " \n" \
+ "15: movew " operands " \n" \
+ "16: movew " operands " \n" \
+ "17: nop \n" \
+ " subl #32,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #0, %1 \n" \
+ " movel #2, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 2b,90b \n" \
+ ".long 3b,90b \n" \
+ ".long 4b,90b \n" \
+ ".long 5b,90b \n" \
+ ".long 6b,90b \n" \
+ ".long 7b,90b \n" \
+ ".long 8b,90b \n" \
+ ".long 9b,90b \n" \
+ ".long 10b,90b \n" \
+ ".long 11b,90b \n" \
+ ".long 12b,90b \n" \
+ ".long 13b,90b \n" \
+ ".long 14b,90b \n" \
+ ".long 15b,90b \n" \
+ ".long 16b,90b \n" \
+ ".long 17b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MAC_PDMA_DELAY 32
+
+static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
+{
+ unsigned char *addr = start;
+ int result = 0;
+
+ if (n >= 1) {
+ MOVE_BYTE("%3@,%0@");
+ if (result)
+ goto out;
+ }
+ if (n >= 1 && ((unsigned long)addr & 1)) {
+ MOVE_BYTE("%3@,%0@");
+ if (result)
+ goto out;
+ }
+ while (n >= 32)
+ MOVE_16_WORDS("%3@,%0@+");
+ while (n >= 2)
+ MOVE_WORD("%3@,%0@+");
+ if (result)
+ return start - addr; /* Negated to indicate uncertain length */
+ if (n == 1)
+ MOVE_BYTE("%3@,%0@");
+out:
+ return addr - start;
+}
+
+static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
+{
+ unsigned char *addr = start;
+ int result = 0;
+
+ if (n >= 1) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+ goto out;
+ }
+ if (n >= 1 && ((unsigned long)addr & 1)) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+ goto out;
+ }
+ while (n >= 32)
+ MOVE_16_WORDS("%0@+,%3@");
+ while (n >= 2)
+ MOVE_WORD("%0@+,%3@");
+ if (result)
+ return start - addr; /* Negated to indicate uncertain length */
+ if (n == 1)
+ MOVE_BYTE("%0@,%3@");
+out:
+ return addr - start;
+}

static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
- int n = len;
- int transferred;
+
+ hostdata->pdma_residual = len;

while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
- CP_IO_TO_MEM(s, d, n);
+ int bytes;

- transferred = d - dst - n;
- hostdata->pdma_residual = len - transferred;
+ bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));

- /* No bus error. */
- if (n == 0)
+ if (bytes > 0) {
+ d += bytes;
+ hostdata->pdma_residual -= bytes;
+ }
+
+ if (hostdata->pdma_residual == 0)
return 0;

- /* Target changed phase early? */
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
- scmd_printk(KERN_ERR, hostdata->connected,
+ BUS_AND_STATUS_REG, BASR_ACK,
+ BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
return 0;

+ if (bytes == 0)
+ udelay(MAC_PDMA_DELAY);
+
+ if (bytes >= 0)
+ continue;
+
dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ "%s: bus error (%d/%d)\n", __func__, d - dst, len);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- d = dst + transferred;
- n = len - transferred;
+ return -1;
}

scmd_printk(KERN_ERR, hostdata->connected,
@@ -192,93 +311,27 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
return -1;
}

-
-#define CP_MEM_TO_IO(s,d,n) \
-__asm__ __volatile__ \
- (" cmp.w #4,%2\n" \
- " bls 8f\n" \
- " move.w %0,%%d0\n" \
- " neg.b %%d0\n" \
- " and.w #3,%%d0\n" \
- " sub.w %%d0,%2\n" \
- " bra 2f\n" \
- " 1: move.b (%0)+,(%1)\n" \
- " 2: dbf %%d0,1b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #5,%%d0\n" \
- " bra 4f\n" \
- " 3: move.l (%0)+,(%1)\n" \
- "31: move.l (%0)+,(%1)\n" \
- "32: move.l (%0)+,(%1)\n" \
- "33: move.l (%0)+,(%1)\n" \
- "34: move.l (%0)+,(%1)\n" \
- "35: move.l (%0)+,(%1)\n" \
- "36: move.l (%0)+,(%1)\n" \
- "37: move.l (%0)+,(%1)\n" \
- " 4: dbf %%d0,3b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #2,%%d0\n" \
- " and.w #7,%%d0\n" \
- " bra 6f\n" \
- " 5: move.l (%0)+,(%1)\n" \
- " 6: dbf %%d0,5b\n" \
- " and.w #3,%2\n" \
- " bra 8f\n" \
- " 7: move.b (%0)+,(%1)\n" \
- " 8: dbf %2,7b\n" \
- " moveq.l #0, %2\n" \
- " 9: \n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "91: moveq.l #1, %2\n" \
- " jra 9b\n" \
- "94: moveq.l #4, %2\n" \
- " jra 9b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,91b\n" \
- " .long 3b,94b\n" \
- " .long 31b,94b\n" \
- " .long 32b,94b\n" \
- " .long 33b,94b\n" \
- " .long 34b,94b\n" \
- " .long 35b,94b\n" \
- " .long 36b,94b\n" \
- " .long 37b,94b\n" \
- " .long 5b,94b\n" \
- " .long 7b,91b\n" \
- ".previous" \
- : "=a"(s), "=a"(d), "=d"(n) \
- : "0"(s), "1"(d), "2"(n) \
- : "d0")
-
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
unsigned char *s = src;
u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
- int n = len;
- int transferred;
+
+ hostdata->pdma_residual = len;

while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
- CP_MEM_TO_IO(s, d, n);
+ int bytes;

- transferred = s - src - n;
- hostdata->pdma_residual = len - transferred;
+ bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));

- /* Target changed phase early? */
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- return 0;
+ if (bytes > 0) {
+ s += bytes;
+ hostdata->pdma_residual -= bytes;
+ }

- /* No bus error. */
- if (n == 0) {
+ if (hostdata->pdma_residual == 0) {
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
TCR_LAST_BYTE_SENT, HZ / 64) < 0)
@@ -287,17 +340,29 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
return 0;
}

+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ BUS_AND_STATUS_REG, BASR_ACK,
+ BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_DEBUG, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ return 0;
+
+ if (bytes == 0)
+ udelay(MAC_PDMA_DELAY);
+
+ if (bytes >= 0)
+ continue;
+
dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ "%s: bus error (%d/%d)\n", __func__, s - src, len);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- s = src + transferred;
- n = len - transferred;
+ return -1;
}

scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-
return -1;
}

@@ -305,7 +370,7 @@ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < 16)
+ cmd->SCp.this_residual < setup_use_pdma)
return 0;

return cmd->SCp.this_residual;
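
The mac_scsi rework above replaces the unrolled copy macros with bounded transfers: each pass through the DRQ poll loop moves at most 512 bytes, updates pdma_residual, retries after a short delay when zero bytes moved, and gives up on a fault of uncertain length. A minimal user-space sketch of that control flow, with hypothetical device_ready() and copy_chunk() helpers standing in for the DRQ poll and the MOVE_* macros:

#include <stdio.h>

#define CHUNK_MAX 512

/* Hypothetical stand-ins for the DRQ poll and the MOVE_* macros. */
static int device_ready(void) { return 1; }         /* 1 while DRQ is asserted    */
static int copy_chunk(char *dst, int len)           /* bytes moved, or <0 = fault */
{
        for (int i = 0; i < len; i++)
                dst[i] = 0;                         /* pretend to read the port   */
        return len;
}

/* Transfer len bytes while tracking the residual, like macscsi_pread(). */
static int pdma_read(char *dst, int len)
{
        int residual = len;

        while (device_ready()) {
                int n = residual < CHUNK_MAX ? residual : CHUNK_MAX;
                int bytes = copy_chunk(dst + (len - residual), n);

                if (bytes > 0)
                        residual -= bytes;
                if (residual == 0)
                        return 0;                   /* transfer complete          */
                if (bytes == 0)
                        continue;                   /* would udelay(), then retry */
                if (bytes < 0)
                        return -1;                  /* fault of uncertain length  */
        }
        return -1;                                  /* phase mismatch or !DRQ     */
}

int main(void)
{
        char buf[2000];

        printf("pdma_read: %d\n", pdma_read(buf, sizeof(buf)));
        return 0;
}

The key difference from the old macros is that progress is accounted per chunk, so a bus error no longer leaves the residual count for the whole transfer ambiguous.
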
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 293f5cf524d7..2166df5e5cb8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6168,7 +6168,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- u16 targetId = (sdev->channel % 2) + sdev->id;
+ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;

cmd = megasas_get_cmd(instance);
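
The megaraid_sas change above widens the target ID calculation so the two logical channels no longer alias: a device at (channel, id) maps to (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + id instead of (channel % 2) + id. A tiny sketch of the difference, assuming the driver's usual per-channel limit of 128:

#include <stdio.h>

#define MEGASAS_MAX_DEV_PER_CHANNEL 128        /* assumed value, for illustration */

static unsigned short target_id(unsigned int channel, unsigned int id)
{
        return (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + id;
}

int main(void)
{
        /* The old (channel % 2) + id formula gave 6 for both (0, 6) and (1, 5). */
        printf("%u %u\n", target_id(0, 6), target_id(1, 5));   /* prints: 6 133 */
        return 0;
}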

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 07dfc17d4824..00a6650dfefd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -71,11 +71,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;

+ mutex_lock(&scsi_sense_cache_mutex);
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
- return 0;
+ goto exit;

- mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
@@ -91,7 +91,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
if (!scsi_sense_cache)
ret = -ENOMEM;
}
-
+ exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
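
The scsi_lib.c hunk closes a race in scsi_init_sense_cache(): the mutex is now taken before the cache lookup, so two hosts probing concurrently can no longer both observe a missing cache and try to create it. The shape of the fix as a stand-alone pthread sketch, with a hypothetical create_cache() allocator in place of kmem_cache_create():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *sense_cache;

static void *create_cache(void) { return malloc(96); }  /* stand-in allocator */

static int init_sense_cache(void)
{
        int ret = 0;

        pthread_mutex_lock(&cache_mutex);       /* take the lock before the check   */
        if (sense_cache)
                goto out;                       /* someone else already created it  */

        sense_cache = create_cache();
        if (!sense_cache)
                ret = -1;                       /* -ENOMEM in the kernel            */
out:
        pthread_mutex_unlock(&cache_mutex);
        return ret;
}

int main(void)
{
        return init_sense_cache();
}
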
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a340af797a85..bd70843940dc 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -431,7 +431,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
struct gendisk *disk = sdkp->disk;
unsigned int nr_zones;
- u32 zone_blocks;
+ u32 zone_blocks = 0;
int ret;

if (!sd_is_zoned(sdkp))
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 3912526ead66..19f6a76f1c07 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -425,7 +425,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.direction = DMA_MEM_TO_DEV,
.dst_addr = rs->dma_addr_tx,
.dst_addr_width = rs->n_bytes,
- .dst_maxburst = rs->fifo_len / 2,
+ .dst_maxburst = rs->fifo_len / 4,
};

dmaengine_slave_config(master->dma_tx, &txconf);
@@ -526,7 +526,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a83fcddf1dad..7f6fb383d7a7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2281,11 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;

- /* even if it's just one always-selected device, there must
- * be at least one chipselect
- */
- if (ctlr->num_chipselect == 0)
- return -EINVAL;
if (ctlr->bus_num >= 0) {
/* devices with a fixed bus num must check-in with the num */
mutex_lock(&board_lock);
@@ -2356,6 +2351,13 @@ int spi_register_controller(struct spi_controller *ctlr)
}
}

+ /*
+ * Even if it's just one always-selected device, there must
+ * be at least one chipselect.
+ */
+ if (!ctlr->num_chipselect)
+ return -EINVAL;
+
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 510202a3b091..84cca18e3e9d 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -419,6 +419,9 @@ static int vpfe_open(struct file *file)
/* If decoder is not initialized, initialize it */
if (!video->initialized && vpfe_update_pipe_state(video)) {
mutex_unlock(&video->lock);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
return -ENODEV;
}
/* Increment device users counter */
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 2ddcc42ab8ff..e9d621e19d6d 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -455,13 +455,9 @@ static void mipi_csis_set_params(struct csi_state *state)
MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
}

-static void mipi_csis_clk_enable(struct csi_state *state)
+static int mipi_csis_clk_enable(struct csi_state *state)
{
- int ret;
-
- ret = clk_bulk_prepare_enable(state->num_clks, state->clks);
- if (ret < 0)
- dev_err(state->dev, "failed to enable clocks\n");
+ return clk_bulk_prepare_enable(state->num_clks, state->clks);
}

static void mipi_csis_clk_disable(struct csi_state *state)
@@ -985,7 +981,11 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
return ret;

- mipi_csis_clk_enable(state);
+ ret = mipi_csis_clk_enable(state);
+ if (ret < 0) {
+ dev_err(state->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }

ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
0, dev_name(dev), state);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 4e680d753941..e2fa3a3bc81d 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -89,6 +89,12 @@ static int chap_check_algorithm(const char *a_str)
return CHAP_DIGEST_UNKNOWN;
}

+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -126,7 +132,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}

@@ -141,19 +147,13 @@ static struct iscsi_chap *chap_server_open(
* Generate Challenge.
*/
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}

return chap;
}

-static void chap_close(struct iscsi_conn *conn)
-{
- kfree(conn->auth_protocol);
- conn->auth_protocol = NULL;
-}
-
static int chap_server_compute_md5(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fa783531ee88..a02448105527 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -63,7 +63,7 @@ struct usb_dev_state {
unsigned int discsignr;
struct pid *disc_pid;
const struct cred *cred;
- void __user *disccontext;
+ sigval_t disccontext;
unsigned long ifclaimed;
u32 disabled_bulk_eps;
bool privileges_dropped;
@@ -90,6 +90,7 @@ struct async {
unsigned int ifnum;
void __user *userbuffer;
void __user *userurb;
+ sigval_t userurb_sigval;
struct urb *urb;
struct usb_memory *usbm;
unsigned int mem_usage;
@@ -582,22 +583,19 @@ static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
struct usb_dev_state *ps = as->ps;
- struct kernel_siginfo sinfo;
struct pid *pid = NULL;
const struct cred *cred = NULL;
unsigned long flags;
- int signr;
+ sigval_t addr;
+ int signr, errno;

spin_lock_irqsave(&ps->lock, flags);
list_move_tail(&as->asynclist, &ps->async_completed);
as->status = urb->status;
signr = as->signr;
if (signr) {
- clear_siginfo(&sinfo);
- sinfo.si_signo = as->signr;
- sinfo.si_errno = as->status;
- sinfo.si_code = SI_ASYNCIO;
- sinfo.si_addr = as->userurb;
+ errno = as->status;
+ addr = as->userurb_sigval;
pid = get_pid(as->pid);
cred = get_cred(as->cred);
}
@@ -615,7 +613,7 @@ static void async_completed(struct urb *urb)
spin_unlock_irqrestore(&ps->lock, flags);

if (signr) {
- kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred);
+ kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
put_pid(pid);
put_cred(cred);
}
@@ -1427,7 +1425,7 @@ find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)

static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
- void __user *arg)
+ void __user *arg, sigval_t userurb_sigval)
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
@@ -1727,6 +1725,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
isopkt = NULL;
as->ps = ps;
as->userurb = arg;
+ as->userurb_sigval = userurb_sigval;
if (as->usbm) {
unsigned long uurb_start = (unsigned long)uurb->buffer;

@@ -1801,13 +1800,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
+ sigval_t userurb_sigval;

if (copy_from_user(&uurb, arg, sizeof(uurb)))
return -EFAULT;

+ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
+ userurb_sigval.sival_ptr = arg;
+
return proc_do_submiturb(ps, &uurb,
(((struct usbdevfs_urb __user *)arg)->iso_frame_desc),
- arg);
+ arg, userurb_sigval);
}

static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
@@ -1977,7 +1980,7 @@ static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *a
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
- ps->disccontext = compat_ptr(ds.context);
+ ps->disccontext.sival_int = ds.context;
return 0;
}

@@ -2005,13 +2008,17 @@ static int get_urb32(struct usbdevfs_urb *kurb,
static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
+ sigval_t userurb_sigval;

if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
return -EFAULT;

+ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
+ userurb_sigval.sival_int = ptr_to_compat(arg);
+
return proc_do_submiturb(ps, &uurb,
((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
- arg);
+ arg, userurb_sigval);
}

static int processcompl_compat(struct async *as, void __user * __user *arg)
@@ -2092,7 +2099,7 @@ static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
- ps->disccontext = ds.context;
+ ps->disccontext.sival_ptr = ds.context;
return 0;
}

@@ -2614,22 +2621,15 @@ const struct file_operations usbdev_file_operations = {
static void usbdev_remove(struct usb_device *udev)
{
struct usb_dev_state *ps;
- struct kernel_siginfo sinfo;

while (!list_empty(&udev->filelist)) {
ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
list_del_init(&ps->list);
- if (ps->discsignr) {
- clear_siginfo(&sinfo);
- sinfo.si_signo = ps->discsignr;
- sinfo.si_errno = EPIPE;
- sinfo.si_code = SI_ASYNCIO;
- sinfo.si_addr = ps->disccontext;
- kill_pid_info_as_cred(ps->discsignr, &sinfo,
- ps->disc_pid, ps->cred);
- }
+ if (ps->discsignr)
+ kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext,
+ ps->disc_pid, ps->cred);
}
}
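
The devio.c changes stop building a kernel_siginfo by hand and instead hand a sigval_t payload to kill_pid_usb_asyncio(); native callers stash the URB pointer in sival_ptr while the 32-bit compat path stores an int in sival_int. The same payload model exists in user space as sigqueue(2); a minimal sketch (an illustration of the union sigval idea, not the kernel API) showing a pointer travelling through the signal:

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void *received;                       /* context recovered by the handler */

static void handler(int sig, siginfo_t *info, void *ctx)
{
        (void)sig; (void)ctx;
        received = info->si_value.sival_ptr;
}

int main(void)
{
        struct sigaction sa = { 0 };
        union sigval val;
        int token = 42;

        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* Native callers pass a pointer (sival_ptr); the 32-bit compat path
         * passes an int (sival_int) through the same union. */
        val.sival_ptr = &token;
        sigqueue(getpid(), SIGUSR1, val);    /* delivered before this call returns */

        printf("handler saw %p (expected %p)\n", received, (void *)&token);
        return 0;
}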

diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 310eef451db8..b10d14af6416 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3616,6 +3616,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
struct usb_device *hdev;
struct usb_device *udev;
int connect_change = 0;
+ u16 link_state;
int ret;

hdev = hub->hdev;
@@ -3625,9 +3626,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
return 0;
usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
} else {
+ link_state = portstatus & USB_PORT_STAT_LINK_STATE;
if (!udev || udev->state != USB_STATE_SUSPENDED ||
- (portstatus & USB_PORT_STAT_LINK_STATE) !=
- USB_SS_PORT_LS_U0)
+ (link_state != USB_SS_PORT_LS_U0 &&
+ link_state != USB_SS_PORT_LS_U1 &&
+ link_state != USB_SS_PORT_LS_U2))
return 0;
}

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..8beacbee2553 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -36,7 +36,7 @@

#include "vhost.h"

-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
" 1 -Enable; 0 - Disable");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d37dd5bb7a8f..559768dc2567 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -538,8 +538,15 @@ static void balloon_process(struct work_struct *work)
state = reserve_additional_memory();
}

- if (credit < 0)
- state = decrease_reservation(-credit, GFP_BALLOON);
+ if (credit < 0) {
+ long n_pages;
+
+ n_pages = min(-credit, si_mem_available());
+ state = decrease_reservation(n_pages, GFP_BALLOON);
+ if (state == BP_DONE && n_pages != -credit &&
+ n_pages < totalreserve_pages)
+ state = BP_EAGAIN;
+ }

state = update_schedule(state);

@@ -578,6 +585,9 @@ static int add_ballooned_pages(int nr_pages)
}
}

+ if (si_mem_available() < nr_pages)
+ return -ENOMEM;
+
st = decrease_reservation(nr_pages, GFP_USER);
if (st != BP_DONE)
return -ENOMEM;
@@ -710,7 +720,7 @@ static int __init balloon_init(void)
balloon_stats.schedule_delay = 1;
balloon_stats.max_schedule_delay = 32;
balloon_stats.retry_count = 1;
- balloon_stats.max_retry_count = RETRY_UNLIMITED;
+ balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
set_online_page_callback(&xen_online_page);
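
The balloon driver now refuses to balloon out more memory than the guest can spare: the request is clamped to si_mem_available(), and a partially satisfied request that leaves the guest below its reserve is retried later rather than forever. A small sketch of that decision, with hypothetical mem_available() and reserve_pages() helpers standing in for the kernel's accounting:

#include <stdio.h>

enum bp_state { BP_DONE, BP_EAGAIN };

/* Hypothetical stand-ins for si_mem_available() and totalreserve_pages. */
static long mem_available(void) { return 1024; }
static long reserve_pages(void) { return 4096; }

static enum bp_state shrink(long wanted)
{
        long avail = mem_available();
        long n_pages = wanted < avail ? wanted : avail;   /* clamp the request */

        /* decrease_reservation(n_pages, GFP_BALLOON) would run here. */

        /* Partial progress while still below the reserve: try again later. */
        if (n_pages != wanted && n_pages < reserve_pages())
                return BP_EAGAIN;
        return BP_DONE;
}

int main(void)
{
        printf("%s\n", shrink(2048) == BP_EAGAIN ? "BP_EAGAIN" : "BP_DONE");
        return 0;
}
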
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 117e76b2f939..cf4ec27407db 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1293,7 +1293,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
+static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int masked;
@@ -1327,7 +1327,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)

return 0;
}
-EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
@@ -1341,6 +1340,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
return ret;
}

+/* To be called with desc->lock held. */
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+
+ return set_affinity_irq(d, cpumask_of(tcpu), false);
+}
+EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
+
static void enable_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6d1a5e58968f..47c70b826a6a 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
this_cpu_write(bind_last_selected_cpu, selected_cpu);

/* unmask expects irqs to be disabled */
- xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+ xen_set_affinity_evtchn(desc, selected_cpu);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ef11808b592b..99f1820637e4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2713,6 +2713,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
* for detecting, at fsync time, if the inode isn't yet in the
* log tree or it's there but not up to date.
*/
+ struct timespec64 now = current_time(inode);
+
+ inode_inc_iversion(inode);
+ inode->i_mtime = now;
+ inode->i_ctime = now;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index fc93e0d6e48d..c7bcf8ba82ca 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3308,6 +3308,30 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
return 0;
}

+/*
+ * Check if an inode was logged in the current transaction. We can't always rely
+ * on an inode's logged_trans value, because it's an in-memory only field and
+ * therefore not persisted. This means that its value is lost if the inode gets
+ * evicted and loaded again from disk (in which case it has a value of 0, and
+ * certainly it is smaller than any possible transaction ID), when that happens
+ * the full_sync flag is set in the inode's runtime flags, so in that case we
+ * assume eviction happened and ignore the logged_trans value, assuming the
+ * worst case, that the inode was logged before in the current transaction.
+ */
+static bool inode_logged(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
+{
+ if (inode->logged_trans == trans->transid)
+ return true;
+
+ if (inode->last_trans == trans->transid &&
+ test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
+ !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
+ return true;
+
+ return false;
+}
+
/*
* If both a file and directory are logged, and unlinks or renames are
* mixed in, we have a few interesting corners:
@@ -3342,7 +3366,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
int bytes_del = 0;
u64 dir_ino = btrfs_ino(dir);

- if (dir->logged_trans < trans->transid)
+ if (!inode_logged(trans, dir))
return 0;

ret = join_running_log_trans(root);
@@ -3447,7 +3471,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
u64 index;
int ret;

- if (inode->logged_trans < trans->transid)
+ if (!inode_logged(trans, inode))
return 0;

ret = join_running_log_trans(root);
@@ -5407,9 +5431,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
}
}

+ /*
+ * Don't update last_log_commit if we logged that an inode exists after
+ * it was loaded to memory (full_sync bit set).
+ * This is to prevent data loss when we do a write to the inode, then
+ * the inode gets evicted after all delalloc was flushed, then we log
+ * it exists (due to a rename for example) and then fsync it. This last
+ * fsync would do nothing (not logging the extents previously written).
+ */
spin_lock(&inode->lock);
inode->logged_trans = trans->transid;
- inode->last_log_commit = inode->last_sub_trans;
+ if (inode_only != LOG_INODE_EXISTS ||
+ !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
out_unlock:
mutex_unlock(&inode->log_mutex);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 9f53c3d99304..06ebde492b31 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1006,7 +1006,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
* may block.
*/
truncate_inode_pages_range(inode->i_mapping, pos,
- (pos+len) | (PAGE_SIZE - 1));
+ PAGE_ALIGN(pos + len) - 1);

req->r_mtime = mtime;
}
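
The one-line ceph fix corrects the end of the invalidation range. (pos + len) | (PAGE_SIZE - 1) only happens to work when pos + len is not page aligned; when it is aligned, the OR rounds into the following page and invalidates one page too many, whereas PAGE_ALIGN(pos + len) - 1 always yields the last byte of the written range. A quick check of both expressions, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long end = 8192;       /* pos + len, already page aligned */

        printf("old: %lu\n", end | (PAGE_SIZE - 1));    /* 12287: one page too far */
        printf("new: %lu\n", PAGE_ALIGN(end) - 1);      /* 8191: correct last byte */
        return 0;
}
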
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index ed49222abecb..afa56237a0c3 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -83,5 +83,10 @@ struct cifs_sb_info {
* failover properly.
*/
char *origin_fullpath; /* \\HOST\SHARE\[OPTIONAL PATH] */
+ /*
+ * Indicate whether serverino option was turned off later
+ * (cifs_autodisable_serverino) in order to match new mounts.
+ */
+ bool mnt_cifs_serverino_autodisabled;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e9507fba0b36..cccc89c73140 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1221,11 +1221,11 @@ cifs_demultiplex_thread(void *p)
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", bufs[i],
HEADER_SIZE(server));
+ smb2_add_credits_from_hdr(bufs[i], server);
#ifdef CONFIG_CIFS_DEBUG2
if (server->ops->dump_detail)
server->ops->dump_detail(bufs[i],
server);
- smb2_add_credits_from_hdr(bufs[i], server);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
}
@@ -3455,12 +3455,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
+ unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
+ unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;

if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
return 0;

- if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
- (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
+ if (old->mnt_cifs_serverino_autodisabled)
+ newflags &= ~CIFS_MOUNT_SERVER_INUM;
+
+ if (oldflags != newflags)
return 0;

/*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 538fd7d807e4..9e71619ce7bc 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2371,6 +2371,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
struct inode *inode = d_inode(direntry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+ struct cifsFileInfo *wfile;
+ struct cifs_tcon *tcon;
char *full_path = NULL;
int rc = -EACCES;
__u32 dosattr = 0;
@@ -2417,6 +2419,20 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
mapping_set_error(inode->i_mapping, rc);
rc = 0;

+ if (attrs->ia_valid & ATTR_MTIME) {
+ rc = cifs_get_writable_file(cifsInode, false, &wfile);
+ if (!rc) {
+ tcon = tlink_tcon(wfile->tlink);
+ rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
+ cifsFileInfo_put(wfile);
+ if (rc)
+ return rc;
+ } else if (rc != -EBADF)
+ return rc;
+ else
+ rc = 0;
+ }
+
if (attrs->ia_valid & ATTR_SIZE) {
rc = cifs_set_file_size(inode, attrs, xid, full_path);
if (rc != 0)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0dc6f08020ac..f4e14cebe942 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -539,6 +539,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
tcon = cifs_sb_master_tcon(cifs_sb);

cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+ cifs_sb->mnt_cifs_serverino_autodisabled = true;
cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
tcon ? tcon->treeName : "new server");
cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 278405d26c47..d8d9cdfa30b6 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -120,6 +120,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
@@ -147,6 +149,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_DISPOSITION_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -163,6 +167,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -180,6 +186,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_BASIC_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
@@ -206,6 +214,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
@@ -231,6 +241,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_LINK_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
+ if (rc)
+ goto finished;
smb2_set_next_command(tcon, &rqst[num_rqst]);
smb2_set_related(&rqst[num_rqst++]);
trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index aa61dcf471b3..db191975a994 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -705,8 +705,51 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)

smb2_set_related(&rqst[1]);

+ /*
+ * We do not hold the lock for the open because, in case
+ * SMB2_open needs to reconnect, it would end up calling
+ * cifs_mark_open_files_invalid(), which takes the lock again,
+ * thus causing a deadlock.
+ */
+
+ mutex_unlock(&tcon->crfid.fid_mutex);
rc = compound_send_recv(xid, ses, flags, 2, rqst,
resp_buftype, rsp_iov);
+ mutex_lock(&tcon->crfid.fid_mutex);
+
+ /*
+ * Now we need to check again as the cached root might have
+ * been successfully re-opened by a concurrent process
+ */
+
+ if (tcon->crfid.is_valid) {
+ /* work was already done */
+
+ /* stash fids for close() later */
+ struct cifs_fid fid = {
+ .persistent_fid = pfid->persistent_fid,
+ .volatile_fid = pfid->volatile_fid,
+ };
+
+ /*
+ * caller expects this func to set pfid to a valid
+ * cached root, so we copy the existing one and get a
+ * reference.
+ */
+ memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
+ kref_get(&tcon->crfid.refcount);
+
+ mutex_unlock(&tcon->crfid.fid_mutex);
+
+ if (rc == 0) {
+ /* close extra handle outside of crit sec */
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+ goto oshr_free;
+ }
+
+ /* Cached root is still invalid, continue normally */
+
if (rc)
goto oshr_exit;

@@ -740,8 +783,9 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
(char *)&tcon->crfid.file_all_info))
tcon->crfid.file_all_info_is_valid = 1;

- oshr_exit:
+oshr_exit:
mutex_unlock(&tcon->crfid.fid_mutex);
+oshr_free:
SMB2_open_free(&rqst[0]);
SMB2_query_info_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
@@ -2004,6 +2048,10 @@ smb2_set_related(struct smb_rqst *rqst)
struct smb2_sync_hdr *shdr;

shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ if (shdr == NULL) {
+ cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
+ return;
+ }
shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
}

@@ -2018,6 +2066,12 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
unsigned long len = smb_rqst_len(server, rqst);
int i, num_padding;

+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ if (shdr == NULL) {
+ cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
+ return;
+ }
+
/* SMB headers in a compound are 8 byte aligned. */

/* No padding needed */
@@ -2057,7 +2111,6 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
}

finished:
- shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
shdr->NextCommand = cpu_to_le32(len);
}
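
open_shroot() now drops crfid.fid_mutex around the compound send (which may reconnect and re-take the same mutex) and, after re-acquiring it, re-checks whether another thread already repopulated the cached root; if so, the surplus handle is closed outside the critical section. The same drop-the-lock-then-revalidate pattern in a stand-alone pthread sketch, with a hypothetical slow_open() in place of the compound request:

#include <pthread.h>

static pthread_mutex_t fid_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cached_fid = -1;                 /* -1 means no cached root yet       */

static int slow_open(void) { return 100; }  /* stand-in for the compound request */
static void close_fid(int fid) { (void)fid; }

static int open_cached_root(int *out)
{
        int fid;

        pthread_mutex_lock(&fid_mutex);
        if (cached_fid >= 0) {              /* fast path: already cached         */
                *out = cached_fid;
                pthread_mutex_unlock(&fid_mutex);
                return 0;
        }

        pthread_mutex_unlock(&fid_mutex);   /* blocking call without the lock    */
        fid = slow_open();
        pthread_mutex_lock(&fid_mutex);

        if (cached_fid >= 0) {              /* another thread won the race       */
                *out = cached_fid;
                pthread_mutex_unlock(&fid_mutex);
                close_fid(fid);             /* drop our extra handle outside     */
                return 0;
        }

        cached_fid = fid;                   /* we won: publish our handle        */
        *out = cached_fid;
        pthread_mutex_unlock(&fid_mutex);
        return 0;
}

int main(void)
{
        int fid;

        return open_cached_root(&fid);
}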

diff --git a/fs/coda/file.c b/fs/coda/file.c
index 1cbc1f2298ee..43d371551d2b 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -27,6 +27,13 @@
#include "coda_linux.h"
#include "coda_int.h"

+struct coda_vm_ops {
+ atomic_t refcnt;
+ struct file *coda_file;
+ const struct vm_operations_struct *host_vm_ops;
+ struct vm_operations_struct vm_ops;
+};
+
static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
@@ -61,6 +68,34 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}

+static void
+coda_vm_open(struct vm_area_struct *vma)
+{
+ struct coda_vm_ops *cvm_ops =
+ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+ atomic_inc(&cvm_ops->refcnt);
+
+ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
+ cvm_ops->host_vm_ops->open(vma);
+}
+
+static void
+coda_vm_close(struct vm_area_struct *vma)
+{
+ struct coda_vm_ops *cvm_ops =
+ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
+ cvm_ops->host_vm_ops->close(vma);
+
+ if (atomic_dec_and_test(&cvm_ops->refcnt)) {
+ vma->vm_ops = cvm_ops->host_vm_ops;
+ fput(cvm_ops->coda_file);
+ kfree(cvm_ops);
+ }
+}
+
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
@@ -68,6 +103,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
struct coda_inode_info *cii;
struct file *host_file;
struct inode *coda_inode, *host_inode;
+ struct coda_vm_ops *cvm_ops;
+ int ret;

cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -76,6 +113,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
if (!host_file->f_op->mmap)
return -ENODEV;

+ if (WARN_ON(coda_file != vma->vm_file))
+ return -EIO;
+
+ cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
+ if (!cvm_ops)
+ return -ENOMEM;
+
coda_inode = file_inode(coda_file);
host_inode = file_inode(host_file);

@@ -89,6 +133,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
* the container file on us! */
else if (coda_inode->i_mapping != host_inode->i_mapping) {
spin_unlock(&cii->c_lock);
+ kfree(cvm_ops);
return -EBUSY;
}

@@ -97,7 +142,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
cfi->cfi_mapcount++;
spin_unlock(&cii->c_lock);

- return call_mmap(host_file, vma);
+ vma->vm_file = get_file(host_file);
+ ret = call_mmap(vma->vm_file, vma);
+
+ if (ret) {
+ /* if call_mmap fails, our caller will put coda_file so we
+ * should drop the reference to the host_file that we got.
+ */
+ fput(host_file);
+ kfree(cvm_ops);
+ } else {
+ /* here we add redirects for the open/close vm_operations */
+ cvm_ops->host_vm_ops = vma->vm_ops;
+ if (vma->vm_ops)
+ cvm_ops->vm_ops = *vma->vm_ops;
+
+ cvm_ops->vm_ops.open = coda_vm_open;
+ cvm_ops->vm_ops.close = coda_vm_close;
+ cvm_ops->coda_file = coda_file;
+ atomic_set(&cvm_ops->refcnt, 1);
+
+ vma->vm_ops = &cvm_ops->vm_ops;
+ }
+ return ret;
}

int coda_open(struct inode *coda_inode, struct file *coda_file)
@@ -207,4 +274,3 @@ const struct file_operations coda_file_operations = {
.fsync = coda_fsync,
.splice_read = generic_file_splice_read,
};
-
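
The coda mmap change wraps the host filesystem's vm_operations in a reference-counted copy, so the redirected open/close hooks can drop the pinned host file only when the last mapping disappears. A compact user-space sketch of that wrapping technique for a generic ops table (all names hypothetical):

#include <stdlib.h>

struct ops {
        void (*open)(void *obj);
        void (*close)(void *obj);
};

struct ops_wrapper {
        int refcnt;                     /* atomic_t in the kernel version    */
        const struct ops *host_ops;     /* the ops table being wrapped       */
        struct ops ops;                 /* our copy with redirected hooks    */
        void *pinned;                   /* resource released on last close   */
};

static void wrapped_open(void *obj)
{
        struct ops_wrapper *w = obj;

        w->refcnt++;
        if (w->host_ops && w->host_ops->open)
                w->host_ops->open(obj);
}

static void wrapped_close(void *obj)
{
        struct ops_wrapper *w = obj;

        if (w->host_ops && w->host_ops->close)
                w->host_ops->close(obj);
        if (--w->refcnt == 0) {
                free(w->pinned);        /* fput(coda_file) in the driver     */
                free(w);
        }
}

static struct ops_wrapper *wrap_ops(const struct ops *host_ops, void *pinned)
{
        struct ops_wrapper *w = calloc(1, sizeof(*w));

        if (!w)
                return NULL;
        if (host_ops)
                w->ops = *host_ops;     /* start from the host's table       */
        w->ops.open = wrapped_open;     /* then redirect open and close      */
        w->ops.close = wrapped_close;
        w->host_ops = host_ops;
        w->pinned = pinned;
        w->refcnt = 1;                  /* the initial mapping holds one ref */
        return w;
}

int main(void)
{
        struct ops_wrapper *w = wrap_ops(NULL, malloc(16));

        if (w)
                w->ops.close(w);        /* last close releases everything    */
        return 0;
}
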
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index fe38b5306045..5b3d525aa213 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -159,7 +159,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;

- BUG_ON(len == 0);
+ if (WARN_ON_ONCE(len <= 0))
+ return -EINVAL;
+ if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
+ return -EINVAL;

fscrypt_generate_iv(&iv, lblk_num, ci);

@@ -243,8 +246,6 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
struct page *ciphertext_page = page;
int err;

- BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
-
if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
/* with inplace-encryption we just encrypt the page */
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
@@ -256,7 +257,8 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
return ciphertext_page;
}

- BUG_ON(!PageLocked(page));
+ if (WARN_ON_ONCE(!PageLocked(page)))
+ return ERR_PTR(-EINVAL);

ctx = fscrypt_get_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
@@ -304,8 +306,9 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs, u64 lblk_num)
{
- if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
- BUG_ON(!PageLocked(page));
+ if (WARN_ON_ONCE(!PageLocked(page) &&
+ !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
+ return -EINVAL;

return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
diff --git a/fs/dax.c b/fs/dax.c
index 9fd908f3df32..4b85d539b845 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -131,6 +131,15 @@ static int dax_is_empty_entry(void *entry)
return xa_to_value(entry) & DAX_EMPTY;
}

+/*
+ * true if the entry that was found is of a smaller order than the entry
+ * we were looking for
+ */
+static bool dax_is_conflict(void *entry)
+{
+ return entry == XA_RETRY_ENTRY;
+}
+
/*
* DAX page cache entry locking
*/
@@ -203,11 +212,13 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
* Look up entry in page cache, wait for it to become unlocked if it
* is a DAX entry and return it. The caller must subsequently call
* put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
- * if it did.
+ * if it did. The entry returned may have a larger order than @order.
+ * If @order is larger than the order of the entry found in i_pages, this
+ * function returns a special conflict value that dax_is_conflict() recognizes.
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_entry(struct xa_state *xas)
+static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
void *entry;
struct wait_exceptional_entry_queue ewait;
@@ -218,6 +229,8 @@ static void *get_unlocked_entry(struct xa_state *xas)

for (;;) {
entry = xas_find_conflict(xas);
+ if (dax_entry_order(entry) < order)
+ return XA_RETRY_ENTRY;
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
!dax_is_locked(entry))
return entry;
@@ -262,7 +275,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
- if (entry)
+ if (entry && !dax_is_conflict(entry))
dax_wake_entry(xas, entry, false);
}

@@ -469,7 +482,7 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
* overlap with xarray value entries.
*/
static void *grab_mapping_entry(struct xa_state *xas,
- struct address_space *mapping, unsigned long size_flag)
+ struct address_space *mapping, unsigned int order)
{
unsigned long index = xas->xa_index;
bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
@@ -477,20 +490,17 @@ static void *grab_mapping_entry(struct xa_state *xas,

retry:
xas_lock_irq(xas);
- entry = get_unlocked_entry(xas);
+ entry = get_unlocked_entry(xas, order);

if (entry) {
+ if (dax_is_conflict(entry))
+ goto fallback;
if (!xa_is_value(entry)) {
xas_set_err(xas, EIO);
goto out_unlock;
}

- if (size_flag & DAX_PMD) {
- if (dax_is_pte_entry(entry)) {
- put_unlocked_entry(xas, entry);
- goto fallback;
- }
- } else { /* trying to grab a PTE entry */
+ if (order == 0) {
if (dax_is_pmd_entry(entry) &&
(dax_is_zero_entry(entry) ||
dax_is_empty_entry(entry))) {
@@ -531,7 +541,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
if (entry) {
dax_lock_entry(xas, entry);
} else {
- entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
+ unsigned long flags = DAX_EMPTY;
+
+ if (order > 0)
+ flags |= DAX_PMD;
+ entry = dax_make_entry(pfn_to_pfn_t(0), flags);
dax_lock_entry(xas, entry);
if (xas_error(xas))
goto out_unlock;
@@ -602,7 +616,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
if (unlikely(dax_is_locked(entry)))
- entry = get_unlocked_entry(&xas);
+ entry = get_unlocked_entry(&xas, 0);
if (entry)
page = dax_busy_page(entry);
put_unlocked_entry(&xas, entry);
@@ -629,7 +643,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
void *entry;

xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas);
+ entry = get_unlocked_entry(&xas, 0);
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
@@ -856,7 +870,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
if (unlikely(dax_is_locked(entry))) {
void *old_entry = entry;

- entry = get_unlocked_entry(xas);
+ entry = get_unlocked_entry(xas, 0);

/* Entry got punched out / reallocated? */
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1517,7 +1531,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* entry is already in the array, for instance), it will return
* VM_FAULT_FALLBACK.
*/
- entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
+ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
if (xa_is_internal(entry)) {
result = xa_to_internal(entry);
goto fallback;
@@ -1666,11 +1680,10 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
vm_fault_t ret;

xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas);
+ entry = get_unlocked_entry(&xas, order);
/* Did we race with someone splitting entry or so? */
- if (!entry ||
- (order == 0 && !dax_is_pte_entry(entry)) ||
- (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
+ if (!entry || dax_is_conflict(entry) ||
+ (order == 0 && !dax_is_pte_entry(entry))) {
put_unlocked_entry(&xas, entry);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f664da55234e..8b247e5ad12e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1019,8 +1019,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)

rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
inode);
- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
- return rc >= 0 ? -EINVAL : rc;
+ if (rc < 0)
+ return rc;
+ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
@@ -1382,8 +1384,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
ecryptfs_inode_to_lower(inode),
ECRYPTFS_XATTR_NAME, file_size,
ECRYPTFS_SIZE_AND_MARKER_BYTES);
- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
- return rc >= 0 ? -EINVAL : rc;
+ if (rc < 0)
+ return rc;
+ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b16645b417d9..bd9474e82f38 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -714,6 +714,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
void wbc_account_io(struct writeback_control *wbc, struct page *page,
size_t bytes)
{
+ struct cgroup_subsys_state *css;
int id;

/*
@@ -725,7 +726,12 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
if (!wbc->wb)
return;

- id = mem_cgroup_css_from_page(page)->id;
+ css = mem_cgroup_css_from_page(page);
+ /* dead cgroups shouldn't contribute to inode ownership arbitration */
+ if (!(css->flags & CSS_ONLINE))
+ return;
+
+ id = css->id;

if (id == wbc->wb_id) {
wbc->wb_bytes += bytes;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a71d0b42d160..43ca14dd3466 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -139,19 +139,12 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};

-struct readdirvec {
- unsigned long nr;
- unsigned long index;
- struct page *pages[NFS_MAX_READDIR_RAPAGES];
-};
-
typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
typedef struct {
struct file *file;
struct page *page;
struct dir_context *ctx;
unsigned long page_index;
- struct readdirvec pvec;
u64 *dir_cookie;
u64 last_cookie;
loff_t current_index;
@@ -531,10 +524,6 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
struct nfs_cache_array *array;
unsigned int count = 0;
int status;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;
-
- desc->pvec.index = desc->page_index;
- desc->pvec.nr = 0;

scratch = alloc_page(GFP_KERNEL);
if (scratch == NULL)
@@ -559,40 +548,20 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
if (desc->plus)
nfs_prime_dcache(file_dentry(desc->file), entry);

- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
- if (status == -ENOSPC) {
- desc->pvec.nr++;
- if (desc->pvec.nr == max_rapages)
- break;
- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
- }
+ status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
break;
} while (!entry->eof);

- /*
- * page and desc->pvec.pages[0] are valid, don't need to check
- * whether or not to be NULL.
- */
- copy_highpage(page, desc->pvec.pages[0]);
-
out_nopages:
if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
- array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]);
+ array = kmap(page);
array->eof_index = array->size;
status = 0;
- kunmap_atomic(array);
+ kunmap(page);
}

put_page(scratch);
-
- /*
- * desc->pvec.nr > 0 means at least one page was completely filled,
- * we should return -ENOSPC. Otherwise function
- * nfs_readdir_xdr_to_array will enter infinite loop.
- */
- if (desc->pvec.nr > 0)
- return -ENOSPC;
return status;
}

@@ -626,24 +595,6 @@ int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
return -ENOMEM;
}

-/*
- * nfs_readdir_rapages_init initialize rapages by nfs_cache_array structure.
- */
-static
-void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc)
-{
- struct nfs_cache_array *array;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;
- int index;
-
- for (index = 0; index < max_rapages; index++) {
- array = kmap_atomic(desc->pvec.pages[index]);
- memset(array, 0, sizeof(struct nfs_cache_array));
- array->eof_index = -1;
- kunmap_atomic(array);
- }
-}
-
static
int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
{
@@ -654,12 +605,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
int status = -ENOMEM;
unsigned int array_size = ARRAY_SIZE(pages);

- /*
- * This means we hit readdir rdpages miss, the preallocated rdpages
- * are useless, the preallocate rdpages should be reinitialized.
- */
- nfs_readdir_rapages_init(desc);
-
entry.prev_cookie = 0;
entry.cookie = desc->last_cookie;
entry.eof = 0;
@@ -719,24 +664,9 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
struct inode *inode = file_inode(desc->file);
int ret;

- /*
- * If desc->page_index in range desc->pvec.index and
- * desc->pvec.index + desc->pvec.nr, we get readdir cache hit.
- */
- if (desc->page_index >= desc->pvec.index &&
- desc->page_index < (desc->pvec.index + desc->pvec.nr)) {
- /*
- * page and desc->pvec.pages[x] are valid, don't need to check
- * whether or not to be NULL.
- */
- copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]);
- ret = 0;
- } else {
- ret = nfs_readdir_xdr_to_array(desc, page, inode);
- if (ret < 0)
- goto error;
- }
-
+ ret = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (ret < 0)
+ goto error;
SetPageUptodate(page);

if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
@@ -901,7 +831,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
*desc = &my_desc;
struct nfs_open_dir_context *dir_ctx = file->private_data;
int res = 0;
- int max_rapages = NFS_MAX_READDIR_RAPAGES;

dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
file, (long long)ctx->pos);
@@ -921,12 +850,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = nfs_use_readdirplus(inode, ctx);

- res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages);
- if (res < 0)
- return -ENOMEM;
-
- nfs_readdir_rapages_init(desc);
-
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
@@ -962,7 +885,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
} while (!desc->eof);
out:
- nfs_readdir_free_pages(desc->pvec.pages, max_rapages);
if (res > 0)
res = 0;
dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 19f856f45689..3eda40a320a5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -257,7 +257,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
if (status == 0)
return 0;

- if (mirror->mirror_ds == NULL)
+ if (IS_ERR_OR_NULL(mirror->mirror_ds))
return -EINVAL;

dserr = kmalloc(sizeof(*dserr), gfp_flags);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 414a90d48493..e94af89ac877 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1094,6 +1094,7 @@ int nfs_open(struct inode *inode, struct file *filp)
nfs_fscache_open_file(inode, filp);
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_open);

/*
* This function is called whenever some part of NFS notices that
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index c7cf23ae6597..b26622d9686f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -69,8 +69,7 @@ struct nfs_clone_mount {
* Maximum number of pages that readdir can use for creating
* a vmapped array of pages.
*/
-#define NFS_MAX_READDIR_PAGES 64
-#define NFS_MAX_READDIR_RAPAGES 8
+#define NFS_MAX_READDIR_PAGES 8

struct nfs_client_initdata {
unsigned long init_flags;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index f10b660805fc..1cf694c6ee67 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
return err;

if ((openflags & O_ACCMODE) == 3)
- openflags--;
+ return nfs_open(inode, filp);

/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 7066cd7c7aff..ca38ae357094 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1890,7 +1890,7 @@ pnfs_update_layout(struct inode *ino,
spin_unlock(&ino->i_lock);
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
!atomic_read(&lo->plh_outstanding)));
- if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
+ if (IS_ERR(lseg))
goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
goto lookup_again;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 7325baa8f9d4..c95f32b83a94 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -498,6 +498,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,

if (root->set_ownership)
root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+ else {
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ }

return inode;
}
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index c60ee46f3e39..fc91f7263f79 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -330,22 +330,21 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
goto fail;
inode->i_mode = S_IFREG | 0444;
inode->i_fop = &pstore_file_operations;
- private = kzalloc(sizeof(*private), GFP_KERNEL);
- if (!private)
- goto fail_alloc;
- private->record = record;
-
scnprintf(name, sizeof(name), "%s-%s-%llu%s",
pstore_type_to_name(record->type),
record->psi->name, record->id,
record->compressed ? ".enc.z" : "");

+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ goto fail_inode;
+
dentry = d_alloc_name(root, name);
if (!dentry)
goto fail_private;

+ private->record = record;
inode->i_size = private->total_size = size;
-
inode->i_private = private;

if (record->time.tv_sec)
@@ -361,7 +360,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)

fail_private:
free_pstore_private(private);
-fail_alloc:
+fail_inode:
iput(inode);

fail:
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a7ceae90110e..76748255f843 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -517,6 +517,9 @@ xfs_file_dio_aio_write(
}

if (iocb->ki_flags & IOCB_NOWAIT) {
+ /* unaligned dio always waits, bail */
+ if (unaligned_io)
+ return -EAGAIN;
if (!xfs_ilock_nowait(ip, iolock))
return -EAGAIN;
} else {
@@ -536,9 +539,6 @@ xfs_file_dio_aio_write(
* xfs_file_aio_write_checks() for other reasons.
*/
if (unaligned_io) {
- /* unaligned dio always waits, bail */
- if (iocb->ki_flags & IOCB_NOWAIT)
- return -EAGAIN;
inode_dio_wait(inode);
} else if (iolock == XFS_IOLOCK_EXCL) {
xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0e9bd9c83870..aa6c093d9ce9 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() __WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
+#define __WARN() do { \
+ printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
+} while (0)
+#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
#define __WARN_printf_taint(taint, arg...) \
do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
#endif
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index c0d4df6a606f..9d3b745c3107 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -40,6 +40,7 @@
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
#define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_CTA 0x81

#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f

@@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[0];
};
+
+#define for_each_displayid_db(displayid, block, idx, length) \
+ for ((block) = (struct displayid_block *)&(displayid)[idx]; \
+ (idx) + sizeof(struct displayid_block) <= (length) && \
+ (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
+ (block)->num_bytes > 0; \
+ (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
+ (block) = (struct displayid_block *)&(displayid)[idx])
+
#endif
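
For reference, a minimal sketch of how a caller might walk a DisplayID extension with the new iterator; here 'displayid' and 'length' are assumed to come from the EDID extension parser and the tag handling is illustrative only:

	struct displayid_block *block;
	int idx = 0;	/* offset of the first data block within 'displayid' */

	for_each_displayid_db(displayid, block, idx, length) {
		switch (block->tag) {
		case DATA_BLOCK_CTA:
			/* parse the embedded CTA data block */
			break;
		case DATA_BLOCK_TILED_DISPLAY:
			/* parse tile topology */
			break;
		default:
			break;
		}
	}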
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 317ab30d2904..08a84d130120 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -662,7 +662,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}

-static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
+static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -1400,7 +1400,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}

-static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);

diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index dec95654f3ae..04c4a478323b 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -116,10 +116,10 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b10c21630f5..ab6bf81311a8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -590,6 +590,11 @@ static inline bool is_vmalloc_addr(const void *x)
return false;
#endif
}
+
+#ifndef is_ioremap_addr
+#define is_ioremap_addr(x) is_vmalloc_addr(x)
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b25d20822e75..3508f4508a11 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -586,7 +586,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index e412c092c1e8..c735113f7d93 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -328,7 +328,7 @@ extern void force_sigsegv(int sig, struct task_struct *p);
extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *,
+extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 047f9a5ccaad..1790bb41c964 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -803,11 +803,12 @@ struct ipvs_master_sync_state {
struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len;
unsigned int sync_queue_delay;
- struct task_struct *master_thread;
struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs;
};

+struct ip_vs_sync_thread_data;
+
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)

@@ -938,7 +939,8 @@ struct netns_ipvs {
spinlock_t sync_lock;
struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock;
- struct task_struct **backup_threads;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask;
volatile int sync_state;
struct mutex sync_mutex;
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index d074b6d60f8a..ac3c047d058c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -67,6 +67,8 @@ struct xdp_sock {
* in the SKB destructor callback.
*/
spinlock_t tx_completion_lock;
+ /* Protects generic receive. */
+ spinlock_t rx_lock;
u64 rx_dropped;
};

diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9b9e17bcc201..417a096e43d6 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -293,8 +293,8 @@ struct ib_rss_caps {
};

enum ib_tm_cap_flags {
- /* Support tag matching on RC transport */
- IB_TM_CAP_RC = 1 << 0,
+ /* Support tag matching with rendezvous offload for RC transport */
+ IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index cc7c8d42d4fd..a0d9d788527d 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -262,6 +262,8 @@ struct hda_codec {
unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+
#ifdef CONFIG_PM
unsigned long power_on_acct;
unsigned long power_off_acct;
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 7b60fd186cfe..77bc53ce419f 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -1383,7 +1383,7 @@ TRACE_EVENT(rxrpc_rx_eproto,
),

TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call ? call->debug_id : 0;
__entry->serial = serial;
__entry->why = why;
),
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9d01f4788d3e..9ae3f28ca469 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2871,6 +2871,7 @@ struct bpf_prog_info {
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
diff --git a/include/xen/events.h b/include/xen/events.h
index a48897199975..c0e6a0598397 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -3,6 +3,7 @@
#define _XEN_EVENTS_H

#include <linux/interrupt.h>
+#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif
@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq);
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);

static inline void notify_remote_via_evtchn(int port)
{
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 4c2fa3ac56f6..29d781061cd5 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)

obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 06ba9c5f156b..932fd3fa5a5a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1367,10 +1367,10 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
insn++;
CONT;
ALU_ARSH_X:
- DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
+ DST = (u64) (u32) (((s32) DST) >> SRC);
CONT;
ALU_ARSH_K:
- DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
+ DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC;
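
The old ALU_ARSH form reinterpreted the 64-bit register through an s32 pointer, which reads whichever four bytes come first in memory, while the new cast always takes the low 32 bits. A standalone user-space sketch of the difference (not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t dst = 0xffffffff80000000ULL;	/* low 32 bits: 0x80000000 */
		int32_t by_cast = (int32_t)dst;		/* low half on any endianness */
		int32_t by_pun;

		/* Same effect as *(s32 *)&dst, without the aliasing issue: reads
		 * the first four bytes, i.e. the low half on little endian but
		 * the high half on big endian.
		 */
		memcpy(&by_pun, &dst, sizeof(by_pun));

		printf("cast: 0x%08x  punned: 0x%08x\n",
		       (uint32_t)by_cast, (uint32_t)by_pun);
		return 0;
	}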
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ff130ddfbf6..cbc03f051598 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6197,17 +6197,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* the state of the call instruction (with WRITTEN set), and r0 comes
* from callee with its full parentage chain, anyway.
*/
- for (j = 0; j <= cur->curframe; j++)
- for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
- cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
* their parent and current state never has children yet. Only
* explored_states can get read marks.)
*/
- for (i = 0; i < BPF_REG_FP; i++)
- cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
+ for (j = 0; j <= cur->curframe; j++) {
+ for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
+ for (i = 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].live = REG_LIVE_NONE;
+ }

/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
diff --git a/kernel/iomem.c b/kernel/iomem.c
index f7525e14ebc6..98950b84c50c 100644
--- a/kernel/iomem.c
+++ b/kernel/iomem.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
- if (is_vmalloc_addr(addr))
+ if (is_ioremap_addr(addr))
iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
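
memunmap() now asks is_ioremap_addr() instead of is_vmalloc_addr(), with the generic is_ioremap_addr() fallback added in include/linux/mm.h keeping the old behaviour. Roughly what an architecture override might look like when its ioremap window lives outside the vmalloc area; ARCH_IOREMAP_START/END are illustrative names, not from this patch:

	#define is_ioremap_addr is_ioremap_addr
	static inline bool is_ioremap_addr(const void *x)
	{
		unsigned long addr = (unsigned long)x;

		/* true only for addresses inside the dedicated ioremap window */
		return addr >= ARCH_IOREMAP_START && addr < ARCH_IOREMAP_END;
	}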
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 04fe4f989bd8..bfac4d6761b3 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -754,6 +754,8 @@ void handle_fasteoi_nmi(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;

+ __kstat_incr_irqs_this_cpu(desc);
+
trace_irq_handler_entry(irq, action);
/*
* NMIs cannot be shared, there is only one action.
@@ -968,6 +970,8 @@ void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;

+ __kstat_incr_irqs_this_cpu(desc);
+
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9f8a709337cf..6c5b2b775ca6 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -679,6 +679,8 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
* @hwirq: The HW irq number to convert to a logical one
* @regs: Register file coming from the low-level handling code
*
+ * This function must be called from an NMI context.
+ *
* Returns: 0 on success, or -EINVAL if conversion has failed
*/
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
@@ -688,7 +690,10 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
unsigned int irq;
int ret = 0;

- nmi_enter();
+ /*
+ * NMI context needs to be set up earlier in order to deal with tracing.
+ */
+ WARN_ON(!in_nmi());

irq = irq_find_mapping(domain, hwirq);

@@ -701,7 +706,6 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
else
ret = -EINVAL;

- nmi_exit();
set_irq_regs(old_regs);
return ret;
}
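
Since nmi_enter()/nmi_exit() are no longer done here, the low-level entry code is expected to establish NMI context before calling in. A hedged sketch of what a caller might look like; the irqchip-specific names are made up for illustration:

	static void my_nmi_entry(struct pt_regs *regs)
	{
		unsigned int hwirq = my_read_nmi_hwirq();	/* hypothetical: read pending NMI */

		nmi_enter();				/* set up NMI context (incl. tracing) first */
		handle_domain_nmi(my_domain, hwirq, regs);
		nmi_exit();
	}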
@@ -945,6 +949,11 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

+static bool irq_is_nmi(struct irq_desc *desc)
+{
+ return desc->istate & IRQS_NMI;
+}
+
/**
* kstat_irqs - Get the statistics for an interrupt
* @irq: The interrupt number
@@ -962,7 +971,8 @@ unsigned int kstat_irqs(unsigned int irq)
if (!desc || !desc->kstat_irqs)
return 0;
if (!irq_settings_is_per_cpu_devid(desc) &&
- !irq_settings_is_per_cpu(desc))
+ !irq_settings_is_per_cpu(desc) &&
+ !irq_is_nmi(desc))
return desc->tot_count;

for_each_possible_cpu(cpu)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e221be724fe8..89b3f38a57f3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3611,19 +3611,19 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references) {
- /*
- * Check: unsigned int references:12, overflow.
- */
- if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
- return 0;
+ if (!references)
+ references++;

+ if (!hlock->references)
hlock->references++;
- } else {
- hlock->references = 2;
- }

- return 1;
+ hlock->references += references;
+
+ /* Overflow */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
+ return 0;
+
+ return 2;
}
}
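
The new return value 2 covers the nest_lock merge path: taking another lock of the same class under the same nest_lock does not add a held_lock entry, it only bumps the reference count, which is why a release/reacquire cycle may legitimately end with a smaller lockdep depth. An illustration with made-up lock names, assuming both child locks share a lock class:

	mutex_lock(&parent);				/* the nest lock */
	mutex_lock_nest_lock(&child_a->lock, &parent);	/* new held_lock entry */
	mutex_lock_nest_lock(&child_b->lock, &parent);	/* same class under nest lock: merged */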

@@ -3829,22 +3829,33 @@ static struct held_lock *find_held_lock(struct task_struct *curr,
}

static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
- int idx)
+ int idx, unsigned int *merged)
{
struct held_lock *hlock;
+ int first_idx = idx;

if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;

for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
- if (!__lock_acquire(hlock->instance,
+ switch (__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass,
hlock->trylock,
hlock->read, hlock->check,
hlock->hardirqs_off,
hlock->nest_lock, hlock->acquire_ip,
- hlock->references, hlock->pin_count))
+ hlock->references, hlock->pin_count)) {
+ case 0:
return 1;
+ case 1:
+ break;
+ case 2:
+ *merged += (idx == first_idx);
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
}
return 0;
}
@@ -3855,9 +3866,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned long ip)
{
struct task_struct *curr = current;
+ unsigned int depth, merged = 0;
struct held_lock *hlock;
struct lock_class *class;
- unsigned int depth;
int i;

if (unlikely(!debug_locks))
@@ -3882,14 +3893,14 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;

- if (reacquire_held_locks(curr, depth, i))
+ if (reacquire_held_locks(curr, depth, i, &merged))
return 0;

/*
* I took it apart and put it back together again, except now I have
* these 'spare' parts.. where shall I put them.
*/
- if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
return 0;
return 1;
}
@@ -3897,8 +3908,8 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
+ unsigned int depth, merged = 0;
struct held_lock *hlock;
- unsigned int depth;
int i;

if (unlikely(!debug_locks))
@@ -3923,7 +3934,11 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
hlock->read = 1;
hlock->acquire_ip = ip;

- if (reacquire_held_locks(curr, depth, i))
+ if (reacquire_held_locks(curr, depth, i, &merged))
+ return 0;
+
+ /* Merging can't happen with unchanged classes.. */
+ if (DEBUG_LOCKS_WARN_ON(merged))
return 0;

/*
@@ -3932,6 +3947,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
*/
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
return 0;
+
return 1;
}

@@ -3946,8 +3962,8 @@ static int
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
struct task_struct *curr = current;
+ unsigned int depth, merged = 1;
struct held_lock *hlock;
- unsigned int depth;
int i;

if (unlikely(!debug_locks))
@@ -4002,14 +4018,15 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
if (i == depth-1)
return 1;

- if (reacquire_held_locks(curr, depth, i + 1))
+ if (reacquire_held_locks(curr, depth, i + 1, &merged))
return 0;

/*
* We had N bottles of beer on the wall, we drank one, but now
* there's not N-1 bottles of beer left on the wall...
+ * Pouring two of the bottles together is acceptable.
*/
- DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
+ DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);

/*
* Since reacquire_held_locks() would have called check_chain_key()
diff --git a/kernel/padata.c b/kernel/padata.c
index 3e2633ae3bca..cb268716cefb 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -267,7 +267,12 @@ static void padata_reorder(struct parallel_data *pd)
* The next object that needs serialization might have arrived to
* the reorder queues in the meantime, we will be called again
* from the timer function if no one else cares for it.
+ *
+ * Ensure reorder_objects is read after pd->lock is dropped so we see
+ * an increment from another task in padata_do_serial. Pairs with
+ * smp_mb__after_atomic in padata_do_serial.
*/
+ smp_mb();
if (atomic_read(&pd->reorder_objects)
&& !(pinst->flags & PADATA_RESET))
mod_timer(&pd->timer, jiffies + HZ);
@@ -387,6 +392,13 @@ void padata_do_serial(struct padata_priv *padata)
list_add_tail(&padata->list, &pqueue->reorder.list);
spin_unlock(&pqueue->reorder.lock);

+ /*
+ * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
+ * in padata_reorder.
+ */
+ smp_mb__after_atomic();
+
put_cpu();

/* If we're running on the wrong CPU, call padata_reorder() via a
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index aa6e72fb7c08..098233ebe589 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -325,7 +325,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
}

read_lock(&tasklist_lock);
- force_sig(SIGKILL, pid_ns->child_reaper);
+ send_sig(SIGKILL, pid_ns->child_reaper, 1);
read_unlock(&tasklist_lock);

do_exit(0);
diff --git a/kernel/resource.c b/kernel/resource.c
index 92190f62ebc5..eaeaa36a4938 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -325,7 +325,7 @@ EXPORT_SYMBOL(release_resource);
*
* If a resource is found, returns 0 and @*res is overwritten with the part
* of the resource that's within [@start..@end]; if none is found, returns
- * -1 or -EINVAL for other invalid parameters.
+ * -ENODEV. Returns -EINVAL for invalid parameters.
*
* This function walks the whole tree and not just first level children
* unless @first_lvl is true.
@@ -364,16 +364,16 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
break;
}

+ if (p) {
+ /* copy data */
+ res->start = max(start, p->start);
+ res->end = min(end, p->end);
+ res->flags = p->flags;
+ res->desc = p->desc;
+ }
+
read_unlock(&resource_lock);
- if (!p)
- return -1;
-
- /* copy data */
- res->start = max(start, p->start);
- res->end = min(end, p->end);
- res->flags = p->flags;
- res->desc = p->desc;
- return 0;
+ return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a75ad50b5e2f..242233490a49 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5175,7 +5175,7 @@ long __sched io_schedule_timeout(long timeout)
}
EXPORT_SYMBOL(io_schedule_timeout);

-void io_schedule(void)
+void __sched io_schedule(void)
{
int token;

diff --git a/kernel/sched/sched-pelt.h b/kernel/sched/sched-pelt.h
index a26473674fb7..c529706bed11 100644
--- a/kernel/sched/sched-pelt.h
+++ b/kernel/sched/sched-pelt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Generated by Documentation/scheduler/sched-pelt; do not modify. */

-static const u32 runnable_avg_yN_inv[] = {
+static const u32 runnable_avg_yN_inv[] __maybe_unused = {
0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
diff --git a/kernel/signal.c b/kernel/signal.c
index 5f3dd69b50e2..ef46559100f3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1053,27 +1053,6 @@ static inline bool legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

-#ifdef CONFIG_USER_NS
-static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
-{
- if (current_user_ns() == task_cred_xxx(t, user_ns))
- return;
-
- if (SI_FROMKERNEL(info))
- return;
-
- rcu_read_lock();
- info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
- make_kuid(current_user_ns(), info->si_uid));
- rcu_read_unlock();
-}
-#else
-static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
-{
- return;
-}
-#endif
-
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
enum pid_type type, int from_ancestor_ns)
{
@@ -1131,7 +1110,11 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
q->info.si_code = SI_USER;
q->info.si_pid = task_tgid_nr_ns(current,
task_active_pid_ns(t));
- q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
+ rcu_read_lock();
+ q->info.si_uid =
+ from_kuid_munged(task_cred_xxx(t, user_ns),
+ current_uid());
+ rcu_read_unlock();
break;
case (unsigned long) SEND_SIG_PRIV:
clear_siginfo(&q->info);
@@ -1143,13 +1126,8 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
break;
default:
copy_siginfo(&q->info, info);
- if (from_ancestor_ns)
- q->info.si_pid = 0;
break;
}
-
- userns_fixup_signal_uid(&q->info, t);
-
} else if (!is_si_special(info)) {
if (sig >= SIGRTMIN && info->si_code != SI_USER) {
/*
@@ -1193,6 +1171,28 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
return ret;
}

+static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
+{
+ bool ret = false;
+ switch (siginfo_layout(info->si_signo, info->si_code)) {
+ case SIL_KILL:
+ case SIL_CHLD:
+ case SIL_RT:
+ ret = true;
+ break;
+ case SIL_TIMER:
+ case SIL_POLL:
+ case SIL_FAULT:
+ case SIL_FAULT_MCEERR:
+ case SIL_FAULT_BNDERR:
+ case SIL_FAULT_PKUERR:
+ case SIL_SYS:
+ ret = false;
+ break;
+ }
+ return ret;
+}
+
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
enum pid_type type)
{
@@ -1202,7 +1202,20 @@ static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct
from_ancestor_ns = si_fromuser(info) &&
!task_pid_nr_ns(current, task_active_pid_ns(t));
#endif
+ if (!is_si_special(info) && has_si_pid_and_uid(info)) {
+ struct user_namespace *t_user_ns;
+
+ rcu_read_lock();
+ t_user_ns = task_cred_xxx(t, user_ns);
+ if (current_user_ns() != t_user_ns) {
+ kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
+ info->si_uid = from_kuid_munged(t_user_ns, uid);
+ }
+ rcu_read_unlock();

+ if (!task_pid_nr_ns(current, task_active_pid_ns(t)))
+ info->si_pid = 0;
+ }
return __send_signal(sig, info, t, type, from_ancestor_ns);
}

@@ -1436,13 +1449,44 @@ static inline bool kill_as_cred_perm(const struct cred *cred,
uid_eq(cred->uid, pcred->uid);
}

-/* like kill_pid_info(), but doesn't use uid/euid of "current" */
-int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
- const struct cred *cred)
+/*
+ * The usb asyncio usage of siginfo is wrong. The glibc support
+ * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
+ * AKA after the generic fields:
+ * kernel_pid_t si_pid;
+ * kernel_uid32_t si_uid;
+ * sigval_t si_value;
+ *
+ * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
+ * after the generic fields is:
+ * void __user *si_addr;
+ *
+ * This is a practical problem when there is a 64bit big endian kernel
+ * and a 32bit userspace. The 32bit address will be encoded in the low
+ * 32bits of the pointer, and those low 32bits will be stored at a higher
+ * address than they would appear in a 32bit pointer. So userspace will
+ * not see the address it was expecting for its completions.
+ *
+ * There is nothing in the encoding that can allow
+ * copy_siginfo_to_user32 to detect this confusion of formats, so
+ * handle this by requiring the caller of kill_pid_usb_asyncio to
+ * notice when this situation takes place and to store the 32bit
+ * pointer in sival_int, instead of sival_addr of the sigval_t addr
+ * parameter.
+ */
+int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
+ struct pid *pid, const struct cred *cred)
{
- int ret = -EINVAL;
+ struct kernel_siginfo info;
struct task_struct *p;
unsigned long flags;
+ int ret = -EINVAL;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = errno;
+ info.si_code = SI_ASYNCIO;
+ *((sigval_t *)&info.si_pid) = addr;

if (!valid_signal(sig))
return ret;
@@ -1453,17 +1497,17 @@ int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
ret = -ESRCH;
goto out_unlock;
}
- if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
+ if (!kill_as_cred_perm(cred, p)) {
ret = -EPERM;
goto out_unlock;
}
- ret = security_task_kill(p, info, sig, cred);
+ ret = security_task_kill(p, &info, sig, cred);
if (ret)
goto out_unlock;

if (sig) {
if (lock_task_sighand(p, &flags)) {
- ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
+ ret = __send_signal(sig, &info, p, PIDTYPE_TGID, 0);
unlock_task_sighand(p, &flags);
} else
ret = -ESRCH;
@@ -1472,7 +1516,7 @@ int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
+EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -4411,6 +4455,28 @@ static inline void siginfo_buildtime_checks(void)
CHECK_OFFSET(si_syscall);
CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET
+
+ /* usb asyncio */
+ BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
+ offsetof(struct siginfo, si_addr));
+ if (sizeof(int) == sizeof(void __user *)) {
+ BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
+ sizeof(void __user *));
+ } else {
+ BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
+ sizeof_field(struct siginfo, si_uid)) !=
+ sizeof(void __user *));
+ BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
+ offsetof(struct siginfo, si_uid));
+ }
+#ifdef CONFIG_COMPAT
+ BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
+ offsetof(struct compat_siginfo, si_addr));
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
+ sizeof(compat_uptr_t));
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
+ sizeof_field(struct siginfo, si_pid));
+#endif
}

void __init signals_init(void)
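
A standalone illustration (not kernel code) of the layout fact these build-time checks rely on: the 32-bit si_pid/sival_int slot overlays the first four bytes of the pointer-sized si_addr, which are the low half of the value on little endian but the high half on big endian:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t si_addr = 0x12345678;	/* a 32-bit user pointer stored in a 64-bit field */
		uint32_t first_word;		/* what a 32-bit overlay at offset 0 would read */

		memcpy(&first_word, &si_addr, sizeof(first_word));
		/* little endian: 0x12345678 (the pointer survives)
		 * big endian:    0x00000000 (the pointer is lost), hence the
		 * requirement to hand the 32-bit pointer over via sival_int
		 */
		printf("first word: 0x%08x\n", first_word);
		return 0;
	}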
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f43d47c8c3b6..98b3678fd48e 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -42,6 +42,7 @@ static u64 tick_length_base;
#define MAX_TICKADJ 500LL /* usecs */
#define MAX_TICKADJ_SCALED \
(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+#define MAX_TAI_OFFSET 100000

/*
* phase-lock loop variables
@@ -690,7 +691,8 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
time_constant = max(time_constant, 0l);
}

- if (txc->modes & ADJ_TAI && txc->constant >= 0)
+ if (txc->modes & ADJ_TAI &&
+ txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
*time_tai = txc->constant;

if (txc->modes & ADJ_OFFSET)
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 98ba50dcb1b2..acb326f5f50a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -282,23 +282,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
SEQ_printf(m, "\n");
}

-static int timer_list_show(struct seq_file *m, void *v)
-{
- struct timer_list_iter *iter = v;
-
- if (iter->cpu == -1 && !iter->second_pass)
- timer_list_header(m, iter->now);
- else if (!iter->second_pass)
- print_cpu(m, iter->cpu, iter->now);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
- else if (iter->cpu == -1 && iter->second_pass)
- timer_list_show_tickdevices_header(m);
- else
- print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
-#endif
- return 0;
-}
-
void sysrq_timer_list_show(void)
{
u64 now = ktime_to_ns(ktime_get());
@@ -317,6 +300,24 @@ void sysrq_timer_list_show(void)
return;
}

+#ifdef CONFIG_PROC_FS
+static int timer_list_show(struct seq_file *m, void *v)
+{
+ struct timer_list_iter *iter = v;
+
+ if (iter->cpu == -1 && !iter->second_pass)
+ timer_list_header(m, iter->now);
+ else if (!iter->second_pass)
+ print_cpu(m, iter->cpu, iter->now);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ else if (iter->cpu == -1 && iter->second_pass)
+ timer_list_show_tickdevices_header(m);
+ else
+ print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
+#endif
+ return 0;
+}
+
static void *move_iter(struct timer_list_iter *iter, loff_t offset)
{
for (; offset; offset--) {
@@ -376,3 +377,4 @@ static int __init init_timer_list_procfs(void)
return 0;
}
__initcall(init_timer_list_procfs);
+#endif
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 1db74eb098d0..121beb2f0930 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -42,8 +42,18 @@
BUG_ON(pad < 0 || pad >= nn);

/* Does the caller provide the syndrome ? */
- if (s != NULL)
- goto decode;
+ if (s != NULL) {
+ for (i = 0; i < nroots; i++) {
+ /* The syndrome is in index form,
+ * so nn represents zero
+ */
+ if (s[i] != nn)
+ goto decode;
+ }
+
+ /* syndrome is zero, no errors to correct */
+ return 0;
+ }

/* form the syndromes; i.e., evaluate data(x) at roots of
* g(x) */
@@ -99,9 +109,9 @@
if (no_eras > 0) {
/* Init lambda to be the erasure locator polynomial */
lambda[1] = alpha_to[rs_modnn(rs,
- prim * (nn - 1 - eras_pos[0]))];
+ prim * (nn - 1 - (eras_pos[0] + pad)))];
for (i = 1; i < no_eras; i++) {
- u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+ u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
for (j = i + 1; j > 0; j--) {
tmp = index_of[lambda[j - 1]];
if (tmp != nn) {
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 739dc9fe2c55..f0757a67affe 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -678,17 +678,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
struct scatterlist *sg;
- unsigned long pgoffset;

if (!__sg_page_iter_next(&miter->piter))
return false;

sg = miter->piter.sg;
- pgoffset = miter->piter.sg_pgoffset;

- miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+ miter->__offset &= PAGE_SIZE - 1;
miter->__remaining = sg->offset + sg->length -
- (pgoffset << PAGE_SHIFT) - miter->__offset;
+ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+ miter->__offset;
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
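
The fix splits an sg->offset that may exceed PAGE_SIZE into whole pages (added to sg_pgoffset) and an in-page remainder (kept in __offset). A standalone sketch of the arithmetic, assuming 4 KiB pages and a hypothetical offset:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long offset = 5000;	/* hypothetical sg->offset larger than PAGE_SIZE */
		unsigned long extra_pages = offset >> PAGE_SHIFT;	/* 1: advance the page index */
		unsigned long in_page = offset & (PAGE_SIZE - 1);	/* 904: offset within that page */

		printf("extra pages: %lu, in-page offset: %lu\n", extra_pages, in_page);
		return 0;
	}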
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b1d39cabf125..6753ee9326b8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -782,10 +782,16 @@ static struct p9_trans_module p9_virtio_trans = {
/* The standard init function */
static int __init p9_virtio_init(void)
{
+ int rc;
+
INIT_LIST_HEAD(&virtio_chan_list);

v9fs_register_trans(&p9_virtio_trans);
- return register_virtio_driver(&p9_virtio_drv);
+ rc = register_virtio_driver(&p9_virtio_drv);
+ if (rc)
+ v9fs_unregister_trans(&p9_virtio_trans);
+
+ return rc;
}

static void __exit p9_virtio_cleanup(void)
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 29420ebb8f07..3963eb11c3fb 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -530,13 +530,19 @@ static struct xenbus_driver xen_9pfs_front_driver = {

static int p9_trans_xen_init(void)
{
+ int rc;
+
if (!xen_domain())
return -ENODEV;

pr_info("Initialising Xen transport for 9pfs\n");

v9fs_register_trans(&p9_xen_trans);
- return xenbus_register_frontend(&xen_9pfs_front_driver);
+ rc = xenbus_register_frontend(&xen_9pfs_front_driver);
+ if (rc)
+ v9fs_unregister_trans(&p9_xen_trans);
+
+ return rc;
}
module_init(p9_trans_xen_init);

diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index de61091af666..267418b6129a 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2349,7 +2349,7 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
return ret;
}

-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{
/* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface);
@@ -2695,8 +2695,8 @@ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV",
.iface = {
- .activate = batadv_iv_iface_activate,
.enable = batadv_iv_ogm_iface_enable,
+ .enabled = batadv_iv_iface_enabled,
.disable = batadv_iv_ogm_iface_disable,
.update_mac = batadv_iv_ogm_iface_update_mac,
.primary_set = batadv_iv_ogm_primary_iface_set,
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 96ef7c70b4d9..9072392e43cd 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -807,6 +807,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,

batadv_hardif_recalc_extra_skbroom(soft_iface);

+ if (bat_priv->algo_ops->iface.enabled)
+ bat_priv->algo_ops->iface.enabled(hard_iface);
+
out:
return 0;

diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 26c4e2493ddf..abad64eb7dc4 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -3826,6 +3826,8 @@ static void batadv_tt_purge(struct work_struct *work)
*/
void batadv_tt_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);

diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ed0f6a519de5..3c83c8b4f1e1 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -2135,6 +2135,9 @@ struct batadv_algo_iface_ops {
/** @enable: init routing info when hard-interface is enabled */
int (*enable)(struct batadv_hard_iface *hard_iface);

+ /** @enabled: notification when hard-interface was enabled (optional) */
+ void (*enabled)(struct batadv_hard_iface *hard_iface);
+
/** @disable: de-init routing info when hard-interface is disabled */
void (*disable)(struct batadv_hard_iface *hard_iface);

diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index a7cd23f00bde..50530561da98 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -187,10 +187,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
}

if (!rt) {
- nexthop = &lowpan_cb(skb)->gw;
-
- if (ipv6_addr_any(nexthop))
- return NULL;
+ if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
+ /* There is neither route nor gateway,
+ * probably the destination is a direct peer.
+ */
+ nexthop = daddr;
+ } else {
+ /* There is a known gateway
+ */
+ nexthop = &lowpan_cb(skb)->gw;
+ }
} else {
nexthop = rt6_nexthop(rt, daddr);

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8b893baf9bbe..31eb0449479b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5588,6 +5588,11 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);

+ if (min < hcon->le_conn_min_interval ||
+ max > hcon->le_conn_max_interval)
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
+
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index a442e21f3894..5abd423b55fa 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;

- strncpy(hid->name, req->name, sizeof(hid->name));
+ strscpy(hid->name, req->name, sizeof(hid->name));

snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
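
For context on the strscpy() switch, a standalone user-space example of the strncpy() pitfall it avoids: when the source is at least as long as the buffer, strncpy() leaves the destination without a terminating NUL:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char name[8];
		const char *req = "much-too-long-device-name";

		strncpy(name, req, sizeof(name));
		/* name[] now holds "much-too" with no NUL terminator; a later
		 * printf("%s", name) would run off the end. strscpy() truncates
		 * and always NUL-terminates instead.
		 */
		printf("last byte: '%c' (not a NUL)\n", name[sizeof(name) - 1]);
		return 0;
	}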
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 2151913892ce..03be6a4baef3 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -192,6 +192,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
ca.version = ca32.version;
ca.flags = ca32.flags;
ca.idle_to = ca32.idle_to;
+ ca32.name[sizeof(ca32.name) - 1] = '\0';
memcpy(ca.name, ca32.name, 128);

csock = sockfd_lookup(ca.ctrl_sock, &err);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5406d7cd46ad..32d2be9d6858 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4394,6 +4394,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,

l2cap_chan_lock(chan);

+ if (chan->state != BT_DISCONN) {
+ l2cap_chan_unlock(chan);
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);

@@ -5291,7 +5297,14 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,

memset(&rsp, 0, sizeof(rsp));

- err = hci_check_conn_params(min, max, latency, to_multiplier);
+ if (min < hcon->le_conn_min_interval ||
+ max > hcon->le_conn_max_interval) {
+ BT_DBG("requested connection interval exceeds current bounds.");
+ err = -EINVAL;
+ } else {
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
+ }
+
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 621146d04c03..d2542984b191 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2580,6 +2580,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
goto distribute;
}

+ /* Drop IRK if peer is using an identity address during pairing but is
+ * providing a different address as identity information.
+ *
+ * Microsoft Surface Precision Mouse is known to have this bug.
+ */
+ if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
+ (bacmp(&info->bdaddr, &hcon->dst) ||
+ info->addr_type != hcon->dst_type)) {
+ bt_dev_err(hcon->hdev,
+ "ignoring IRK with invalid identity address");
+ goto distribute;
+ }
+
bacpy(&smp->id_addr, &info->bdaddr);
smp->id_addr_type = info->addr_type;

diff --git a/net/key/af_key.c b/net/key/af_key.c
index 4af1e1d60b9f..51c0f10bb131 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2442,8 +2442,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
goto out;
}
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
goto out;
+ }

out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2694,8 +2696,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
return PTR_ERR(out_skb);

err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
return err;
+ }

out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 2c9609929c71..455804456008 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -625,7 +625,7 @@ mtype_resize(struct ip_set *set, bool retried)
goto cleanup;
}
m->size = AHASH_INIT_SIZE;
- extsize = ext_size(AHASH_INIT_SIZE, dsize);
+ extsize += ext_size(AHASH_INIT_SIZE, dsize);
RCU_INIT_POINTER(hbucket(t, key), m);
} else if (m->pos >= m->size) {
struct hbucket *ht;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 8ebf21149ec3..e72b51157cbb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2250,7 +2250,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
static int __net_init __ip_vs_init(struct net *net)
{
struct netns_ipvs *ipvs;
- int ret;

ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL)
@@ -2282,17 +2281,11 @@ static int __net_init __ip_vs_init(struct net *net)
if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail;

- ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
- if (ret < 0)
- goto hook_fail;
-
return 0;
/*
* Error handling
*/

-hook_fail:
- ip_vs_sync_net_cleanup(ipvs);
sync_fail:
ip_vs_conn_net_cleanup(ipvs);
conn_fail:
@@ -2322,6 +2315,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
net->ipvs = NULL;
}

+static int __net_init __ip_vs_dev_init(struct net *net)
+{
+ int ret;
+
+ ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ if (ret < 0)
+ goto hook_fail;
+ return 0;
+
+hook_fail:
+ return ret;
+}
+
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -2341,6 +2347,7 @@ static struct pernet_operations ipvs_core_ops = {
};

static struct pernet_operations ipvs_core_dev_ops = {
+ .init = __ip_vs_dev_init,
.exit = __ip_vs_dev_cleanup,
};

diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 053cd96b9c76..179e9d11e41b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2382,9 +2382,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
}
goto out_dec;
}
@@ -3490,10 +3488,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;

- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
return ret;
}

diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 2526be6b3d90..a4a78c4b06de 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))

struct ip_vs_sync_thread_data {
+ struct task_struct *task;
struct netns_ipvs *ipvs;
struct socket *sock;
char *buf;
@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
max(IPVS_SYNC_SEND_DELAY, 1));
ms->sync_queue_len++;
list_add_tail(&sb->list, &ms->sync_queue);
- if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
- wake_up_process(ms->master_thread);
+ if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
+ int id = (int)(ms - ipvs->ms);
+
+ wake_up_process(ipvs->master_tinfo[id].task);
+ }
} else
ip_vs_sync_buff_release(sb);
spin_unlock(&ipvs->sync_lock);
@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
spin_lock_bh(&ipvs->sync_lock);
if (ms->sync_queue_len &&
ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+ int id = (int)(ms - ipvs->ms);
+
ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
- wake_up_process(ms->master_thread);
+ wake_up_process(ipvs->master_tinfo[id].task);
}
spin_unlock_bh(&ipvs->sync_lock);
}
@@ -1703,10 +1709,6 @@ static int sync_thread_master(void *data)
if (sb)
ip_vs_sync_buff_release(sb);

- /* release the sending multicast socket */
- sock_release(tinfo->sock);
- kfree(tinfo);
-
return 0;
}

@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
}
}

- /* release the sending multicast socket */
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
-
return 0;
}

@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
- struct ip_vs_sync_thread_data *tinfo = NULL;
- struct task_struct **array = NULL, *task;
+ struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
+ struct task_struct *task;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
result = -EEXIST;
- if (ipvs->backup_threads)
+ if (ipvs->backup_tinfo)
goto out_early;

ipvs->bcfg = *c;
@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
master_wakeup_work_handler);
ms->ipvs = ipvs;
}
- } else {
- array = kcalloc(count, sizeof(struct task_struct *),
- GFP_KERNEL);
- result = -ENOMEM;
- if (!array)
- goto out;
}
+ result = -ENOMEM;
+ ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
+ GFP_KERNEL);
+ if (!ti)
+ goto out;

for (id = 0; id < count; id++) {
- result = -ENOMEM;
- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
- if (!tinfo)
- goto out;
+ tinfo = &ti[id];
tinfo->ipvs = ipvs;
- tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) {
+ result = -ENOMEM;
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
goto out;
- } else {
- tinfo->buf = NULL;
}
tinfo->id = id;
if (state == IP_VS_STATE_MASTER)
@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
result = PTR_ERR(task);
goto out;
}
- tinfo = NULL;
- if (state == IP_VS_STATE_MASTER)
- ipvs->ms[id].master_thread = task;
- else
- array[id] = task;
+ tinfo->task = task;
}

/* mark as active */

- if (state == IP_VS_STATE_BACKUP)
- ipvs->backup_threads = array;
+ if (state == IP_VS_STATE_MASTER)
+ ipvs->master_tinfo = ti;
+ else
+ ipvs->backup_tinfo = ti;
spin_lock_bh(&ipvs->sync_buff_lock);
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,

out:
/* We do not need RTNL lock anymore, release it here so that
- * sock_release below and in the kthreads can use rtnl_lock
- * to leave the mcast group.
+ * sock_release below can use rtnl_lock to leave the mcast group.
*/
rtnl_unlock();
- count = id;
- while (count-- > 0) {
- if (state == IP_VS_STATE_MASTER)
- kthread_stop(ipvs->ms[count].master_thread);
- else
- kthread_stop(array[count]);
+ id = min(id, count - 1);
+ if (ti) {
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->task)
+ kthread_stop(tinfo->task);
+ }
}
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
mutex_unlock(&ipvs->sync_mutex);
- if (tinfo) {
- if (tinfo->sock)
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
+
+ /* No more mutexes, release socks */
+ if (ti) {
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ }
+ kfree(ti);
}
- kfree(array);
return result;

out_early:
@@ -1944,15 +1935,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,

int stop_sync_thread(struct netns_ipvs *ipvs, int state)
{
- struct task_struct **array;
+ struct ip_vs_sync_thread_data *ti, *tinfo;
int id;
int retc = -EINVAL;

IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));

+ mutex_lock(&ipvs->sync_mutex);
if (state == IP_VS_STATE_MASTER) {
+ retc = -ESRCH;
if (!ipvs->ms)
- return -ESRCH;
+ goto err;
+ ti = ipvs->master_tinfo;

/*
* The lock synchronizes with sb_queue_tail(), so that we don't
@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
struct ipvs_master_sync_state *ms = &ipvs->ms[id];
int ret;

+ tinfo = &ti[id];
pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(ms->master_thread));
+ task_pid_nr(tinfo->task));
cancel_delayed_work_sync(&ms->master_wakeup_work);
- ret = kthread_stop(ms->master_thread);
+ ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
kfree(ipvs->ms);
ipvs->ms = NULL;
+ ipvs->master_tinfo = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!ipvs->backup_threads)
- return -ESRCH;
+ retc = -ESRCH;
+ if (!ipvs->backup_tinfo)
+ goto err;
+ ti = ipvs->backup_tinfo;

ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
- array = ipvs->backup_threads;
retc = 0;
for (id = ipvs->threads_mask; id >= 0; id--) {
int ret;

+ tinfo = &ti[id];
pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(array[id]));
- ret = kthread_stop(array[id]);
+ task_pid_nr(tinfo->task));
+ ret = kthread_stop(tinfo->task);
if (retc >= 0)
retc = ret;
}
- kfree(array);
- ipvs->backup_threads = NULL;
+ ipvs->backup_tinfo = NULL;
+ } else {
+ goto err;
}
+ id = ipvs->threads_mask;
+ mutex_unlock(&ipvs->sync_mutex);
+
+ /* No more mutexes, release socks */
+ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ }
+ kfree(ti);

/* decrease the module use count */
ip_vs_use_count_dec();
+ return retc;

+err:
+ mutex_unlock(&ipvs->sync_mutex);
return retc;
}

@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
{
int retc;

- mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");
@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
- mutex_unlock(&ipvs->sync_mutex);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d2715b4d2e72..061bdab37b1a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1254,7 +1254,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
struct nf_conntrack_tuple tuple;
struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
- u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
struct nf_conntrack_zone zone;
int err;

@@ -1264,11 +1263,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,

if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
- u3, &zone);
+ nfmsg->nfgen_family, &zone);
else if (cda[CTA_TUPLE_REPLY])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
- u3, &zone);
+ nfmsg->nfgen_family, &zone);
else {
+ u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+
return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid,
nlmsg_report(nlh), u3);
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 9becac953587..71a84a0517f3 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -221,7 +221,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
/* See ip_conntrack_proto_tcp.c */
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
- nf_ip_checksum(skb, state->hook, dataoff, 0)) {
+ nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT;
}
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
index 62743da3004f..0b0efbb953bf 100644
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@ -567,7 +567,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,

if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
return 0;
- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+ if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0;

inside = (void *)skb->data + hdrlen;
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index 06dc55590441..51b454d8fa9c 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break;
- if ((protocol == 0 && !csum_fold(skb->csum)) ||
+ if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
+ !csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol,
skb->csum)) {
@@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
}
/* fall through */
case CHECKSUM_NONE:
- if (protocol == 0)
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
skb->csum = 0;
else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index ea8d5aed1e2c..a241d7cf2071 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1767,6 +1767,7 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_snd_buf.head[0].iov_len = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf,
req->rq_snd_buf.head[0].iov_base, req);
+ xdr_free_bvec(&req->rq_snd_buf);
if (rpc_encode_header(task, &xdr))
return;

@@ -1799,8 +1800,6 @@ call_encode(struct rpc_task *task)
rpc_exit(task, task->tk_status);
}
return;
- } else {
- xprt_request_prepare(task->tk_rqstp);
}

/* Add task to reply queue before transmission to avoid races */
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d7117d241460..0e185c29d9f1 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1006,6 +1006,8 @@ xprt_request_enqueue_receive(struct rpc_task *task)

if (!xprt_request_need_enqueue_receive(task, req))
return;
+
+ xprt_request_prepare(task->tk_rqstp);
spin_lock(&xprt->queue_lock);

/* Update the softirq receive buffer */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index a437ee8ae482..1b2450191110 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -909,6 +909,7 @@ static int xs_nospace(struct rpc_rqst *req)
static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
+ xdr_free_bvec(&req->rq_rcv_buf);
req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
}
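
The sunrpc hunks above move xprt_request_prepare() into the receive-enqueue path and free any bvec left over from an earlier attempt before re-encoding or re-allocating, so a retransmitted request does not leak. A rough sketch of that "release before (re)allocate" idea, with made-up names rather than the RPC structures:

#include <stdlib.h>

/* Illustrative only: an idempotent "prepare" helper in the spirit of the
 * hunks above. Releasing the old buffer first mirrors xdr_free_bvec();
 * free(NULL) is a no-op, so the first call is safe.
 */
struct demo_req {
	void *bvec;
	size_t len;
};

static int demo_prepare(struct demo_req *req, size_t len)
{
	free(req->bvec);
	req->bvec = calloc(1, len);
	if (!req->bvec)
		return -1;
	req->len = len;
	return 0;
}

int main(void)
{
	struct demo_req req = { 0 };

	demo_prepare(&req, 128);
	demo_prepare(&req, 256);	/* second call does not leak the first buffer */
	free(req.bvec);
	return 0;
}
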

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a14e8864e4fa..5e0637db92ea 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -123,13 +123,17 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
u64 addr;
int err;

- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
- return -EINVAL;
+ spin_lock_bh(&xs->rx_lock);
+
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
+ err = -EINVAL;
+ goto out_unlock;
+ }

if (!xskq_peek_addr(xs->umem->fq, &addr) ||
len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
- xs->rx_dropped++;
- return -ENOSPC;
+ err = -ENOSPC;
+ goto out_drop;
}

addr += xs->umem->headroom;
@@ -138,13 +142,21 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
memcpy(buffer, xdp->data_meta, len + metalen);
addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
- if (!err) {
- xskq_discard_addr(xs->umem->fq);
- xsk_flush(xs);
- return 0;
- }
+ if (err)
+ goto out_drop;
+
+ xskq_discard_addr(xs->umem->fq);
+ xskq_produce_flush_desc(xs->rx);

+ spin_unlock_bh(&xs->rx_lock);
+
+ xs->sk.sk_data_ready(&xs->sk);
+ return 0;
+
+out_drop:
xs->rx_dropped++;
+out_unlock:
+ spin_unlock_bh(&xs->rx_lock);
return err;
}

@@ -765,6 +777,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,

xs = xdp_sk(sk);
mutex_init(&xs->mutex);
+ spin_lock_init(&xs->rx_lock);
spin_lock_init(&xs->tx_completion_lock);

mutex_lock(&net->xdp.lock);
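
The xsk.c changes above serialize the generic receive path with a per-socket rx_lock and funnel every exit through the out_drop/out_unlock labels, so the drop counter and the unlock each happen exactly once and the sk_data_ready() wakeup is issued only after the lock is released. A compile-and-run sketch of that unwind shape, using a pthread mutex as a stand-in for the kernel spinlock:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long rx_dropped;

/* Illustrative control flow only: -1/-2 stand in for -EINVAL/-ENOSPC. */
static int demo_rcv(int queue_matches, int has_space)
{
	int err = 0;

	pthread_mutex_lock(&rx_lock);

	if (!queue_matches) {
		err = -1;		/* not counted as a drop */
		goto out_unlock;
	}
	if (!has_space) {
		err = -2;
		goto out_drop;
	}
	goto out_unlock;

out_drop:
	rx_dropped++;
out_unlock:
	pthread_mutex_unlock(&rx_lock);
	return err;
}

int main(void)
{
	int a = demo_rcv(1, 1);
	int b = demo_rcv(1, 0);

	printf("%d %d %lu\n", a, b, rx_dropped);	/* 0 -2 1 */
	return 0;
}
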
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 610c0bdc0c2b..cd333701f4bf 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -240,7 +240,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
/* Order producer and data */
smp_wmb();

- q->prod_tail = q->prod_head,
+ q->prod_tail = q->prod_head;
WRITE_ONCE(q->ring->producer, q->prod_tail);
}
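
The xsk_queue.h change above is, as far as I can tell, purely cosmetic: the comma operator evaluates its operands left to right with a sequence point in between, so the old line already updated prod_tail before WRITE_ONCE() read it. The semicolon simply states the intent. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	int tail = 0, head = 5, producer = 0;

	/* Old style: comma operator, still evaluated left to right. */
	tail = head, producer = tail;
	printf("%d %d\n", tail, producer);	/* prints "5 5" */

	/* New style: two plain statements, same behaviour. */
	tail = head;
	producer = tail;
	return 0;
}
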

diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 5d43aaa17027..831668ee8229 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -14,6 +14,8 @@ config XFRM_ALGO
tristate
select XFRM
select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER

config XFRM_USER
tristate "Transformation user configuration interface"
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6916931b1de1..6abf9625a401 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -150,6 +150,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,

err = -EINVAL;
switch (p->family) {
+ case AF_INET:
+ break;
+
+ case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+ break;
+#else
+ err = -EAFNOSUPPORT;
+ goto out;
+#endif
+
+ default:
+ goto out;
+ }
+
+ switch (p->sel.family) {
+ case AF_UNSPEC:
+ break;
+
case AF_INET:
if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
goto out;
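
The verify_newsa_info() addition above rejects state families other than AF_INET/AF_INET6 up front (AF_INET6 only when IPv6 is compiled in), and only then validates the selector family, which may additionally be AF_UNSPEC. A small sketch of that two-stage check (illustrative; the real code also bounds the prefix lengths and returns -EAFNOSUPPORT/-EINVAL):

#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>		/* AF_INET, AF_INET6, AF_UNSPEC */

static bool state_family_ok(int family)
{
	return family == AF_INET || family == AF_INET6;
}

static bool selector_family_ok(int sel_family)
{
	/* The selector may be left unspecified. */
	return sel_family == AF_UNSPEC || state_family_ok(sel_family);
}

int main(void)
{
	printf("%d %d %d\n",
	       state_family_ok(AF_INET6),		/* 1 (with IPv6 built in) */
	       state_family_ok(AF_UNSPEC),		/* 0 */
	       selector_family_ok(AF_UNSPEC));		/* 1 */
	return 0;
}
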
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 08ba146a83c5..62508d9d8ad7 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -871,11 +871,12 @@ int conf_write(const char *name)
"#\n"
"# %s\n"
"#\n", str);
- } else if (!(sym->flags & SYMBOL_CHOICE)) {
+ } else if (!(sym->flags & SYMBOL_CHOICE) &&
+ !(sym->flags & SYMBOL_WRITTEN)) {
sym_calc_value(sym);
if (!(sym->flags & SYMBOL_WRITE))
goto next;
- sym->flags &= ~SYMBOL_WRITE;
+ sym->flags |= SYMBOL_WRITTEN;

conf_write_symbol(out, sym, &kconfig_printer_cb, NULL);
}
@@ -1026,8 +1027,6 @@ int conf_write_autoconf(int overwrite)
if (!overwrite && is_present(autoconf_name))
return 0;

- sym_clear_all_valid();
-
conf_write_dep("include/config/auto.conf.cmd");

if (conf_touch_deps())
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 8dde65bc3165..017843c9a4f4 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -141,6 +141,7 @@ struct symbol {
#define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */
#define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
#define SYMBOL_CHANGED 0x0400 /* ? */
+#define SYMBOL_WRITTEN 0x0800 /* track info to avoid double-write to .config */
#define SYMBOL_NO_WRITE 0x1000 /* Symbol for internal use only; it will not be written */
#define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
#define SYMBOL_WARNED 0x8000 /* warning has been issued */
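
The confdata.c/expr.h pair above replaces the old trick of clearing SYMBOL_WRITE while writing .config (which then required the sym_clear_all_valid() call that conf_write_autoconf() drops) with a dedicated SYMBOL_WRITTEN bit marking symbols already emitted. The pattern, boiled down to a standalone example with a hypothetical symbol:

#include <stdio.h>

#define SYMBOL_WRITE	0x0200
#define SYMBOL_WRITTEN	0x0800

struct demo_sym {
	const char *name;
	int flags;
};

/* Emit a symbol at most once without disturbing SYMBOL_WRITE. */
static void write_once(struct demo_sym *sym)
{
	if (!(sym->flags & SYMBOL_WRITE) || (sym->flags & SYMBOL_WRITTEN))
		return;
	sym->flags |= SYMBOL_WRITTEN;
	printf("CONFIG_%s=y\n", sym->name);
}

int main(void)
{
	struct demo_sym s = { "DEMO", SYMBOL_WRITE };

	write_once(&s);	/* emitted */
	write_once(&s);	/* skipped: already written */
	return 0;
}
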
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index e19c2eb72c51..37869214c243 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -73,8 +73,9 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
return -EOPNOTSUPP;
}

-static int __integrity_init_keyring(const unsigned int id, key_perm_t perm,
- struct key_restriction *restriction)
+static int __init __integrity_init_keyring(const unsigned int id,
+ key_perm_t perm,
+ struct key_restriction *restriction)
{
const struct cred *cred = current_cred();
int err = 0;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 614bc753822c..bf37bdce9918 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6269,11 +6269,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
- error = avc_has_perm(&selinux_state,
- mysid, sid, SECCLASS_KEY, KEY__CREATE,
- NULL);
- if (error)
- goto abort_change;
+ if (sid) {
+ error = avc_has_perm(&selinux_state, mysid, sid,
+ SECCLASS_KEY, KEY__CREATE, NULL);
+ if (error)
+ goto abort_change;
+ }
tsec->keycreate_sid = sid;
} else if (!strcmp(name, "sockcreate")) {
tsec->sockcreate_sid = sid;
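
The SELinux hunk above only asks for key:create permission when a non-zero SID is being set, so a task can always clear its key-creation context again. A userspace sketch of clearing it (writing a newline maps to SID 0 in setprocattr; error handling is omitted and whether the write is permitted at all depends on the loaded policy):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/attr/keycreate", O_WRONLY);

	if (fd >= 0) {
		write(fd, "\n", 1);	/* clear the key-creation context */
		close(fd);
	}
	return 0;
}
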
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index c99e1b77a45b..1524748b224d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1004,7 +1004,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
{
struct snd_seq_client *client = file->private_data;
int written = 0, len;
- int err;
+ int err, handled;
struct snd_seq_event event;

if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1017,6 +1017,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
if (!client->accept_output || client->pool == NULL)
return -ENXIO;

+ repeat:
+ handled = 0;
/* allocate the pool now if the pool is not allocated yet */
mutex_lock(&client->ioctl_mutex);
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
@@ -1076,12 +1078,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
0, 0, &client->ioctl_mutex);
if (err < 0)
break;
+ handled++;

__skip_event:
/* Update pointers and counts */
count -= len;
buf += len;
written += len;
+
+ /* let's have a coffee break if too many events are queued */
+ if (++handled >= 200) {
+ mutex_unlock(&client->ioctl_mutex);
+ goto repeat;
+ }
}

out:
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index b2e9454f5816..6a190f0d2803 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -78,6 +78,8 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
snd_hdac_chip_writew(bus, RINTCNT, 1);
/* enable rirb dma and response irq */
snd_hdac_chip_writeb(bus, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
+ /* Accept unsolicited responses */
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_cmd_io);
@@ -414,9 +416,6 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
return -EBUSY;
}

- /* Accept unsolicited responses */
- snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
-
/* detect codecs */
if (!bus->codec_mask) {
bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index fcdf2cd3783b..fa1d041da5d2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2955,15 +2955,19 @@ static int hda_codec_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int hda_codec_force_resume(struct device *dev)
{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+ bool forced_resume = !codec->relaxed_resume;
int ret;

/* The get/put pair below enforces the runtime resume even if the
* device hasn't been used at suspend time. This trick is needed to
* update the jack state change during the sleep.
*/
- pm_runtime_get_noresume(dev);
+ if (forced_resume)
+ pm_runtime_get_noresume(dev);
ret = pm_runtime_force_resume(dev);
- pm_runtime_put(dev);
+ if (forced_resume)
+ pm_runtime_put(dev);
return ret;
}

diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 0c61c05503f5..b2f17da292a4 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2304,8 +2304,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
struct hdmi_spec *spec = codec->spec;
int pin_idx, pcm_idx;

- if (codec_has_acomp(codec))
+ if (codec_has_acomp(codec)) {
snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
+ codec->relaxed_resume = 0;
+ }

for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2428,7 +2430,6 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
snd_hda_override_conn_list(codec, nid, spec->num_cvts, spec->cvt_nids);
}

-#define INTEL_GET_VENDOR_VERB 0xf81
#define INTEL_GET_VENDOR_VERB 0xf81
#define INTEL_SET_VENDOR_VERB 0x781
#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
@@ -2537,18 +2538,32 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
return -1;
}

+static int intel_port2pin(struct hda_codec *codec, int port)
+{
+ struct hdmi_spec *spec = codec->spec;
+
+ if (!spec->port_num) {
+ /* we assume only from port-B to port-D */
+ if (port < 1 || port > 3)
+ return 0;
+ /* intel port is 1-based */
+ return port + intel_base_nid(codec) - 1;
+ }
+
+ if (port < 1 || port > spec->port_num)
+ return 0;
+ return spec->port_map[port - 1];
+}
+
static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
struct hda_codec *codec = audio_ptr;
int pin_nid;
int dev_id = pipe;

- /* we assume only from port-B to port-D */
- if (port < 1 || port > 3)
+ pin_nid = intel_port2pin(codec, port);
+ if (!pin_nid)
return;
-
- pin_nid = port + intel_base_nid(codec) - 1; /* intel port is 1-based */
-
/* skip notification during system suspend (but not in runtime PM);
* the state will be updated at resume
*/
@@ -2578,6 +2593,8 @@ static void register_i915_notifier(struct hda_codec *codec)
spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
snd_hdac_acomp_register_notifier(&codec->bus->core,
&spec->drm_audio_ops);
+ /* no need for forcible resume for jack check thanks to notifier */
+ codec->relaxed_resume = 1;
}

/* setup_stream ops override for HSW+ */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dde9a49ded78..e50bf2fee250 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7614,9 +7614,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x17, 0x90170110},
{0x21, 0x03211020}),
- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x21, 0x04211030}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
{0x17, 0x21014020},
@@ -8751,6 +8754,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
{0x18, 0x01a19030},
{0x1a, 0x01813040},
{0x21, 0x01014020}),
+ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x16, 0x01813030},
+ {0x17, 0x02211010},
+ {0x18, 0x01a19040},
+ {0x21, 0x01014020}),
SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
{0x14, 0x01014010},
{0x18, 0x01a19020},
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 4de1fbfa8827..65177ca64827 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1880,6 +1880,12 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
{
struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
struct hdac_device *hdev = hdmi->hdev;
+ int ret;
+
+ ret = snd_hdac_acomp_register_notifier(hdev->bus, NULL);
+ if (ret < 0)
+ dev_err(&hdev->dev, "notifier unregister failed: err: %d\n",
+ ret);

pm_runtime_disable(&hdev->dev);
}
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 69bc4848d787..f730830fb36c 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -460,9 +460,6 @@ static int graph_for_each_link(struct graph_priv *priv,
codec_ep = of_graph_get_remote_endpoint(cpu_ep);
codec_port = of_get_parent(codec_ep);

- of_node_put(codec_ep);
- of_node_put(codec_port);
-
/* get convert-xxx property */
memset(&adata, 0, sizeof(adata));
graph_get_conversion(dev, codec_ep, &adata);
@@ -482,6 +479,9 @@ static int graph_for_each_link(struct graph_priv *priv,
else
ret = func_noml(priv, cpu_ep, codec_ep, li);

+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
if (ret < 0)
return ret;

diff --git a/sound/soc/meson/axg-tdm.h b/sound/soc/meson/axg-tdm.h
index e578b6f40a07..5774ce0916d4 100644
--- a/sound/soc/meson/axg-tdm.h
+++ b/sound/soc/meson/axg-tdm.h
@@ -40,7 +40,7 @@ struct axg_tdm_iface {

static inline bool axg_tdm_lrclk_invert(unsigned int fmt)
{
- return (fmt & SND_SOC_DAIFMT_I2S) ^
+ return ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) ^
!!(fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_NB_IF));
}
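
The axg-tdm.h fix above matters because SND_SOC_DAIFMT_I2S is an enumerated value (1) inside a 4-bit format field, not a flag bit, so testing it with & also matched LEFT_J, DSP_B and every other odd-numbered format value. A quick demonstration; the constants mirror include/sound/soc-dai.h as of this release:

#include <stdio.h>

#define SND_SOC_DAIFMT_I2S		1
#define SND_SOC_DAIFMT_DSP_B		5
#define SND_SOC_DAIFMT_FORMAT_MASK	0x000f

int main(void)
{
	unsigned int fmt = SND_SOC_DAIFMT_DSP_B;	/* 5 = 0b101 */

	/* Old test: the I2S bit happens to be set in DSP_B, wrong result. */
	printf("old: %d\n", !!(fmt & SND_SOC_DAIFMT_I2S));	/* 1 */

	/* New test: compare the whole format field. */
	printf("new: %d\n",
	       (fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S);	/* 0 */
	return 0;
}
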

diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 8cb06dab234e..7647b3d4c0ba 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -108,7 +108,7 @@ static int rsnd_ctu_probe_(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- return rsnd_cmd_attach(io, rsnd_mod_id(mod) / 4);
+ return rsnd_cmd_attach(io, rsnd_mod_id(mod));
}

static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c010cc864cf3..c58af17228c3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -158,9 +158,10 @@ static void soc_init_component_debugfs(struct snd_soc_component *component)
component->card->debugfs_card_root);
}

- if (!component->debugfs_root) {
+ if (IS_ERR(component->debugfs_root)) {
dev_warn(component->dev,
- "ASoC: Failed to create component debugfs directory\n");
+ "ASoC: Failed to create component debugfs directory: %ld\n",
+ PTR_ERR(component->debugfs_root));
return;
}

@@ -212,18 +213,21 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)

card->debugfs_card_root = debugfs_create_dir(card->name,
snd_soc_debugfs_root);
- if (!card->debugfs_card_root) {
+ if (IS_ERR(card->debugfs_card_root)) {
dev_warn(card->dev,
- "ASoC: Failed to create card debugfs directory\n");
+ "ASoC: Failed to create card debugfs directory: %ld\n",
+ PTR_ERR(card->debugfs_card_root));
+ card->debugfs_card_root = NULL;
return;
}

card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
card->debugfs_card_root,
&card->pop_time);
- if (!card->debugfs_pop_time)
+ if (IS_ERR(card->debugfs_pop_time))
dev_warn(card->dev,
- "ASoC: Failed to create pop time debugfs file\n");
+ "ASoC: Failed to create pop time debugfs file: %ld\n",
+ PTR_ERR(card->debugfs_pop_time));
}

static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
@@ -2834,14 +2838,12 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
snd_soc_dapm_shutdown(card);
snd_soc_flush_all_delayed_work(card);

- mutex_lock(&client_mutex);
/* remove all components used by DAI links on this card */
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
soc_remove_link_components(card, rtd, order);
}
}
- mutex_unlock(&client_mutex);

soc_cleanup_card_resources(card);
if (!unregister)
@@ -2860,7 +2862,9 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
*/
int snd_soc_unregister_card(struct snd_soc_card *card)
{
+ mutex_lock(&client_mutex);
snd_soc_unbind_card(card, true);
+ mutex_unlock(&client_mutex);
dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);

return 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5d9d7678e4fa..6b0f45b9dcdf 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2154,23 +2154,25 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
{
struct dentry *d;

- if (!parent)
+ if (!parent || IS_ERR(parent))
return;

dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);

- if (!dapm->debugfs_dapm) {
+ if (IS_ERR(dapm->debugfs_dapm)) {
dev_warn(dapm->dev,
- "ASoC: Failed to create DAPM debugfs directory\n");
+ "ASoC: Failed to create DAPM debugfs directory %ld\n",
+ PTR_ERR(dapm->debugfs_dapm));
return;
}

d = debugfs_create_file("bias_level", 0444,
dapm->debugfs_dapm, dapm,
&dapm_bias_fops);
- if (!d)
+ if (IS_ERR(d))
dev_warn(dapm->dev,
- "ASoC: Failed to create bias level debugfs file\n");
+ "ASoC: Failed to create bias level debugfs file: %ld\n",
+ PTR_ERR(d));
}

static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
@@ -2184,10 +2186,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
d = debugfs_create_file(w->name, 0444,
dapm->debugfs_dapm, w,
&dapm_widget_power_fops);
- if (!d)
+ if (IS_ERR(d))
dev_warn(w->dapm->dev,
- "ASoC: Failed to create %s debugfs file\n",
- w->name);
+ "ASoC: Failed to create %s debugfs file: %ld\n",
+ w->name, PTR_ERR(d));
}

static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 3ef3093560ba..bfed711258ce 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -11,6 +11,8 @@
* Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/

+#define _GNU_SOURCE
+#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
@@ -44,11 +46,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
char *s;

va_start(ap, fmt);
+ if (vasprintf(&s, fmt, ap) < 0)
+ return -1;
+ va_end(ap);
+
if (!oper_count) {
int i;

- s = va_arg(ap, char *);
-
/* Strip trailing spaces */
i = strlen(s) - 1;
while (s[i] == ' ')
@@ -61,11 +65,10 @@ static int fprintf_json(void *out, const char *fmt, ...)
} else if (!strcmp(fmt, ",")) {
/* Skip */
} else {
- s = va_arg(ap, char *);
jsonw_string(json_wtr, s);
oper_count++;
}
- va_end(ap);
+ free(s);
return 0;
}
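
The bpftool change above stops reaching into the va_list with va_arg() (which only happened to work when the format was effectively a bare "%s") and instead renders the arguments with vasprintf(), freeing the result when done. The same pattern in isolation:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>

/* Format varargs into a heap-allocated string, print it, release it. */
static int emit(const char *fmt, ...)
{
	va_list ap;
	char *s;

	va_start(ap, fmt);
	if (vasprintf(&s, fmt, ap) < 0) {
		va_end(ap);
		return -1;
	}
	va_end(ap);

	puts(s);
	free(s);
	return 0;
}

int main(void)
{
	return emit("%s+0x%x", "bpf_prog", 0x42);
}
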

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 929c8e537a14..f6ce794c0f36 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2869,6 +2869,7 @@ struct bpf_prog_info {
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
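
The uapi header tweak above makes the 31 bits of padding after the single-bit gpl_compatible flag explicit, so the hole before the following __u64 members is well defined rather than implementation-dependent and 32-bit and 64-bit userspace agree on the field offsets. A cut-down way to inspect such a layout (a stand-in struct, not the real bpf_prog_info):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct info_demo {
	uint32_t ifindex;
	uint32_t gpl_compatible:1;
	uint32_t :31;			/* explicit pad, as in bpf_prog_info */
	uint64_t netns_dev;
	uint64_t netns_ino;
};

int main(void)
{
	printf("sizeof=%zu netns_dev@%zu netns_ino@%zu\n",
	       sizeof(struct info_demo),
	       offsetof(struct info_demo, netns_dev),
	       offsetof(struct info_demo, netns_ino));
	return 0;
}
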
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 11c25d9ea431..43dc8a8e9105 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2897,10 +2897,7 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
{
- struct bpf_object_open_attr open_attr = {
- .file = attr->file,
- .prog_type = attr->prog_type,
- };
+ struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
@@ -2913,6 +2910,9 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
if (!attr->file)
return -EINVAL;

+ open_attr.file = attr->file;
+ open_attr.prog_type = attr->prog_type;
+
obj = bpf_object__open_xattr(&open_attr);
if (IS_ERR_OR_NULL(obj))
return -ENOENT;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index af5f310ecca1..4ecd33ff46ec 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -336,7 +336,8 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)

channels.cmd = ETHTOOL_GCHANNELS;
ifr.ifr_data = (void *)&channels;
- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
+ strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
ret = -errno;
@@ -559,7 +560,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
err = -errno;
goto out_socket;
}
- strncpy(xsk->ifname, ifname, IFNAMSIZ);
+ strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
+ xsk->ifname[IFNAMSIZ - 1] = '\0';

err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
if (err)
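
Both strncpy() call sites fixed above adopt the standard defensive pattern: copy at most IFNAMSIZ - 1 bytes and terminate explicitly, because strncpy() leaves the destination unterminated whenever the source fills the whole buffer. In isolation:

#include <stdio.h>
#include <string.h>
#include <net/if.h>		/* IFNAMSIZ */

/* Copy an interface name with a guaranteed NUL terminator. */
static void copy_ifname(char dst[IFNAMSIZ], const char *src)
{
	strncpy(dst, src, IFNAMSIZ - 1);
	dst[IFNAMSIZ - 1] = '\0';
}

int main(void)
{
	char name[IFNAMSIZ];

	copy_ifname(name, "a-rather-long-interface-name");
	printf("%s\n", name);	/* truncated but terminated */
	return 0;
}
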
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 911426721170..0a278bbcaba6 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -31,6 +31,8 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct perf_evlist *evlist;
+ int wrapped_cnt;
+ bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
@@ -536,16 +538,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return 0;
}

-static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
+static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
+{
+ bool *wrapped;
+ int cnt = ptr->wrapped_cnt;
+
+ /* Make @ptr->wrapped as big as @idx */
+ while (cnt <= idx)
+ cnt++;
+
+ /*
+ * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
+ * cross compilation problems where the host's system supports
+ * reallocarray() but not the target.
+ */
+ wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
+ if (!wrapped)
+ return -ENOMEM;
+
+ wrapped[cnt - 1] = false;
+ ptr->wrapped_cnt = cnt;
+ ptr->wrapped = wrapped;
+
+ return 0;
+}
+
+static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
+ size_t buffer_size, u64 head)
+{
+ u64 i, watermark;
+ u64 *buf = (u64 *)buffer;
+ size_t buf_size = buffer_size;
+
+ /*
+ * We want to look the very last 512 byte (chosen arbitrarily) in
+ * the ring buffer.
+ */
+ watermark = buf_size - 512;
+
+ /*
+ * @head is continuously increasing - if its value is equal or greater
+ * than the size of the ring buffer, it has wrapped around.
+ */
+ if (head >= buffer_size)
+ return true;
+
+ /*
+ * The value of @head is somewhere within the size of the ring buffer.
+ * This can be that there hasn't been enough data to fill the ring
+ * buffer yet or the trace time was so long that @head has numerically
+ * wrapped around. To find we need to check if we have data at the very
+ * end of the ring buffer. We can reliably do this because mmap'ed
+ * pages are zeroed out and there is a fresh mapping with every new
+ * session.
+ */
+
+ /* @head is less than 512 byte from the end of the ring buffer */
+ if (head > watermark)
+ watermark = head;
+
+ /*
+ * Speed things up by using 64 bit transactions (see "u64 *buf" above)
+ */
+ watermark >>= 3;
+ buf_size >>= 3;
+
+ /*
+ * If we find trace data at the end of the ring buffer, @head has
+ * been there and has numerically wrapped around at least once.
+ */
+ for (i = watermark; i < buf_size; i++)
+ if (buf[i])
+ return true;
+
+ return false;
+}
+
+static int cs_etm_find_snapshot(struct auxtrace_record *itr,
int idx, struct auxtrace_mmap *mm,
- unsigned char *data __maybe_unused,
+ unsigned char *data,
u64 *head, u64 *old)
{
+ int err;
+ bool wrapped;
+ struct cs_etm_recording *ptr =
+ container_of(itr, struct cs_etm_recording, itr);
+
+ /*
+ * Allocate memory to keep track of wrapping if this is the first
+ * time we deal with this *mm.
+ */
+ if (idx >= ptr->wrapped_cnt) {
+ err = cs_etm_alloc_wrapped_array(ptr, idx);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check to see if *head has wrapped around. If it hasn't only the
+ * amount of data between *head and *old is snapshot'ed to avoid
+ * bloating the perf.data file with zeros. But as soon as *head has
+ * wrapped around the entire size of the AUX ring buffer it taken.
+ */
+ wrapped = ptr->wrapped[idx];
+ if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
+ wrapped = true;
+ ptr->wrapped[idx] = true;
+ }
+
pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
__func__, idx, (size_t)*old, (size_t)*head, mm->len);

- *old = *head;
- *head += mm->len;
+ /* No wrap has occurred, we can just use *head and *old. */
+ if (!wrapped)
+ return 0;
+
+ /*
+ * *head has wrapped around - adjust *head and *old to pickup the
+ * entire content of the AUX buffer.
+ */
+ if (*head >= mm->len) {
+ *old = *head - mm->len;
+ } else {
+ *head += mm->len;
+ *old = *head - mm->len;
+ }

return 0;
}
@@ -586,6 +703,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
+
+ zfree(&ptr->wrapped);
free(ptr);
}
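
Once cs_etm_find_snapshot() above has decided the AUX buffer wrapped, it widens the snapshot window to the full buffer by recomputing *old (and, for numerically small heads, *head) so that head - old equals mm->len. A worked example of that arithmetic with a hypothetical 4 KiB buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t len = 4096;

	/* Case 1: head has gone past the buffer size at least once. */
	uint64_t head = 10240, old = 9000;
	if (head >= len)
		old = head - len;
	printf("head >= len: old=%llu head=%llu window=%llu\n",
	       (unsigned long long)old, (unsigned long long)head,
	       (unsigned long long)(head - old));

	/* Case 2: head is small, but data was found at the buffer's end. */
	head = 1000;
	head += len;
	old = head - len;
	printf("head <  len: old=%llu head=%llu window=%llu\n",
	       (unsigned long long)old, (unsigned long long)head,
	       (unsigned long long)(head - old));
	return 0;
}
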

diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index aea7b1fe85aa..c441a34cb1c0 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
+#include <linux/string.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
@@ -162,8 +163,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
result[i] = '\0';
} else {
/* fallback case */
- size_t file_name_len = strlen(file_name);
- strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
+ strlcpy(result, file_name, max_length);
}
}

diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c59743def8d3..b86ecc7afdd7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -26,7 +26,7 @@ static inline unsigned long long rdclock(void)
}

#ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS 1024
+#define MAX_NR_CPUS 2048
#endif

extern const char *input_name;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 4a69c07f4101..8f3c80e13584 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -18,6 +18,32 @@
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)

+#if defined(__s390x__)
+/* Return true if kvm module is available and loaded. Test this
+ * and return success when trace point kvm_s390_create_vm
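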
+ * exists. Otherwise this test always fails.
+ */
+static bool kvm_s390_create_vm_valid(void)
+{
+ char *eventfile;
+ bool rc = false;
+
+ eventfile = get_events_file("kvm-s390");
+
+ if (eventfile) {
+ DIR *mydir = opendir(eventfile);
+
+ if (mydir) {
+ rc = true;
+ closedir(mydir);
+ }
+ put_events_file(eventfile);
+ }
+
+ return rc;
+}
+#endif
+
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1642,6 +1668,7 @@ static struct evlist_test test__events[] = {
{
.name = "kvm-s390:kvm_s390_create_vm",
.check = test__checkevent_tracepoint,
+ .valid = kvm_s390_create_vm_valid,
.id = 100,
},
#endif
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 61c9f8fc6fa1..58a99a292930 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -44,7 +44,7 @@ trace_libc_inet_pton_backtrace() {
eventattr='max-stack=4'
echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
*)
eventattr='max-stack=3'
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 98d934a36d86..b0d089a95dac 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -97,11 +97,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct annotation *notes = browser__annotation(browser);
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+ const bool is_current_entry = ui_browser__is_current_entry(browser, row);
struct annotation_write_ops ops = {
.first_line = row == 0,
- .current_entry = ui_browser__is_current_entry(browser, row),
+ .current_entry = is_current_entry,
.change_color = (!notes->options->hide_src_code &&
- (!ops.current_entry ||
+ (!is_current_entry ||
(browser->use_navkeypressed &&
!browser->navkeypressed))),
.width = browser->width,
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 09762985c713..0c43c5a0d9d9 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -932,9 +932,8 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->nr_entries);
- if (src == NULL)
- return -ENOMEM;
- return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
+ return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx,
+ addr, sample) : 0;
}

static int symbol__account_cycles(u64 addr, u64 start,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 966360844fff..7ca79cfe1aea 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -584,6 +584,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
{
char bf[128];

+ if (!evsel)
+ goto out_unknown;
+
if (evsel->name)
return evsel->name;

@@ -620,7 +623,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)

evsel->name = strdup(bf);

- return evsel->name ?: "unknown";
+ if (evsel->name)
+ return evsel->name;
+out_unknown:
+ return "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 682e3d524d3c..df608cfaa03c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1100,7 +1100,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
return 0;
}

-#define MAX_CACHES 2000
+#define MAX_CACHES (MAX_NR_CPUS * 4)

static int write_cache(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index b8d864ed4afe..a48895c4324a 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -94,26 +94,49 @@ struct egroup {
const char *metric_expr;
};

-static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
- const char **ids,
- int idnum,
- struct perf_evsel **metric_events)
+static bool record_evsel(int *ind, struct perf_evsel **start,
+ int idnum,
+ struct perf_evsel **metric_events,
+ struct perf_evsel *ev)
+{
+ metric_events[*ind] = ev;
+ if (*ind == 0)
+ *start = ev;
+ if (++*ind == idnum) {
+ metric_events[*ind] = NULL;
+ return true;
+ }
+ return false;
+}
+
+static struct perf_evsel *find_evsel_group(struct perf_evlist *perf_evlist,
+ const char **ids,
+ int idnum,
+ struct perf_evsel **metric_events)
{
struct perf_evsel *ev, *start = NULL;
int ind = 0;

evlist__for_each_entry (perf_evlist, ev) {
+ if (ev->collect_stat)
+ continue;
if (!strcmp(ev->name, ids[ind])) {
- metric_events[ind] = ev;
- if (ind == 0)
- start = ev;
- if (++ind == idnum) {
- metric_events[ind] = NULL;
+ if (record_evsel(&ind, &start, idnum,
+ metric_events, ev))
return start;
- }
} else {
+ /*
+ * We saw some other event that is not
+ * in our list of events. Discard
+ * the whole match and start again.
+ */
ind = 0;
start = NULL;
+ if (!strcmp(ev->name, ids[ind])) {
+ if (record_evsel(&ind, &start, idnum,
+ metric_events, ev))
+ return start;
+ }
}
}
/*
@@ -143,8 +166,8 @@ static int metricgroup__setup_events(struct list_head *groups,
ret = -ENOMEM;
break;
}
- evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
- metric_events);
+ evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
+ metric_events);
if (!evsel) {
pr_debug("Cannot resolve %s: %s\n",
eg->metric_name, eg->metric_expr);
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 6d043c78f3c2..9c940242dcbe 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -539,7 +539,8 @@ static void collect_all_aliases(struct perf_stat_config *config, struct perf_evs
alias->scale != counter->scale ||
alias->cgrp != counter->cgrp ||
strcmp(alias->unit, counter->unit) ||
- perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
+ perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter) ||
+ !strcmp(alias->pmu_name, counter->pmu_name))
break;
alias->merged_stat = true;
cb(config, alias, data, false);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 83d8094be4fe..0ef98e991ade 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -303,7 +303,7 @@ static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
struct perf_evsel *c2;

evlist__for_each_entry (evsel_list, c2) {
- if (!strcasecmp(c2->name, name))
+ if (!strcasecmp(c2->name, name) && !c2->collect_stat)
return c2;
}
return NULL;
@@ -342,7 +342,8 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
if (leader) {
/* Search in group */
for_each_group_member (oc, leader) {
- if (!strcasecmp(oc->name, metric_names[i])) {
+ if (!strcasecmp(oc->name, metric_names[i]) &&
+ !oc->collect_stat) {
found = true;
break;
}
@@ -722,6 +723,7 @@ static void generic_metric(struct perf_stat_config *config,
double ratio;
int i;
void *ctxp = out->ctx;
+ char *n, *pn;

expr__ctx_init(&pctx);
expr__add_id(&pctx, name, avg);
@@ -741,7 +743,19 @@ static void generic_metric(struct perf_stat_config *config,
stats = &v->stats;
scale = 1.0;
}
- expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats)*scale);
+
+ n = strdup(metric_events[i]->name);
+ if (!n)
+ return;
+ /*
+ * This display code with --no-merge adds [cpu] postfixes.
+ * These are not supported by the parser. Remove everything
+ * after the space.
+ */
+ pn = strchr(n, ' ');
+ if (pn)
+ *pn = 0;
+ expr__add_id(&pctx, n, avg_stats(stats)*scale);
}
if (!metric_events[i]) {
const char *p = metric_expr;
@@ -758,6 +772,9 @@ static void generic_metric(struct perf_stat_config *config,
(metric_name ? metric_name : name) : "", 0);
} else
print_metric(config, ctxp, NULL, NULL, "", 0);
+
+ for (i = 1; i < pctx.num_ids; i++)
+ free((void *)pctx.ids[i].name);
}

void perf_stat__print_shadow_stats(struct perf_stat_config *config,
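
The name clean-up added to generic_metric() above exists because, with --no-merge, the display code appends " [cpu]"-style suffixes that the expression parser does not understand; the event name is therefore duplicated and cut at the first space before being registered. A standalone sketch of that truncation (the event name here is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *display_name = "inst_retired.any [cpu]";
	char *n = strdup(display_name);
	char *pn;

	if (!n)
		return 1;
	pn = strchr(n, ' ');
	if (pn)
		*pn = 0;
	printf("%s\n", n);	/* "inst_retired.any" */
	free(n);
	return 0;
}
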
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 1eef0aed6423..08a405593a79 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -306,6 +306,8 @@ int cmd_freq_set(int argc, char **argv)
bitmask_setbit(cpus_chosen, cpus->cpu);
cpus = cpus->next;
}
+ /* Set the last cpu in related cpus list */
+ bitmask_setbit(cpus_chosen, cpus->cpu);
cpufreq_put_related_cpus(cpus);
}
}
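
The cpupower fix above addresses a classic linked-list off-by-one: a loop that advances only while cpus->next is non-NULL stops on the last node without acting on it, so the final CPU in the related-CPUs list has to be set after the loop. Reduced to a standalone example (stand-in types, not the cpupower structures):

#include <stdio.h>

struct node {
	int cpu;
	struct node *next;
};

static void mark(int cpu)
{
	printf("set cpu %d\n", cpu);
}

int main(void)
{
	struct node c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
	struct node *cpus = &a;

	while (cpus->next) {
		mark(cpus->cpu);
		cpus = cpus->next;
	}
	mark(cpus->cpu);	/* the last CPU, previously skipped */
	return 0;
}
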
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..e2f6ed0a583d 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -61,7 +61,7 @@ struct sr6_tlv_t {
unsigned char value[0];
} BPF_PACKET_HEADER;

-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
@@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
return srh;
}

-__attribute__((always_inline))
+static __always_inline
int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
uint32_t old_pad, uint32_t pad_off)
{
@@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
return 0;
}

-__attribute__((always_inline))
+static __always_inline
int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t *tlv_off, uint32_t *pad_size,
uint32_t *pad_off)
@@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
return 0;
}

-__attribute__((always_inline))
+static __always_inline
int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
@@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}

-__attribute__((always_inline))
+static __always_inline
int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t tlv_off)
{
@@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}

-__attribute__((always_inline))
+static __always_inline
int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
{
int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +