From: Łukasz Stelmach <l.stelmach@samsung.com>
Subject: [PATCH v2 RESEND 3/9] spi: spi-s3c64xx: Check return values

Check the return values of the dmaengine calls in prepare_dma() and of
clk_set_rate() in s3c64xx_spi_config(), and propagate errors up to the
callers instead of silently ignoring them.

Fixes: 788437273fa8 ("spi: s3c64xx: move to generic dmaengine API")
Reviewed-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Łukasz Stelmach <l.stelmach@samsung.com>
---
drivers/spi/spi-s3c64xx.c | 50 ++++++++++++++++++++++++++++++++-------
1 file changed, 41 insertions(+), 9 deletions(-)
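
For readers unfamiliar with the dmaengine API, the submit-and-check
pattern this patch applies looks roughly like the sketch below. The
sketch is not part of the patch: the function name my_start_dma() and
its parameters are hypothetical, but the dmaengine calls are the real
kernel API used in the diff.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Sketch only: prepare, submit and kick a slave DMA transfer,
 * failing early instead of ignoring errors.
 */
static int my_start_dma(struct dma_chan *chan, struct sg_table *sgt,
			enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Preparation may fail, e.g. on a bad channel configuration. */
	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* A negative cookie from submission indicates an error. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing runs until the pending queue is kicked. */
	dma_async_issue_pending(chan);
	return 0;
}

Keeping the cookie around, as the patch does in struct
s3c64xx_spi_dma_data, also lets later code query or terminate the
transfer it identifies.
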

diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 22bf8c75580a..f7482f2f1e13 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -122,6 +122,7 @@

struct s3c64xx_spi_dma_data {
struct dma_chan *ch;
+ dma_cookie_t cookie;
enum dma_transfer_direction direction;
};

@@ -271,12 +272,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}

-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
struct dma_async_tx_descriptor *desc;
+ int ret;

memset(&config, 0, sizeof(config));

@@ -300,12 +302,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,

desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+ return -ENOMEM;
+ }

desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;

- dmaengine_submit(desc);
+ dma->cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret) {
+ dev_err(&sdd->pdev->dev, "DMA submission failed");
+ return -EIO;
+ }
+
dma_async_issue_pending(dma->ch);
+ return 0;
}

static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@@ -355,11 +369,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}

-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
+ int ret = 0;

modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -385,7 +400,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -417,12 +432,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}

+ if (ret)
+ return ret;
+
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+ return 0;
}

static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -555,9 +575,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
return 0;
}

-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
+ int ret;
u32 val;

/* Disable Clock */
@@ -605,7 +626,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)

if (sdd->port_conf->clk_from_cmu) {
/* The src_clk clock is divided internally by 2 */
- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ if (ret)
+ return ret;
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -619,6 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
+
+ return 0;
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@@ -661,7 +686,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
sdd->cur_mode = spi->mode;
- s3c64xx_spi_config(sdd);
+ status = s3c64xx_spi_config(sdd);
+ if (status)
+ return status;
}

if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@@ -688,10 +715,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
/* Start the signals */
s3c64xx_spi_set_cs(spi, true);

- s3c64xx_enable_datapath(sdd, xfer, use_dma);
+ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);

spin_unlock_irqrestore(&sdd->lock, flags);

+ if (status) {
+ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+ break;
+ }
+
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
--
2.26.2