    From: Carlos Chinea <carlos.chinea@nokia.com>
    Subject: [RFC PATCHv2 2/7] OMAP SSI: Introducing OMAP SSI driver
    Date: 7 May 2010
    Introduces the OMAP SSI driver into the kernel.

    The Synchronous Serial Interface (SSI) is a legacy version
    of HSI. Like HSI, it is mainly used to connect application
    engines (APE) with cellular modem engines (CMT) in cellular
    handsets.

    It provides multichannel, full-duplex communication between
    cores with no reference clock. The OMAP SSI block can reach
    speeds of up to 110 Mbit/s.

    Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com>
    ---
    arch/arm/mach-omap2/ssi.c | 139 +++
    arch/arm/plat-omap/include/plat/ssi.h | 196 ++++
    drivers/hsi/controllers/omap_ssi.c | 1694 +++++++++++++++++++++++++++++++++
    3 files changed, 2029 insertions(+), 0 deletions(-)
    create mode 100644 arch/arm/mach-omap2/ssi.c
    create mode 100644 arch/arm/plat-omap/include/plat/ssi.h
    create mode 100644 drivers/hsi/controllers/omap_ssi.c
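
    For reference, hooking up the CAWAKE GPIO from a board file could look
    roughly like the sketch below (the GPIO number and function name are
    illustrative only, not taken from a real board):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <plat/ssi.h>

    static struct omap_ssi_board_config board_ssi_config = {
    	.num_ports	= SSI_NUM_PORTS,
    	.cawake_gpio	= { 151 },	/* illustrative CAWAKE GPIO number */
    };

    static void __init board_ssi_init(void)
    {
    	/*
    	 * Fill in the cawake IRQ resource from board init (arch_initcall),
    	 * i.e. before the SSI platform device is registered at
    	 * subsys_initcall time.
    	 */
    	if (omap_ssi_config(&board_ssi_config) < 0)
    		pr_err("SSI board configuration failed\n");
    }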

    diff --git a/arch/arm/mach-omap2/ssi.c b/arch/arm/mach-omap2/ssi.c
    new file mode 100644
    index 0000000..b46aea8
    --- /dev/null
    +++ b/arch/arm/mach-omap2/ssi.c
    @@ -0,0 +1,139 @@
    +/*
    + * linux/arch/arm/mach-omap2/ssi.c
    + *
    + * Copyright (C) 2010 Nokia Corporation. All rights reserved.
    + *
    + * Contact: Carlos Chinea <carlos.chinea@nokia.com>
    + *
    + * This program is free software; you can redistribute it and/or
    + * modify it under the terms of the GNU General Public License
    + * version 2 as published by the Free Software Foundation.
    + *
    + * This program is distributed in the hope that it will be useful, but
    + * WITHOUT ANY WARRANTY; without even the implied warranty of
    + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    + * General Public License for more details.
    + *
    + * You should have received a copy of the GNU General Public License
    + * along with this program; if not, write to the Free Software
    + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
    + * 02110-1301 USA
    + */
    +
    +#include <linux/kernel.h>
    +#include <linux/init.h>
    +#include <linux/err.h>
    +#include <linux/gpio.h>
    +#include <linux/platform_device.h>
    +#include <plat/omap-pm.h>
    +#include <plat/ssi.h>
    +
    +static struct omap_ssi_platform_data ssi_pdata = {
    + .num_ports = SSI_NUM_PORTS,
    + .get_dev_context_loss_count = omap_pm_get_dev_context_loss_count,
    +};
    +
    +static struct resource ssi_resources[] = {
    + /* SSI controller */
    + [0] = {
    + .start = 0x48058000,
    + .end = 0x48058fff,
    + .name = "omap_ssi_sys",
    + .flags = IORESOURCE_MEM,
    + },
    + /* GDD */
    + [1] = {
    + .start = 0x48059000,
    + .end = 0x48059fff,
    + .name = "omap_ssi_gdd",
    + .flags = IORESOURCE_MEM,
    + },
    + [2] = {
    + .start = 71,
    + .end = 71,
    + .name = "ssi_gdd",
    + .flags = IORESOURCE_IRQ,
    + },
    + /* SSI port 1 */
    + [3] = {
    + .start = 0x4805a000,
    + .end = 0x4805a7ff,
    + .name = "omap_ssi_sst1",
    + .flags = IORESOURCE_MEM,
    + },
    + [4] = {
    + .start = 0x4805a800,
    + .end = 0x4805afff,
    + .name = "omap_ssi_ssr1",
    + .flags = IORESOURCE_MEM,
    + },
    + [5] = {
    + .start = 67,
    + .end = 67,
    + .name = "ssi_p1_mpu_irq0",
    + .flags = IORESOURCE_IRQ,
    + },
    + [6] = {
    + .start = 69,
    + .end = 69,
    + .name = "ssi_p1_mpu_irq1",
    + .flags = IORESOURCE_IRQ,
    + },
    + [7] = {
    + .start = 0,
    + .end = 0,
    + .name = "ssi_p1_cawake",
    + .flags = IORESOURCE_IRQ | IORESOURCE_UNSET,
    + },
    +};
    +
    +static void ssi_pdev_release(struct device *dev)
    +{
    +}
    +
    +static struct platform_device ssi_pdev = {
    + .name = "omap_ssi",
    + .id = 0,
    + .num_resources = ARRAY_SIZE(ssi_resources),
    + .resource = ssi_resources,
    + .dev = {
    + .release = ssi_pdev_release,
    + .platform_data = &ssi_pdata,
    + },
    +};
    +
    +int __init omap_ssi_config(struct omap_ssi_board_config *ssi_config)
    +{
    + unsigned int port, offset, cawake_gpio;
    + int err;
    +
    + ssi_pdata.num_ports = ssi_config->num_ports;
    + for (port = 0, offset = 7; port < ssi_config->num_ports;
    + port++, offset += 5) {
    + cawake_gpio = ssi_config->cawake_gpio[port];
    + if (!cawake_gpio)
    + continue; /* Nothing to do */
    + err = gpio_request(cawake_gpio, "cawake");
    + if (err < 0)
    + goto rback;
    + gpio_direction_input(cawake_gpio);
    + ssi_resources[offset].start = gpio_to_irq(cawake_gpio);
    + ssi_resources[offset].flags &= ~IORESOURCE_UNSET;
    + ssi_resources[offset].flags |= IORESOURCE_IRQ_HIGHEDGE |
    + IORESOURCE_IRQ_LOWEDGE;
    + }
    +
    + return 0;
    +rback:
    + dev_err(&ssi_pdev.dev, "Request cawake (gpio%d) failed\n", cawake_gpio);
    + while (port > 0)
    + gpio_free(ssi_config->cawake_gpio[--port]);
    +
    + return err;
    +}
    +
    +static int __init ssi_init(void)
    +{
    + return platform_device_register(&ssi_pdev);
    +}
    +subsys_initcall(ssi_init);
    diff --git a/arch/arm/plat-omap/include/plat/ssi.h b/arch/arm/plat-omap/include/plat/ssi.h
    new file mode 100644
    index 0000000..b077605
    --- /dev/null
    +++ b/arch/arm/plat-omap/include/plat/ssi.h
    @@ -0,0 +1,196 @@
    +/*
    + * plat/ssi.h
    + *
    + * Hardware definitions for SSI.
    + *
    + * Copyright (C) 2010 Nokia Corporation. All rights reserved.
    + *
    + * Contact: Carlos Chinea <carlos.chinea@nokia.com>
    + *
    + * This program is free software; you can redistribute it and/or
    + * modify it under the terms of the GNU General Public License
    + * version 2 as published by the Free Software Foundation.
    + *
    + * This program is distributed in the hope that it will be useful, but
    + * WITHOUT ANY WARRANTY; without even the implied warranty of
    + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    + * General Public License for more details.
    + *
    + * You should have received a copy of the GNU General Public License
    + * along with this program; if not, write to the Free Software
    + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
    + * 02110-1301 USA
    + */
    +
    +#ifndef __OMAP_SSI_REGS_H__
    +#define __OMAP_SSI_REGS_H__
    +
    +#define SSI_NUM_PORTS 1
    +/*
    + * SSI SYS registers
    + */
    +#define SSI_REVISION_REG 0
    +# define SSI_REV_MAJOR 0xf0
    +# define SSI_REV_MINOR 0xf
    +#define SSI_SYSCONFIG_REG 0x10
    +# define SSI_AUTOIDLE (1 << 0)
    +# define SSI_SOFTRESET (1 << 1)
    +# define SSI_SIDLEMODE_FORCE 0
    +# define SSI_SIDLEMODE_NO (1 << 3)
    +# define SSI_SIDLEMODE_SMART (1 << 4)
    +# define SSI_SIDLEMODE_MASK 0x18
    +# define SSI_MIDLEMODE_FORCE 0
    +# define SSI_MIDLEMODE_NO (1 << 12)
    +# define SSI_MIDLEMODE_SMART (1 << 13)
    +# define SSI_MIDLEMODE_MASK 0x3000
    +#define SSI_SYSSTATUS_REG 0x14
    +# define SSI_RESETDONE 1
    +#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2))
    +#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8))
    +# define SSI_DATAACCEPT(channel) (1 << (channel))
    +# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8))
    +# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16))
    +# define SSI_ERROROCCURED (1 << 24)
    +# define SSI_BREAKDETECTED (1 << 25)
    +#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800
    +#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804
    +# define SSI_GDD_LCH(channel) (1 << (channel))
    +#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10))
    +#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10))
    +#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10))
    +# define SSI_WAKE(channel) (1 << (channel))
    +# define SSI_WAKE_MASK 0xff
    +
    +/*
    + * SSI SST registers
    + */
    +#define SSI_SST_ID_REG 0
    +#define SSI_SST_MODE_REG 4
    +# define SSI_MODE_VAL_MASK 3
    +# define SSI_MODE_SLEEP 0
    +# define SSI_MODE_STREAM 1
    +# define SSI_MODE_FRAME 2
    +# define SSI_MODE_MULTIPOINTS 3
    +#define SSI_SST_FRAMESIZE_REG 8
    +# define SSI_FRAMESIZE_DEFAULT 31
    +#define SSI_SST_TXSTATE_REG 0xc
    +# define SSI_TXSTATE_IDLE 0
    +#define SSI_SST_BUFSTATE_REG 0x10
    +# define SSI_FULL(channel) (1 << (channel))
    +#define SSI_SST_DIVISOR_REG 0x18
    +# define SSI_MAX_DIVISOR 127
    +#define SSI_SST_BREAK_REG 0x20
    +#define SSI_SST_CHANNELS_REG 0x24
    +# define SSI_CHANNELS_DEFAULT 4
    +#define SSI_SST_ARBMODE_REG 0x28
    +# define SSI_ARBMODE_ROUNDROBIN 0
    +# define SSI_ARBMODE_PRIORITY 1
    +#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
    +#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
    +
    +/*
    + * SSI SSR registers
    + */
    +#define SSI_SSR_ID_REG 0
    +#define SSI_SSR_MODE_REG 4
    +#define SSI_SSR_FRAMESIZE_REG 8
    +#define SSI_SSR_RXSTATE_REG 0xc
    +#define SSI_SSR_BUFSTATE_REG 0x10
    +# define SSI_NOTEMPTY(channel) (1 << (channel))
    +#define SSI_SSR_BREAK_REG 0x1c
    +#define SSI_SSR_ERROR_REG 0x20
    +#define SSI_SSR_ERRORACK_REG 0x24
    +#define SSI_SSR_OVERRUN_REG 0x2c
    +#define SSI_SSR_OVERRUNACK_REG 0x30
    +#define SSI_SSR_TIMEOUT_REG 0x34
    +# define SSI_TIMEOUT_DEFAULT 0
    +#define SSI_SSR_CHANNELS_REG 0x28
    +#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4))
    +#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4))
    +
    +/*
    + * SSI GDD registers
    + */
    +#define SSI_GDD_HW_ID_REG 0
    +#define SSI_GDD_PPORT_ID_REG 0x10
    +#define SSI_GDD_MPORT_ID_REG 0x14
    +#define SSI_GDD_PPORT_SR_REG 0x20
    +#define SSI_GDD_MPORT_SR_REG 0x24
    +# define SSI_ACTIVE_LCH_NUM_MASK 0xff
    +#define SSI_GDD_TEST_REG 0x40
    +# define SSI_TEST 1
    +#define SSI_GDD_GCR_REG 0x100
    +# define SSI_CLK_AUTOGATING_ON (1 << 3)
    +# define SSI_FREE (1 << 2)
    +# define SSI_SWITCH_OFF (1 << 0)
    +#define SSI_GDD_GRST_REG 0x200
    +# define SSI_SWRESET 1
    +#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40))
    +# define SSI_DST_BURST_EN_MASK 0xc000
    +# define SSI_DST_SINGLE_ACCESS0 0
    +# define SSI_DST_SINGLE_ACCESS (1 << 14)
    +# define SSI_DST_BURST_4x32_BIT (2 << 14)
    +# define SSI_DST_BURST_8x32_BIT (3 << 14)
    +# define SSI_DST_MASK 0x1e00
    +# define SSI_DST_MEMORY_PORT (8 << 9)
    +# define SSI_DST_PERIPHERAL_PORT (9 << 9)
    +# define SSI_SRC_BURST_EN_MASK 0x180
    +# define SSI_SRC_SINGLE_ACCESS0 0
    +# define SSI_SRC_SINGLE_ACCESS (1 << 7)
    +# define SSI_SRC_BURST_4x32_BIT (2 << 7)
    +# define SSI_SRC_BURST_8x32_BIT (3 << 7)
    +# define SSI_SRC_MASK 0x3c
    +# define SSI_SRC_MEMORY_PORT (8 << 2)
    +# define SSI_SRC_PERIPHERAL_PORT (9 << 2)
    +# define SSI_DATA_TYPE_MASK 3
    +# define SSI_DATA_TYPE_S32 2
    +#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40))
    +# define SSI_DST_AMODE_MASK (3 << 14)
    +# define SSI_DST_AMODE_CONST 0
    +# define SSI_DST_AMODE_POSTINC (1 << 12)
    +# define SSI_SRC_AMODE_MASK (3 << 12)
    +# define SSI_SRC_AMODE_CONST 0
    +# define SSI_SRC_AMODE_POSTINC (1 << 12)
    +# define SSI_CCR_ENABLE (1 << 7)
    +# define SSI_CCR_SYNC_MASK 0x1f
    +#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40))
    +# define SSI_BLOCK_IE (1 << 5)
    +# define SSI_HALF_IE (1 << 2)
    +# define SSI_TOUT_IE (1 << 0)
    +#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40))
    +# define SSI_CSR_SYNC (1 << 6)
    +# define SSI_CSR_BLOCK (1 << 5)
    +# define SSI_CSR_HALF (1 << 2)
    +# define SSI_CSR_TOUR (1 << 0)
    +#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40))
    +#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40))
    +#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40))
    +#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40))
    +#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40))
    +#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40))
    +# define SSI_ENABLE_LNK (1 << 15)
    +# define SSI_STOP_LNK (1 << 14)
    +# define SSI_NEXT_CH_ID_MASK 0xf
    +
    +/**
    + * struct omap_ssi_platform_data - OMAP SSI platform data
    + * @num_ports: Number of ports on the controller
    + * @get_dev_context_loss_count: Pointer to omap_pm_get_dev_context_loss_count
    + */
    +struct omap_ssi_platform_data {
    + unsigned int num_ports;
    + int (*get_dev_context_loss_count)(struct device *dev);
    +};
    +
    +/**
    + * struct omap_ssi_board_config - SSI board configuration
    + * @num_ports: Number of ports in use
    + * @cawake_gpio: Array of cawake gpio lines
    + */
    +struct omap_ssi_board_config {
    + unsigned int num_ports;
    + int cawake_gpio[SSI_NUM_PORTS];
    +};
    +
    +extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config);
    +#endif /* __OMAP_SSI_REGS_H__ */
    diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
    new file mode 100644
    index 0000000..01f5cbe
    --- /dev/null
    +++ b/drivers/hsi/controllers/omap_ssi.c
    @@ -0,0 +1,1694 @@
    +/*
    + * omap_ssi.c
    + *
    + * Implements the OMAP SSI driver.
    + *
    + * Copyright (C) 2010 Nokia Corporation. All rights reserved.
    + *
    + * Contact: Carlos Chinea <carlos.chinea@nokia.com>
    + *
    + * This program is free software; you can redistribute it and/or
    + * modify it under the terms of the GNU General Public License
    + * version 2 as published by the Free Software Foundation.
    + *
    + * This program is distributed in the hope that it will be useful, but
    + * WITHOUT ANY WARRANTY; without even the implied warranty of
    + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    + * General Public License for more details.
    + *
    + * You should have received a copy of the GNU General Public License
    + * along with this program; if not, write to the Free Software
    + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
    + * 02110-1301 USA
    + */
    +#include <linux/err.h>
    +#include <linux/ioport.h>
    +#include <linux/io.h>
    +#include <linux/gpio.h>
    +#include <linux/clk.h>
    +#include <linux/device.h>
    +#include <linux/platform_device.h>
    +#include <linux/dma-mapping.h>
    +#include <linux/delay.h>
    +#include <linux/seq_file.h>
    +#include <linux/scatterlist.h>
    +#include <linux/interrupt.h>
    +#include <linux/spinlock.h>
    +#include <linux/hsi/hsi.h>
    +#include <linux/debugfs.h>
    +#include <plat/omap-pm.h>
    +#include <plat/clock.h>
    +#include <plat/ssi.h>
    +
    +#define SSI_MAX_CHANNELS 8
    +#define SSI_MAX_GDD_LCH 8
    +#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
    +
    +/**
    + * struct gdd_trn - GDD transaction data
    + * @msg: Pointer to the HSI message being served
    + * @sg: Pointer to the current sg entry being served
    + */
    +struct gdd_trn {
    + struct hsi_msg *msg;
    + struct scatterlist *sg;
    +};
    +
    +/**
    + * struct omap_ssi_controller - OMAP SSI controller data
    + * @dev: device associated to the controller (HSI controller)
    + * @sys: SSI I/O base address
    + * @gdd: GDD I/O base address
    + * @ick: SSI interconnect clock
    + * @fck: SSI functional clock
    + * @ck_refcount: Reference count for the clocks
    + * @gdd_irq: IRQ line for GDD
    + * @gdd_tasklet: bottom half for DMA transfers
    + * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers
    + * @lock: lock to serialize access to GDD
    + * @ck_lock: lock to serialize access to the clocks
    + * @rate_change: flag to know if we are in the middle of a DVFS transition
    + * @loss_count: Last context loss count, used to decide whether context must be restored
    + * @sysconfig: SSI controller saved context
    + * @gdd_gcr: SSI GDD saved context
    + * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any
    + * @dir: Debugfs SSI root directory
    + */
    +struct omap_ssi_controller {
    + struct device *dev;
    + unsigned long sys;
    + unsigned long gdd;
    + struct clk *ick;
    + struct clk *fck;
    + int ck_refcount;
    + unsigned int gdd_irq;
    + struct tasklet_struct gdd_tasklet;
    + struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH];
    + spinlock_t lock;
    + spinlock_t ck_lock;
    + u32 fck_rate;
    + unsigned int rate_change:1;
    + int loss_count;
    + /* OMAP SSI Controller context */
    + u32 sysconfig;
    + u32 gdd_gcr;
    + int (*get_loss)(struct device *dev);
    +#ifdef CONFIG_DEBUG_FS
    + struct dentry *dir;
    +#endif
    +};
    +
    +/**
    + * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context
    + * @mode: Bit transmission mode
    + * @channels: Number of channels
    + * @frame_size: Frame size in bits
    + * @timeout: RX frame timeout
    + * @divisor: TX divider
    + * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
    + */
    +struct omap_ssm_ctx {
    + u32 mode;
    + u32 channels;
    + u32 frame_size;
    + union {
    + u32 timeout; /* Rx Only */
    + struct {
    + u32 arb_mode;
    + u32 divisor;
    + }; /* Tx only */
    + };
    +};
    +
    +/**
    + * struct omap_ssi_port - OMAP SSI port data
    + * @dev: device associated to the port (HSI port)
    + * @sst_dma: SSI transmitter physical base address
    + * @ssr_dma: SSI receiver physical base address
    + * @sst_base: SSI transmitter base address
    + * @ssr_base: SSI receiver base address
    + * @lock: Spin lock to serialize access to the SSI port
    + * @channels: Current number of channels configured (1,2,4 or 8)
    + * @txqueue: TX message queues
    + * @rxqueue: RX message queues
    + * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode)
    + * @irq: IRQ number
    + * @wake_irq: IRQ number for incoming wake line (-1 if none)
    + * @pio_tasklet: Bottom half for PIO transfers and events
    + * @wake_tasklet: Bottom half for incoming wake events
    + * @wkin_cken: Keep track of clock references due to the incoming wake line
    + * @wk_refcount: Reference count for the output wake line
    + * @sys_mpu_enable: Context for the interrupt enable register for irq 0
    + * @sst: Context for the synchronous serial transmitter
    + * @ssr: Context for the synchronous serial receiver
    + */
    +struct omap_ssi_port {
    + struct device *dev;
    + dma_addr_t sst_dma;
    + dma_addr_t ssr_dma;
    + unsigned long sst_base;
    + unsigned long ssr_base;
    + spinlock_t wk_lock;
    + spinlock_t lock;
    + unsigned int channels;
    + struct list_head txqueue[SSI_MAX_CHANNELS];
    + struct list_head rxqueue[SSI_MAX_CHANNELS];
    + struct list_head brkqueue;
    + unsigned int irq;
    + int wake_irq;
    + struct tasklet_struct pio_tasklet;
    + struct tasklet_struct wake_tasklet;
    + unsigned int wkin_cken:1; /* Workaround */
    + int wk_refcount;
    + /* OMAP SSI port context */
    + u32 sys_mpu_enable; /* We use only one irq */
    + struct omap_ssm_ctx sst;
    + struct omap_ssm_ctx ssr;
    +};
    +
    +static inline unsigned int ssi_wakein(struct hsi_port *port)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    +
    + return gpio_get_value(irq_to_gpio(omap_port->wake_irq));
    +}
    +
    +static int ssi_set_port_mode(struct device *dev, void *data)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(to_hsi_port(dev));
    + u32 *mode = data;
    +
    + __raw_writel(*mode, omap_port->sst_base + SSI_SST_MODE_REG);
    + __raw_writel(*mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
    +
    + return 0;
    +}
    +
    +static inline void ssi_set_mode(struct hsi_controller *ssi, u32 mode)
    +{
    + device_for_each_child(&ssi->device, &mode, ssi_set_port_mode);
    +}
    +
    +static int ssi_restore_port_mode(struct device *dev, void *data)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(to_hsi_port(dev));
    +
    + __raw_writel(omap_port->sst.mode,
    + omap_port->sst_base + SSI_SST_MODE_REG);
    + __raw_writel(omap_port->ssr.mode,
    + omap_port->ssr_base + SSI_SSR_MODE_REG);
    +
    + return 0;
    +}
    +
    +static int ssi_restore_port_ctx(struct device *dev, void *data)
    +{
    + struct hsi_port *port = to_hsi_port(dev);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(dev->parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long base = omap_port->sst_base;
    +
    + __raw_writel(omap_port->sys_mpu_enable,
    + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + /* SST context */
    + __raw_writel(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
    + __raw_writel(omap_port->sst.divisor, base + SSI_SST_DIVISOR_REG);
    + __raw_writel(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
    + __raw_writel(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);
    + /* SSR context */
    + base = omap_port->ssr_base;
    + __raw_writel(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
    + __raw_writel(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
    + __raw_writel(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);
    +
    + return 0;
    +}
    +
    +static int ssi_save_port_ctx(struct device *dev, void *data)
    +{
    + struct hsi_port *port = to_hsi_port(dev);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(dev->parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + omap_port->sys_mpu_enable = __raw_readl(omap_ssi->sys +
    + SSI_MPU_ENABLE_REG(port->num, 0));
    +
    + return 0;
    +}
    +
    +static int ssi_clk_enable(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + int err = 0;
    +
    + spin_lock_bh(&omap_ssi->ck_lock);
    + if (omap_ssi->ck_refcount++)
    + goto out;
    +
    + err = clk_enable(omap_ssi->fck);
    + if (unlikely(err < 0))
    + goto out;
    + err = clk_enable(omap_ssi->ick);
    + if (unlikely(err < 0)) {
    + clk_disable(omap_ssi->fck);
    + goto out;
    + }
    + if ((omap_ssi->get_loss) && (omap_ssi->loss_count ==
    + (*omap_ssi->get_loss)(ssi->device.parent)))
    + goto mode; /* We always need to restore the mode */
    +
    + __raw_writel(omap_ssi->sysconfig, omap_ssi->sys + SSI_SYSCONFIG_REG);
    + __raw_writel(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG);
    +
    + device_for_each_child(&ssi->device, NULL, ssi_restore_port_ctx);
    +mode:
    + if (!omap_ssi->rate_change)
    + device_for_each_child(&ssi->device, NULL,
    + ssi_restore_port_mode);
    +out:
    + spin_unlock_bh(&omap_ssi->ck_lock);
    +
    + return err;
    +}
    +
    +static void ssi_clk_disable(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + spin_lock_bh(&omap_ssi->ck_lock);
    + WARN_ON(omap_ssi->ck_refcount <= 0);
    + if (--omap_ssi->ck_refcount)
    + goto out;
    +
    + if (!omap_ssi->rate_change)
    + ssi_set_mode(ssi, SSI_MODE_SLEEP);
    +
    + if (omap_ssi->get_loss)
    + omap_ssi->loss_count =
    + (*omap_ssi->get_loss)(ssi->device.parent);
    +
    + device_for_each_child(&ssi->device, NULL, ssi_save_port_ctx);
    + clk_disable(omap_ssi->ick);
    + clk_disable(omap_ssi->fck);
    +
    +out:
    + spin_unlock_bh(&omap_ssi->ck_lock);
    +}
    +
    +#ifdef CONFIG_DEBUG_FS
    +static int ssi_debug_show(struct seq_file *m, void *p)
    +{
    + struct hsi_controller *ssi = m->private;
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long sys = omap_ssi->sys;
    +
    + ssi_clk_enable(ssi);
    + seq_printf(m, "REVISION\t: 0x%08x\n",
    + __raw_readl(sys + SSI_REVISION_REG));
    + seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
    + __raw_readl(sys + SSI_SYSCONFIG_REG));
    + seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
    + __raw_readl(sys + SSI_SYSSTATUS_REG));
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static int ssi_debug_port_show(struct seq_file *m, void *p)
    +{
    + struct hsi_port *port = m->private;
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long base = omap_ssi->sys;
    + unsigned int ch;
    +
    + ssi_clk_enable(ssi);
    + if (omap_port->wake_irq > 0)
    + seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
    + seq_printf(m, "WAKE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_WAKE_REG(port->num)));
    + seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
    + __raw_readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
    + seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
    + __raw_readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
    + /* SST */
    + base = omap_port->sst_base;
    + seq_printf(m, "\nSST\n===\n");
    + seq_printf(m, "MODE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_MODE_REG));
    + seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_FRAMESIZE_REG));
    + seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_DIVISOR_REG));
    + seq_printf(m, "CHANNELS\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_CHANNELS_REG));
    + seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_ARBMODE_REG));
    + seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_TXSTATE_REG));
    + seq_printf(m, "BUFSTATE\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_BUFSTATE_REG));
    + seq_printf(m, "BREAK\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SST_BREAK_REG));
    + for (ch = 0; ch < omap_port->channels; ch++) {
    + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
    + __raw_readl(base + SSI_SST_BUFFER_CH_REG(ch)));
    + }
    + /* SSR */
    + base = omap_port->ssr_base;
    + seq_printf(m, "\nSSR\n===\n");
    + seq_printf(m, "MODE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_MODE_REG));
    + seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_FRAMESIZE_REG));
    + seq_printf(m, "CHANNELS\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_CHANNELS_REG));
    + seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_TIMEOUT_REG));
    + seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_RXSTATE_REG));
    + seq_printf(m, "BUFSTATE\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_BUFSTATE_REG));
    + seq_printf(m, "BREAK\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_BREAK_REG));
    + seq_printf(m, "ERROR\t\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_ERROR_REG));
    + seq_printf(m, "ERRORACK\t: 0x%08x\n",
    + __raw_readl(base + SSI_SSR_ERRORACK_REG));
    + for (ch = 0; ch < omap_port->channels; ch++) {
    + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
    + __raw_readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
    + }
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static int ssi_debug_gdd_show(struct seq_file *m, void *p)
    +{
    + struct hsi_controller *ssi = m->private;
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long gdd = omap_ssi->gdd;
    + int lch;
    +
    + ssi_clk_enable(ssi);
    + seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
    + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG));
    + seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
    + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG));
    + seq_printf(m, "HW_ID\t\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_HW_ID_REG));
    + seq_printf(m, "PPORT_ID\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_PPORT_ID_REG));
    + seq_printf(m, "MPORT_ID\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_MPORT_ID_REG));
    + seq_printf(m, "TEST\t\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_TEST_REG));
    + seq_printf(m, "GCR\t\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_GCR_REG));
    +
    + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
    + seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
    + seq_printf(m, "CSDP\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CSDP_REG(lch)));
    + seq_printf(m, "CCR\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CCR_REG(lch)));
    + seq_printf(m, "CICR\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CICR_REG(lch)));
    + seq_printf(m, "CSR\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CSR_REG(lch)));
    + seq_printf(m, "CSSA\t\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_CSSA_REG(lch)));
    + seq_printf(m, "CDSA\t\t: 0x%08x\n",
    + __raw_readl(gdd + SSI_GDD_CDSA_REG(lch)));
    + seq_printf(m, "CEN\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CEN_REG(lch)));
    + seq_printf(m, "CSAC\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CSAC_REG(lch)));
    + seq_printf(m, "CDAC\t\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CDAC_REG(lch)));
    + seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
    + __raw_readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch)));
    + }
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static int ssi_regs_open(struct inode *inode, struct file *file)
    +{
    + return single_open(file, ssi_debug_show, inode->i_private);
    +}
    +
    +static int ssi_port_regs_open(struct inode *inode, struct file *file)
    +{
    + return single_open(file, ssi_debug_port_show, inode->i_private);
    +}
    +
    +static int ssi_gdd_regs_open(struct inode *inode, struct file *file)
    +{
    + return single_open(file, ssi_debug_gdd_show, inode->i_private);
    +}
    +
    +static const struct file_operations ssi_regs_fops = {
    + .open = ssi_regs_open,
    + .read = seq_read,
    + .llseek = seq_lseek,
    + .release = single_release,
    +};
    +
    +static const struct file_operations ssi_port_regs_fops = {
    + .open = ssi_port_regs_open,
    + .read = seq_read,
    + .llseek = seq_lseek,
    + .release = single_release,
    +};
    +
    +static const struct file_operations ssi_gdd_regs_fops = {
    + .open = ssi_gdd_regs_open,
    + .read = seq_read,
    + .llseek = seq_lseek,
    + .release = single_release,
    +};
    +
    +static int __init ssi_debug_add_port(struct device *dev, void *data)
    +{
    + struct hsi_port *port = to_hsi_port(dev);
    + struct dentry *dir = data;
    +
    + dir = debugfs_create_dir(dev_name(dev), dir);
    + if (IS_ERR(dir))
    + return PTR_ERR(dir);
    + debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
    +
    + return 0;
    +}
    +
    +static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct dentry *dir;
    + int err;
    +
    + /* SSI controller */
    + omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
    + if (IS_ERR(omap_ssi->dir))
    + return PTR_ERR(omap_ssi->dir);
    +
    + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
    + &ssi_regs_fops);
    + /* SSI GDD (DMA) */
    + dir = debugfs_create_dir("gdd", omap_ssi->dir);
    + if (IS_ERR(dir))
    + goto rback;
    + debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
    + /* SSI ports */
    + err = device_for_each_child(&ssi->device, omap_ssi->dir,
    + ssi_debug_add_port);
    + if (err < 0)
    + goto rback;
    +
    + return 0;
    +rback:
    + debugfs_remove_recursive(omap_ssi->dir);
    +
    + return PTR_ERR(dir);
    +}
    +
    +static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + debugfs_remove_recursive(omap_ssi->dir);
    +}
    +#endif /* CONFIG_DEBUG_FS */
    +
    +static int ssi_claim_lch(struct hsi_msg *msg)
    +{
    +
    + struct hsi_port *port = hsi_get_port(msg->cl);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + int lch;
    +
    + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
    + if (!omap_ssi->gdd_trn[lch].msg) {
    + omap_ssi->gdd_trn[lch].msg = msg;
    + omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
    + return lch;
    + }
    +
    + return -EBUSY;
    +}
    +
    +static int ssi_start_pio(struct hsi_msg *msg)
    +{
    + struct hsi_port *port = hsi_get_port(msg->cl);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + u32 val;
    +
    + ssi_clk_enable(ssi);
    + if (msg->ttype == HSI_MSG_WRITE) {
    + val = SSI_DATAACCEPT(msg->channel);
    + ssi_clk_enable(ssi); /* Hold clocks for pio writes */
    + } else {
    + val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
    + }
    + dev_dbg(&port->device, "Single %s transfer\n",
    + msg->ttype ? "write" : "read");
    + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + ssi_clk_disable(ssi);
    + msg->actual_len = 0;
    + msg->status = HSI_STATUS_PROCEDING;
    +
    + return 0;
    +}
    +
    +static int ssi_start_dma(struct hsi_msg *msg, int lch)
    +{
    + struct hsi_port *port = hsi_get_port(msg->cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long gdd = omap_ssi->gdd;
    + int err;
    + u16 csdp;
    + u16 ccr;
    + u32 s_addr;
    + u32 d_addr;
    + u32 tmp;
    +
    + if (msg->ttype == HSI_MSG_READ) {
    + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
    + DMA_FROM_DEVICE);
    + if (err < 0) {
    + dev_dbg(&ssi->device, "DMA map SG failed!\n");
    + return err;
    + }
    + csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
    + SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
    + SSI_DATA_TYPE_S32;
    + ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
    + ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
    + SSI_CCR_ENABLE;
    + s_addr = omap_port->ssr_dma +
    + SSI_SSR_BUFFER_CH_REG(msg->channel);
    + d_addr = sg_dma_address(msg->sgt.sgl);
    + } else {
    + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
    + DMA_TO_DEVICE);
    + if (err < 0) {
    + dev_dbg(&ssi->device, "DMA map SG failed!\n");
    + return err;
    + }
    + csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
    + SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
    + SSI_DATA_TYPE_S32;
    + ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
    + ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
    + SSI_CCR_ENABLE;
    + s_addr = sg_dma_address(msg->sgt.sgl);
    + d_addr = omap_port->sst_dma +
    + SSI_SST_BUFFER_CH_REG(msg->channel);
    + }
    + dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x"
    + " d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr);
    + ssi_clk_enable(ssi); /* Hold clocks during the transfer */
    + __raw_writew(csdp, gdd + SSI_GDD_CSDP_REG(lch));
    + __raw_writew(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
    + __raw_writel(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
    + __raw_writel(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
    + __raw_writew(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
    + gdd + SSI_GDD_CEN_REG(lch));
    + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + tmp |= SSI_GDD_LCH(lch);
    + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + __raw_writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
    + msg->status = HSI_STATUS_PROCEDING;
    +
    + return 0;
    +}
    +
    +static int ssi_start_transfer(struct list_head *queue)
    +{
    + struct hsi_msg *msg;
    + int lch = -1;
    +
    + if (list_empty(queue))
    + return 0;
    + msg = list_first_entry(queue, struct hsi_msg, link);
    + if (msg->status != HSI_STATUS_QUEUED)
    + return 0;
    + if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
    + lch = ssi_claim_lch(msg);
    + if (lch >= 0)
    + return ssi_start_dma(msg, lch);
    + else
    + return ssi_start_pio(msg);
    +}
    +
    +static void ssi_error(struct hsi_port *port)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg;
    + unsigned int i;
    + u32 err;
    + u32 val;
    + u32 tmp;
    +
    + /* ACK error */
    + err = __raw_readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
    + dev_err(&port->device, "SSI error: 0x%02x\n", err);
    + if (!err) {
    + dev_dbg(&port->device, "spurious SSI error ignored!\n");
    + return;
    + }
    + /* Cancel all GDD read transfers */
    + for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
    + msg = omap_ssi->gdd_trn[i].msg;
    + if ((msg) && (msg->ttype == HSI_MSG_READ)) {
    + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
    + val |= (1 << i);
    + omap_ssi->gdd_trn[i].msg = NULL;
    + }
    + }
    + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + tmp &= ~val;
    + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + /* Cancel all PIO read transfers */
    + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
    + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + /* Signal the error to all currently pending read requests */
    + for (i = 0; i < omap_port->channels; i++) {
    + if (list_empty(&omap_port->rxqueue[i]))
    + continue;
    + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
    + link);
    + list_del(&msg->link);
    + msg->status = HSI_STATUS_ERROR;
    + msg->complete(msg);
    + /* Now restart queued reads if any */
    + ssi_start_transfer(&omap_port->rxqueue[i]);
    + }
    + /* ACK error */
    + __raw_writel(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
    +}
    +
    +static void ssi_break_complete(struct hsi_port *port)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg;
    + struct hsi_msg *tmp;
    + u32 val;
    +
    + dev_dbg(&port->device, "HWBREAK received\n");
    +
    + spin_lock(&omap_port->lock);
    + val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + val &= ~SSI_BREAKDETECTED;
    + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
    + spin_unlock(&omap_port->lock);
    +
    + list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
    + msg->status = HSI_STATUS_COMPLETED;
    + list_del(&msg->link);
    + msg->complete(msg);
    + }
    +
    +}
    +
    +static int ssi_async_break(struct hsi_msg *msg)
    +{
    + struct hsi_port *port = hsi_get_port(msg->cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + int err = 0;
    + u32 tmp;
    +
    + ssi_clk_enable(ssi);
    + if (msg->ttype == HSI_MSG_WRITE) {
    + if (omap_port->sst.mode != SSI_MODE_FRAME) {
    + err = -EINVAL;
    + goto out;
    + }
    + __raw_writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
    + msg->status = HSI_STATUS_COMPLETED;
    + msg->complete(msg);
    + } else {
    + if (omap_port->ssr.mode != SSI_MODE_FRAME) {
    + err = -EINVAL;
    + goto out;
    + }
    + spin_lock_bh(&omap_port->lock);
    + tmp = __raw_readl(omap_ssi->sys +
    + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(tmp | SSI_BREAKDETECTED,
    + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + msg->status = HSI_STATUS_PROCEDING;
    + list_add_tail(&msg->link, &omap_port->brkqueue);
    + spin_unlock_bh(&omap_port->lock);
    + }
    +out:
    + ssi_clk_disable(ssi);
    +
    + return err;
    +}
    +
    +static int ssi_async(struct hsi_msg *msg)
    +{
    + struct hsi_port *port = hsi_get_port(msg->cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct list_head *queue;
    + int err;
    +
    + BUG_ON(!msg);
    +
    + if (msg->sgt.nents > 1)
    + return -ENOSYS; /* TODO: Add sg support */
    +
    + if (msg->break_frame)
    + return ssi_async_break(msg);
    +
    + if (msg->ttype) {
    + BUG_ON(msg->channel >= omap_port->sst.channels);
    + queue = &omap_port->txqueue[msg->channel];
    + } else {
    + BUG_ON(msg->channel >= omap_port->ssr.channels);
    + queue = &omap_port->rxqueue[msg->channel];
    + }
    + msg->status = HSI_STATUS_QUEUED;
    + spin_lock_bh(&omap_port->lock);
    + list_add_tail(&msg->link, queue);
    + err = ssi_start_transfer(queue);
    + spin_unlock_bh(&omap_port->lock);
    +
    + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
    + msg->status, msg->ttype, msg->channel);
    +
    + return err;
    +}
    +
    +static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
    +{
    + struct list_head *node, *tmp;
    + struct hsi_msg *msg;
    +
    + list_for_each_safe(node, tmp, queue) {
    + msg = list_entry(node, struct hsi_msg, link);
    + if ((cl) && (cl != msg->cl))
    + continue;
    + list_del(node);
    + pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
    + msg->channel, msg, msg->sgt.sgl->length,
    + msg->ttype, msg->context);
    + if (msg->destructor)
    + msg->destructor(msg);
    + else
    + hsi_free_msg(msg);
    + }
    +}
    +
    +static u32 ssi_calculate_div(struct hsi_controller *ssi, u32 max_speed)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + u32 tx_fckrate = omap_ssi->fck_rate;
    +
    + /* / 2 : SSI TX clock is always half of the SSI functional clock */
    + tx_fckrate >>= 1;
    + /* Round down when tx_fckrate % max_speed == 0 */
    + tx_fckrate--;
    + dev_dbg(&ssi->device, "TX divisor is %d for fck_rate %d speed %d\n",
    + tx_fckrate / max_speed, omap_ssi->fck_rate, max_speed);
    +
    + return tx_fckrate / max_speed;
    +}
    +
    +static int ssi_setup(struct hsi_client *cl)
    +{
    + struct hsi_port *port = to_hsi_port(cl->device.parent);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + unsigned long sst = omap_port->sst_base;
    + unsigned long ssr = omap_port->ssr_base;
    + u32 div = 0;
    + int err = 0;
    +
    + ssi_clk_enable(ssi);
    + spin_lock_bh(&omap_port->lock);
    +
    + if (cl->tx_cfg.speed)
    + div = ssi_calculate_div(ssi, cl->tx_cfg.speed);
    +
    + if (div > SSI_MAX_DIVISOR) {
    + dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
    + cl->tx_cfg.speed, div);
    + err = -EINVAL;
    + goto out;
    + }
    + /* Set TX module to sleep to stop TX during cfg update */
    + __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
    + __raw_writel(31, sst + SSI_SST_FRAMESIZE_REG);
    + __raw_writel(div, sst + SSI_SST_DIVISOR_REG);
    + __raw_writel(cl->tx_cfg.channels, sst + SSI_SST_CHANNELS_REG);
    + __raw_writel(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
    + __raw_writel(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
    + /* Set RX module to sleep to stop RX during cfg update */
    + __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
    + __raw_writel(31, ssr + SSI_SSR_FRAMESIZE_REG);
    + __raw_writel(cl->rx_cfg.channels, ssr + SSI_SSR_CHANNELS_REG);
    + __raw_writel(0, ssr + SSI_SSR_TIMEOUT_REG);
    + /* Cleanup the break queue if we leave FRAME mode */
    + if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
    + (cl->rx_cfg.mode != SSI_MODE_FRAME))
    + ssi_flush_queue(&omap_port->brkqueue, cl);
    + __raw_writel(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
    + omap_port->channels = max(cl->rx_cfg.channels, cl->tx_cfg.channels);
    + /* Shadow registering for OFF mode */
    + /* SST */
    + omap_port->sst.divisor = div;
    + omap_port->sst.frame_size = 31;
    + omap_port->sst.channels = cl->tx_cfg.channels;
    + omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
    + omap_port->sst.mode = cl->tx_cfg.mode;
    + /* SSR */
    + omap_port->ssr.frame_size = 31;
    + omap_port->ssr.timeout = 0;
    + omap_port->ssr.channels = cl->rx_cfg.channels;
    + omap_port->ssr.mode = cl->rx_cfg.mode;
    +out:
    + spin_unlock_bh(&omap_port->lock);
    + ssi_clk_disable(ssi);
    +
    + return err;
    +}
    +
    +static void ssi_cleanup_queues(struct hsi_client *cl)
    +{
    + struct hsi_port *port = hsi_get_port(cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg;
    + unsigned int i;
    + u32 rxbufstate = 0;
    + u32 txbufstate = 0;
    + u32 status = SSI_ERROROCCURED;
    + u32 tmp;
    +
    + ssi_flush_queue(&omap_port->brkqueue, cl);
    + if (list_empty(&omap_port->brkqueue))
    + status |= SSI_BREAKDETECTED;
    +
    + for (i = 0; i < omap_port->channels; i++) {
    + msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
    + link);
    + if ((msg) && (msg->cl == cl)) {
    + txbufstate |= (1 << i);
    + status |= SSI_DATAACCEPT(i);
    + /* Release the clocks writes, also GDD ones */
    + ssi_clk_disable(ssi);
    + }
    + ssi_flush_queue(&omap_port->txqueue[i], cl);
    + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
    + link);
    + if ((msg) && (msg->cl == cl)) {
    + rxbufstate |= (1 << i);
    + status |= SSI_DATAAVAILABLE(i);
    + }
    + ssi_flush_queue(&omap_port->rxqueue[i], cl);
    + /* Check if we keep the error detection interrupt armed */
    + if (!list_empty(&omap_port->rxqueue[i]))
    + status &= ~SSI_ERROROCCURED;
    + }
    + /* Cleanup write buffers */
    + tmp = __raw_readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
    + tmp &= ~txbufstate;
    + __raw_writel(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
    + /* Cleanup read buffers */
    + tmp = __raw_readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
    + tmp &= ~rxbufstate;
    + __raw_writel(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
    + /* Disarm and ack pending interrupts */
    + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + tmp &= ~status;
    + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    +}
    +
    +static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg;
    + unsigned int i;
    + u32 val = 0;
    + u32 tmp;
    +
    + for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
    + msg = omap_ssi->gdd_trn[i].msg;
    + if ((!msg) || (msg->cl != cl))
    + continue;
    + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
    + val |= (1 << i);
    + /*
    + * Clock references for write will be handled in
    + * ssi_cleanup_queues
    + */
    + if (msg->ttype == HSI_MSG_READ)
    + ssi_clk_disable(ssi);
    + omap_ssi->gdd_trn[i].msg = NULL;
    + }
    + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + tmp &= ~val;
    + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    +}
    +
    +static int ssi_release(struct hsi_client *cl)
    +{
    + struct hsi_port *port = hsi_get_port(cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    +
    + ssi_clk_enable(ssi);
    + spin_lock_bh(&omap_port->lock);
    + /* Stop all communications */
    + __raw_writel(SSI_MODE_SLEEP, omap_port->sst_base + SSI_SST_MODE_REG);
    + __raw_writel(SSI_MODE_SLEEP, omap_port->ssr_base + SSI_SSR_MODE_REG);
    + /* Stop all the pending DMA requests for that client */
    + ssi_cleanup_gdd(ssi, cl);
    + /* Now cleanup all the queues */
    + ssi_cleanup_queues(cl);
    + /* Restart communications */
    + ssi_restore_port_mode(&port->device, NULL);
    + spin_unlock_bh(&omap_port->lock);
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static int ssi_flush(struct hsi_client *cl)
    +{
    + struct hsi_port *port = hsi_get_port(cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg;
    + unsigned long sst = omap_port->sst_base;
    + unsigned long ssr = omap_port->ssr_base;
    + unsigned int i;
    + u32 err;
    +
    + ssi_clk_enable(ssi);
    + spin_lock_bh(&omap_port->lock);
    + /* Stop all communications */
    + __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
    + __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
    + /* Stop all DMA transfers */
    + for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
    + msg = omap_ssi->gdd_trn[i].msg;
    + if (!msg || (port != hsi_get_port(msg->cl)))
    + continue;
    + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
    + if (msg->ttype == HSI_MSG_READ)
    + ssi_clk_disable(ssi);
    + omap_ssi->gdd_trn[i].msg = NULL;
    + }
    + /* Flush all SST buffers */
    + __raw_writel(0, sst + SSI_SST_BUFSTATE_REG);
    + __raw_writel(0, sst + SSI_SST_TXSTATE_REG);
    + /* Flush all SSR buffers */
    + __raw_writel(0, ssr + SSI_SSR_RXSTATE_REG);
    + __raw_writel(0, ssr + SSI_SSR_BUFSTATE_REG);
    + /* Flush all errors */
    + err = __raw_readl(ssr + SSI_SSR_ERROR_REG);
    + __raw_writel(err, ssr + SSI_SSR_ERRORACK_REG);
    + /* Flush break */
    + __raw_writel(0, ssr + SSI_SSR_BREAK_REG);
    + /* Clear interrupts */
    + __raw_writel(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(0xffffff00,
    + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    + __raw_writel(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + __raw_writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    + /* Dequeue all pending requests */
    + for (i = 0; i < omap_port->channels; i++) {
    + /* Release write clocks */
    + if (!list_empty(&omap_port->txqueue[i]))
    + ssi_clk_disable(ssi);
    + ssi_flush_queue(&omap_port->txqueue[i], NULL);
    + ssi_flush_queue(&omap_port->rxqueue[i], NULL);
    + }
    + ssi_flush_queue(&omap_port->brkqueue, NULL);
    + /* Restart communications */
    + ssi_restore_port_mode(&port->device, NULL);
    + spin_unlock_bh(&omap_port->lock);
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static int ssi_start_tx(struct hsi_client *cl)
    +{
    + struct hsi_port *port = hsi_get_port(cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);
    +
    + spin_lock_bh(&omap_port->wk_lock);
    + if (omap_port->wk_refcount++) {
    + spin_unlock_bh(&omap_port->wk_lock);
    + return 0;
    + }
    + ssi_clk_enable(ssi); /* Grab clocks */
    + __raw_writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
    + spin_unlock_bh(&omap_port->wk_lock);
    +
    + return 0;
    +}
    +
    +static int ssi_stop_tx(struct hsi_client *cl)
    +{
    + struct hsi_port *port = hsi_get_port(cl);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);
    +
    + spin_lock_bh(&omap_port->wk_lock);
    + BUG_ON(!omap_port->wk_refcount);
    + if (--omap_port->wk_refcount) {
    + spin_unlock_bh(&omap_port->wk_lock);
    + return 0;
    + }
    + __raw_writel(SSI_WAKE(0),
    + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
    + ssi_clk_disable(ssi); /* Release clocks */
    + spin_unlock_bh(&omap_port->wk_lock);
    +
    + return 0;
    +}
    +
    +static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
    +{
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct hsi_msg *msg;
    + u32 *buf;
    + u32 val;
    +
    + spin_lock(&omap_port->lock);
    + msg = list_first_entry(queue, struct hsi_msg, link);
    + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
    + msg->actual_len = 0;
    + msg->status = HSI_STATUS_PENDING;
    + }
    + if (msg->status == HSI_STATUS_PROCEDING) {
    + buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
    + if (msg->ttype == HSI_MSG_WRITE)
    + __raw_writel(*buf, omap_port->sst_base +
    + SSI_SST_BUFFER_CH_REG(msg->channel));
    + else
    + *buf = __raw_readl(omap_port->ssr_base +
    + SSI_SSR_BUFFER_CH_REG(msg->channel));
    + dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
    + msg->ttype, *buf);
    + msg->actual_len += sizeof(*buf);
    + if (msg->actual_len >= msg->sgt.sgl->length)
    + msg->status = HSI_STATUS_COMPLETED;
    + /*
    + * Wait for the last written frame to be really sent before
    + * we call the complete callback
    + */
    + if ((msg->status == HSI_STATUS_PROCEDING) ||
    + ((msg->status == HSI_STATUS_COMPLETED) &&
    + (msg->ttype == HSI_MSG_WRITE)))
    + goto out;
    +
    + }
    + if (msg->status == HSI_STATUS_PROCEDING)
    + goto out;
    + /* Transfer completed at this point */
    + val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + if (msg->ttype == HSI_MSG_WRITE) {
    + val &= ~SSI_DATAACCEPT(msg->channel);
    + ssi_clk_disable(ssi); /* Release clocks for write transfer */
    + } else {
    + val &= ~SSI_DATAAVAILABLE(msg->channel);
    + }
    + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + list_del(&msg->link);
    + spin_unlock(&omap_port->lock);
    + msg->complete(msg);
    + spin_lock(&omap_port->lock);
    + ssi_start_transfer(queue);
    +out:
    + spin_unlock(&omap_port->lock);
    +}
    +
    +static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg;
    + struct hsi_port *port = to_hsi_port(msg->cl->device.parent);
    + unsigned int dir;
    + u32 csr;
    + u32 val;
    +
    + spin_lock(&omap_ssi->lock);
    +
    + val = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    + val &= ~SSI_GDD_LCH(lch);
    + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    +
    + if (msg->ttype == HSI_MSG_READ) {
    + dir = DMA_FROM_DEVICE;
    + val = SSI_DATAAVAILABLE(msg->channel);
    + ssi_clk_disable(ssi);
    + } else {
    + dir = DMA_TO_DEVICE;
    + val = SSI_DATAACCEPT(msg->channel);
    + /* Keep clocks reference for write pio event */
    + }
    + dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir);
    + csr = __raw_readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch));
    + omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */
    + if (csr & SSI_CSR_TOUR) { /* Timeout error */
    + msg->status = HSI_STATUS_ERROR;
    + msg->actual_len = 0;
    + list_del(&msg->link); /* Dequeue msg */
    + spin_unlock(&omap_ssi->lock);
    + msg->complete(msg);
    + return;
    + }
    +
    + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    +
    + msg->status = HSI_STATUS_COMPLETED;
    + msg->actual_len = sg_dma_len(msg->sgt.sgl);
    + spin_unlock(&omap_ssi->lock);
    +}
    +
    +static void ssi_gdd_tasklet(unsigned long dev)
    +{
    + struct hsi_controller *ssi = (struct hsi_controller *)dev;
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long sys = omap_ssi->sys;
    + unsigned int lch;
    + u32 status_reg;
    +
    + ssi_clk_enable(ssi);
    +
    + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
    + if (status_reg & SSI_GDD_LCH(lch))
    + ssi_gdd_complete(ssi, lch);
    + }
    + __raw_writel(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    + ssi_clk_disable(ssi);
    + if (status_reg)
    + tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
    + else
    + enable_irq(omap_ssi->gdd_irq);
    +}
    +
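    +/*
    + * ssi_gdd_isr - GDD interrupt handler. Defers the work to the GDD
    + * tasklet and keeps the IRQ line disabled until the tasklet has
    + * drained all pending events.
    + */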
    +static irqreturn_t ssi_gdd_isr(int irq, void *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
    + disable_irq_nosync(irq);
    +
    + return IRQ_HANDLED;
    +}
    +
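    +/*
    + * ssi_pio_tasklet - bottom half for the port MPU interrupt. Moves one
    + * word per pending channel event (PIO read or write), handles break
    + * and error conditions, acknowledges the status bits and re-enables
    + * the port IRQ, or reschedules itself if more events are pending.
    + */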
    +static void ssi_pio_tasklet(unsigned long ssi_port)
    +{
    + struct hsi_port *port = (struct hsi_port *)ssi_port;
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned long sys = omap_ssi->sys;
    + unsigned int ch;
    + u32 status_reg;
    +
    + ssi_clk_enable(ssi);
    + status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
    + status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
    +
    + for (ch = 0; ch < omap_port->channels; ch++) {
    + if (status_reg & SSI_DATAACCEPT(ch))
    + ssi_pio_complete(port, &omap_port->txqueue[ch]);
    + if (status_reg & SSI_DATAAVAILABLE(ch))
    + ssi_pio_complete(port, &omap_port->rxqueue[ch]);
    + }
    + if (status_reg & SSI_BREAKDETECTED)
    + ssi_break_complete(port);
    + if (status_reg & SSI_ERROROCCURED)
    + ssi_error(port);
    + __raw_writel(status_reg, sys + SSI_MPU_STATUS_REG(port->num, 0));
    +
    + status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
    + status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
    + ssi_clk_disable(ssi);
    +
    + if (status_reg)
    + tasklet_hi_schedule(&omap_port->pio_tasklet);
    + else
    + enable_irq(omap_port->irq);
    +}
    +
    +static irqreturn_t ssi_pio_isr(int irq, void *port)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    +
    + tasklet_hi_schedule(&omap_port->pio_tasklet);
    + disable_irq_nosync(irq);
    +
    + return IRQ_HANDLED;
    +}
    +
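    +/*
    + * ssi_wake_tasklet - handles transitions on the wake input line:
    + * a rising edge takes a clock reference and reports HSI_EVENT_START_RX,
    + * a falling edge reports HSI_EVENT_STOP_RX and drops the reference.
    + */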
    +static void ssi_wake_tasklet(unsigned long ssi_port)
    +{
    + struct hsi_port *port = (struct hsi_port *)ssi_port;
    + struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    +
    + if (ssi_wakein(port)) {
    + /*
    + * We can have a quick high-low-high transition on the line.
    + * In that case, with long interrupt latencies, we can miss
    + * the low event or get the high event twice. This workaround
    + * avoids breaking the clock reference count when that happens.
    + */
    + if (!omap_port->wkin_cken) {
    + omap_port->wkin_cken = 1;
    + ssi_clk_enable(ssi);
    + }
    + hsi_event(port, HSI_EVENT_START_RX);
    + } else {
    + hsi_event(port, HSI_EVENT_STOP_RX);
    + if (omap_port->wkin_cken) {
    + ssi_clk_disable(ssi);
    + omap_port->wkin_cken = 0;
    + }
    + }
    +}
    +
    +static irqreturn_t ssi_wake_isr(int irq, void *ssi_port)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
    +
    + tasklet_hi_schedule(&omap_port->wake_tasklet);
    +
    + return IRQ_HANDLED;
    +}
    +
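    +/*
    + * ssi_port_irq - claims the MPU IRQ resource of the port, initializes
    + * the PIO tasklet and installs ssi_pio_isr as the interrupt handler.
    + */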
    +static int __init ssi_port_irq(struct hsi_port *port,
    + struct platform_device *pd)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct resource *irq;
    + int err;
    +
    + irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 1);
    + if (!irq) {
    + dev_err(&port->device, "Port IRQ resource missing\n");
    + return -ENXIO;
    + }
    + omap_port->irq = irq->start;
    + tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
    + (unsigned long)port);
    + err = devm_request_irq(&pd->dev, omap_port->irq, ssi_pio_isr,
    + IRQF_DISABLED, irq->name, port);
    + if (err < 0)
    + dev_err(&port->device, "Request IRQ %d failed (%d)\n",
    + omap_port->irq, err);
    + return err;
    +}
    +
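    +/*
    + * ssi_wake_irq - claims the optional wake input IRQ of the port. If the
    + * resource is left unset the port simply runs without wake detection;
    + * otherwise the wake tasklet is set up and an edge-triggered handler
    + * is installed for both rising and falling transitions.
    + */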
    +static int __init ssi_wake_irq(struct hsi_port *port,
    + struct platform_device *pd)
    +{
    + struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    + struct resource *irq;
    + int err;
    +
    + irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 3);
    + if (!irq) {
    + dev_err(&port->device, "Wake in IRQ resource missing");
    + return -ENXIO;
    + }
    + if (irq->flags & IORESOURCE_UNSET) {
    + dev_info(&port->device, "No Wake in support\n");
    + omap_port->wake_irq = -1;
    + return 0;
    + }
    + omap_port->wake_irq = irq->start;
    + tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
    + (unsigned long)port);
    + err = devm_request_irq(&pd->dev, omap_port->wake_irq, ssi_wake_isr,
    + IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
    + irq->name, port);
    + if (err < 0)
    + dev_err(&port->device, "Request Wake in IRQ %d failed (%d)\n",
    + omap_port->wake_irq, err);
    + return err;
    +}
    +
    +static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
    +{
    + unsigned int ch;
    +
    + for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
    + INIT_LIST_HEAD(&omap_port->txqueue[ch]);
    + INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
    + }
    + INIT_LIST_HEAD(&omap_port->brkqueue);
    +}
    +
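    +/*
    + * ssi_get_iomem - requests and ioremaps IO memory resource num of the
    + * platform device, returning the virtual base in *base and, optionally,
    + * the physical start address in *phy.
    + */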
    +static int __init ssi_get_iomem(struct platform_device *pd,
    + unsigned int num, unsigned long *base, dma_addr_t *phy)
    +{
    + struct resource *mem;
    + struct resource *ioarea;
    +
    + mem = platform_get_resource(pd, IORESOURCE_MEM, num);
    + if (!mem) {
    + dev_err(&pd->dev, "IO memory region missing (%d)\n", num);
    + return -ENXIO;
    + }
    + ioarea = devm_request_mem_region(&pd->dev, mem->start,
    + (mem->end - mem->start) + 1, dev_name(&pd->dev));
    + if (!ioarea) {
    + dev_err(&pd->dev, "%s IO memory region request failed\n",
    + mem->name);
    + return -ENXIO;
    + }
    + *base = (unsigned long)devm_ioremap(&pd->dev, mem->start,
    + (mem->end - mem->start) + 1);
    + if (!*base) {
    + dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
    + return -ENXIO;
    + }
    + if (phy)
    + *phy = mem->start;
    +
    + return 0;
    +}
    +
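    +/*
    + * ssi_ports_init - per-port setup: allocates the omap_ssi_port context,
    + * hooks the HSI port operations, maps the SST and SSR register regions,
    + * requests the port and wake IRQs and initializes queues and locks.
    + */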
    +static int __init ssi_ports_init(struct hsi_controller *ssi,
    + struct platform_device *pd)
    +{
    + struct hsi_port *port;
    + struct omap_ssi_port *omap_port;
    + unsigned int i;
    + int err;
    +
    + for (i = 0; i < ssi->num_ports; i++) {
    + port = &ssi->port[i];
    + omap_port = devm_kzalloc(&pd->dev, sizeof(*omap_port),
    + GFP_KERNEL);
    + if (!omap_port)
    + return -ENOMEM;
    + port->async = ssi_async;
    + port->setup = ssi_setup;
    + port->flush = ssi_flush;
    + port->start_tx = ssi_start_tx;
    + port->stop_tx = ssi_stop_tx;
    + port->release = ssi_release;
    + hsi_port_set_drvdata(port, omap_port);
    + /* Get SST base addresses */
    + err = ssi_get_iomem(pd, ((i * 2) + 2), &omap_port->sst_base,
    + &omap_port->sst_dma);
    + if (err < 0)
    + return err;
    + /* Get SSR base addresses */
    + err = ssi_get_iomem(pd, ((i * 2) + 3), &omap_port->ssr_base,
    + &omap_port->ssr_dma);
    + if (err < 0)
    + return err;
    + err = ssi_port_irq(port, pd);
    + if (err < 0)
    + return err;
    + err = ssi_wake_irq(port, pd);
    + if (err < 0)
    + return err;
    + ssi_queues_init(omap_port);
    + spin_lock_init(&omap_port->lock);
    + spin_lock_init(&omap_port->wk_lock);
    + }
    +
    + return 0;
    +}
    +
    +static void ssi_ports_exit(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_port *omap_port;
    + unsigned int i;
    +
    + for (i = 0; i < ssi->num_ports; i++) {
    + omap_port = hsi_port_drvdata(&ssi->port[i]);
    + WARN_ON(omap_port->wk_refcount != 0);
    + tasklet_kill(&omap_port->wake_tasklet);
    + tasklet_kill(&omap_port->pio_tasklet);
    + }
    +}
    +
    +static int __init ssi_clk_get(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + int err;
    +
    + omap_ssi->ick = clk_get(&ssi->device, "ssi_ick");
    + if (IS_ERR(omap_ssi->ick)) {
    + dev_err(&ssi->device, "Interface clock missing\n");
    + return PTR_ERR(omap_ssi->ick);
    + }
    + omap_ssi->fck = clk_get(&ssi->device, "ssi_ssr_fck");
    + if (IS_ERR(omap_ssi->fck)) {
    + dev_err(&ssi->device, "Functional clock missing\n");
    + err = PTR_ERR(omap_ssi->fck);
    + goto out1;
    + }
    +
    + return 0;
    +out1:
    + clk_put(omap_ssi->ick);
    +
    + return err;
    +}
    +
    +static void ssi_clk_put(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + WARN_ON(omap_ssi->ck_refcount != 0);
    +
    + clk_put(omap_ssi->ick);
    + clk_put(omap_ssi->fck);
    +}
    +
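    +/*
    + * ssi_add_controller - allocates the controller context, maps the SYS
    + * and GDD register regions, requests the GDD IRQ, initializes the ports
    + * and clocks and finally registers the controller with the HSI core.
    + */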
    +static int __init ssi_add_controller(struct hsi_controller *ssi,
    + struct platform_device *pd)
    +{
    + struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
    + struct omap_ssi_controller *omap_ssi;
    + struct resource *irq;
    + int err;
    +
    + omap_ssi = devm_kzalloc(&pd->dev, sizeof(*omap_ssi), GFP_KERNEL);
    + if (!omap_ssi) {
    + dev_err(&pd->dev, "not enough memory for omap ssi\n");
    + return -ENOMEM;
    + }
    + ssi->id = pd->id;
    + ssi->device.parent = &pd->dev;
    + dev_set_name(&ssi->device, "ssi%d", ssi->id);
    + hsi_controller_set_drvdata(ssi, omap_ssi);
    + omap_ssi->dev = &ssi->device;
    + err = ssi_get_iomem(pd, 0, &omap_ssi->sys, NULL);
    + if (err < 0)
    + return err;
    + err = ssi_get_iomem(pd, 1, &omap_ssi->gdd, NULL);
    + if (err < 0)
    + return err;
    + irq = platform_get_resource(pd, IORESOURCE_IRQ, 0);
    + if (!irq) {
    + dev_err(&pd->dev, "GDD IRQ resource missing\n");
    + return -ENXIO;
    + }
    + omap_ssi->gdd_irq = irq->start;
    + tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
    + (unsigned long)ssi);
    + err = devm_request_irq(&pd->dev, omap_ssi->gdd_irq, ssi_gdd_isr,
    + IRQF_DISABLED, irq->name, ssi);
    + if (err < 0) {
    + dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)",
    + omap_ssi->gdd_irq, err);
    + return err;
    + }
    + err = ssi_ports_init(ssi, pd);
    + if (err < 0)
    + return err;
    + omap_ssi->get_loss = omap_ssi_pdata->get_dev_context_loss_count;
    + spin_lock_init(&omap_ssi->lock);
    + spin_lock_init(&omap_ssi->ck_lock);
    +
    + err = ssi_clk_get(ssi);
    + if (err < 0)
    + return err;
    +
    + err = hsi_register_controller(ssi);
    + if (err < 0)
    + ssi_clk_put(ssi);
    +
    + return err;
    +}
    +
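    +/*
    + * ssi_hw_init - soft-resets the SSI and GDD blocks, caches the
    + * functional clock rate and programs the default power management
    + * settings (autoidle, smart-idle modes and GDD clock autogating).
    + */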
    +static int __init ssi_hw_init(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    + unsigned int i;
    + u32 val;
    + int err;
    +
    + err = ssi_clk_enable(ssi);
    + if (err < 0) {
    + dev_err(&ssi->device, "Failed to enable the clocks %d\n", err);
    + return err;
    + }
    + /* Resetting SSI controller */
    + __raw_writel(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
    + val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
    + for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
    + msleep(10);
    + val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
    + }
    + if (!(val & SSI_RESETDONE)) {
    + dev_err(&ssi->device, "SSI HW reset failed\n");
    + ssi_clk_disable(ssi);
    + return -EIO;
    + }
    + /* Resetting GDD */
    + __raw_writel(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
    + /* Get FCK rate */
    + omap_ssi->fck_rate = (u32)clk_get_rate(omap_ssi->fck) / 1000; /* kHz */
    + dev_dbg(&ssi->device, "SSI fck rate %d kHz\n", omap_ssi->fck_rate);
    + /* Set default PM settings */
    + val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
    + __raw_writel(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
    + omap_ssi->sysconfig = val;
    + __raw_writel(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
    + omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
    + ssi_clk_disable(ssi);
    +
    + return 0;
    +}
    +
    +static void ssi_remove_controller(struct hsi_controller *ssi)
    +{
    + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    +
    + ssi_ports_exit(ssi);
    + tasklet_kill(&omap_ssi->gdd_tasklet);
    + ssi_clk_put(ssi);
    + hsi_unregister_controller(ssi);
    +}
    +
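    +/*
    + * ssi_probe - allocates an HSI controller for the number of ports given
    + * in the platform data, registers it, resets the hardware and, when
    + * CONFIG_DEBUG_FS is set, exposes the debugfs entries.
    + */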
    +static int __init ssi_probe(struct platform_device *pd)
    +{
    + struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
    + struct hsi_controller *ssi;
    + int err;
    +
    + if (!omap_ssi_pdata) {
    + dev_err(&pd->dev, "No OMAP SSI platform data\n");
    + return -EINVAL;
    + }
    + ssi = hsi_alloc_controller(omap_ssi_pdata->num_ports, GFP_KERNEL);
    + if (!ssi) {
    + dev_err(&pd->dev, "No memory for controller\n");
    + return -ENOMEM;
    + }
    + platform_set_drvdata(pd, ssi);
    + err = ssi_add_controller(ssi, pd);
    + if (err < 0)
    + goto out1;
    + err = ssi_hw_init(ssi);
    + if (err < 0)
    + goto out2;
    +#ifdef CONFIG_DEBUG_FS
    + err = ssi_debug_add_ctrl(ssi);
    + if (err < 0)
    + goto out2;
    +#endif
    + return err;
    +out2:
    + ssi_remove_controller(ssi);
    +out1:
    + platform_set_drvdata(pd, NULL);
    + hsi_free_controller(ssi);
    +
    + return err;
    +}
    +
    +static int __exit ssi_remove(struct platform_device *pd)
    +{
    + struct hsi_controller *ssi = platform_get_drvdata(pd);
    +
    +#ifdef CONFIG_DEBUG_FS
    + ssi_debug_remove_ctrl(ssi);
    +#endif
    + ssi_remove_controller(ssi);
    + platform_set_drvdata(pd, NULL);
    + hsi_free_controller(ssi);
    +
    + return 0;
    +}
    +
    +static struct platform_driver ssi_pdriver = {
    + .remove = __exit_p(ssi_remove),
    + .driver = {
    + .name = "omap_ssi",
    + .owner = THIS_MODULE,
    + },
    +};
    +
    +static int __init omap_ssi_init(void)
    +{
    + pr_info("OMAP SSI hw driver loaded\n");
    + return platform_driver_probe(&ssi_pdriver, ssi_probe);
    +}
    +module_init(omap_ssi_init);
    +
    +static void __exit omap_ssi_exit(void)
    +{
    + platform_driver_unregister(&ssi_pdriver);
    + pr_info("OMAP SSI driver removed\n");
    +}
    +module_exit(omap_ssi_exit);
    +
    +MODULE_ALIAS("platform:omap_ssi");
    +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
    +MODULE_DESCRIPTION("Synchronous Serial Interface Driver");
    +MODULE_LICENSE("GPL");
    --
    1.5.6.5

