Subject: [RFC PATCH 3/4] Mobile security processor driver
From 14e6bd039387b507d67155cd5d7d0e2a78166987 Mon Sep 17 00:00:00 2001
From: Mark Allyn <mark.a.allyn@intel.com>
Date: Wed, 28 Apr 2010 14:39:30 -0700
Subject: RFC [PATCH 3/4] staging: sep: update program files for SEP December 2009 release

Signed-off-by: Mark A. Allyn <mark.a.allyn@intel.com>
---
drivers/staging/sep/sep_driver.c | 2715 --------------------
drivers/staging/sep/sep_ext_with_pci_driver.c | 548 ++++
drivers/staging/sep/sep_main_mod.c | 3330 +++++++++++++++++++++++++
3 files changed, 3878 insertions(+), 2715 deletions(-)
delete mode 100644 drivers/staging/sep/sep_driver.c
create mode 100644 drivers/staging/sep/sep_ext_with_pci_driver.c
create mode 100644 drivers/staging/sep/sep_main_mod.c

diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 8888073..0000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2715 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define CRYS_SEP_ROM_length 0x4000
-#define CRYS_SEP_ROM_start_address 0x8000C000UL
-#define CRYS_SEP_ROM_start_address_offset 0xC000UL
-#define SEP_ROM_BANK_register 0x80008420UL
-#define SEP_ROM_BANK_register_offset 0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
- /* Index variables */
- unsigned long i, k, j;
- u32 reg;
- u32 error;
- u32 warning;
-
- /* Loading ROM from SEP_ROM_image.h file */
- k = sizeof(CRYS_SEP_ROM);
-
- edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
- edbg("SEP Driver: k is %lu\n", k);
- edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
- edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
- for (i = 0; i < 4; i++) {
- /* write bank */
- sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
- for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
- sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
- k = k - 4;
-
- if (k == 0) {
- j = CRYS_SEP_ROM_length;
- i = 4;
- }
- }
- }
-
- /* reset the SEP */
- sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
- /* poll for SEP ROM boot finish */
- do
- reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg);
-
- edbg("SEP Driver: ROM polling ended\n");
-
- switch (reg) {
- case 0x1:
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver: ROM polling case 1\n");
- break;
- case 0x4:
- /* Cold boot ended successfully */
- case 0x8:
- /* Warmboot ended successfully */
- case 0x10:
- /* ColdWarm boot ended successfully */
- error = 0;
- case 0x2:
- /* Boot First Phase ended */
- warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- case 0x20:
- edbg("SEP Driver: ROM polling case %d\n", reg);
- break;
- }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
- mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- * sep_load_firmware - copy firmware cache/resident
- * @sep: device we are loading
- *
- * This function copies the cache and resident from their source
- * location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
- const struct firmware *fw;
- char *cache_name = "sep/cache.image.bin";
- char *res_name = "sep/resident.image.bin";
- int error;
-
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
- /* load cache */
- error = request_firmware(&fw, cache_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request cache fw\n");
- return error;
- }
- edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
- memcpy(sep->rar_addr, (void *)fw->data, fw->size);
- sep->cache_size = fw->size;
- release_firmware(fw);
-
- sep->resident_bus = sep->rar_bus + sep->cache_size;
- sep->resident_addr = sep->rar_addr + sep->cache_size;
-
- /* load resident */
- error = request_firmware(&fw, res_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request res fw\n");
- return error;
- }
- edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
- memcpy(sep->resident_addr, (void *) fw->data, fw->size);
- sep->resident_size = fw->size;
- release_firmware(fw);
-
- edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
- sep->resident_addr, (unsigned long long)sep->resident_bus,
- sep->rar_addr, (unsigned long long)sep->rar_bus);
- return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- *
- * Allocate a shared buffer in host memory that can be used by both the
- * kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
- unsigned long size)
-{
- /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- /* set the bus address of the shared area */
- edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
- size, sep->shared_addr, (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- *
- * Free the shared area allocated to the security processor. The
- * processor must have finished with this and any final posted
- * writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
- dma_free_coherent(&sep->pdev->dev, size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_virt_to_bus - convert bus/virt addresses
- *
- * Returns the bus address inside the shared area according
- * to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
- edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
- virt_address);
- return pa;
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
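The two helpers above are plain offset arithmetic over the single coherent buffer allocated by sep_map_and_alloc_shared_area(); a minimal standalone sketch of the same mapping (the names below are illustrative, not the driver's):

#include <stdint.h>

struct shared_area {
	void     *virt;	/* CPU address returned by dma_alloc_coherent() */
	uint64_t  bus;	/* matching bus/DMA address */
};

/* virt -> bus: same offset from the start of the shared buffer */
static uint64_t shared_virt_to_bus(const struct shared_area *a, void *v)
{
	return a->bus + (uint64_t)((char *)v - (char *)a->virt);
}

/* bus -> virt: inverse of the above */
static void *shared_bus_to_virt(const struct shared_area *a, uint64_t b)
{
	return (char *)a->virt + (b - a->bus);
}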
-
-/**
- * sep_try_open - attempt to open a SEP device
- * @sep: device to attempt to open
- *
- * Atomically attempt to get ownership of a SEP device.
- * Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
- if (!test_and_set_bit(0, &sep->in_use))
- return 1;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of sep device
- * @filp: file handle to sep device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node. It also resets the memory data pool
- * allocations.
- *
- * Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
- if (sep_dev == NULL)
- return -ENODEV;
-
- /* check the blocking mode */
- if (filp->f_flags & O_NDELAY) {
- if (sep_try_open(sep_dev) == 0)
- return -EAGAIN;
- } else
- if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
- return -EINTR;
-
- /* Bind to the device, we only have one which makes it easy */
- filp->private_data = sep_dev;
- /* release data pool allocations */
- sep_dev->data_pool_bytes_allocated = 0;
- return 0;
-}
-
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaneous opens, this method is called when the
- * final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
- /* Ensure any blocked open progresses */
- clear_bit(0, &sep->in_use);
- wake_up(&sep_event);
- return 0;
-}
-
-/*---------------------------------------------------------------
- map function - this function maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
-
- dbg("-------->SEP Driver: mmap start\n");
-
- /* check that the size of the mapped range does not exceed the size of the
- message shared area */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- edbg("SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
- printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n", vma->vm_start);
- return -EAGAIN;
- }
-
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- /* get bus address */
- bus_addr = sep->shared_bus;
-
- edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- edbg("SEP Driver remap_pfn_range failed\n");
- printk(KERN_WARNING "SEP Driver remap_pfn_range failed\n");
- return -EAGAIN;
- }
-
- dbg("SEP Driver:<-------- mmap end\n");
-
- return 0;
-}
-
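For context, user space maps the message area by calling mmap() on the SEP device node with a length no larger than SEP_DRIVER_MMMAP_AREA_SIZE; a hedged user-space sketch (the node path and size below are assumptions, not taken from this patch):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Path and size are illustrative; the real values come from the
	   driver registration and sep_driver_config.h. */
	const size_t map_size = 16 * 1024;
	int fd = open("/dev/sep_device", O_RDWR);
	void *msg;

	if (fd < 0)
		return 1;

	msg = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (msg == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... build the request message in msg, then send and poll ... */

	munmap(msg, map_size);
	close(fd);
	return 0;
}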
-
-/*-----------------------------------------------
- poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
- unsigned long count;
- unsigned int mask = 0;
- unsigned long retval = 0; /* flow id */
- struct sep_device *sep = filp->private_data;
-
- dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
- while (sep->send_ct != (retval & 0x7FFFFFFF)) {
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
- }
-
- sep->reply_ct++;
-#else
- /* add the event to the polling wait table */
- poll_wait(filp, &sep_event, wait);
-
-#endif
-
- edbg("sep->send_ct is %lu\n", sep->send_ct);
- edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
- /* check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- edbg("retval is %lu\n", retval);
- /* check if this is a sep reply or a request */
- if (retval >> 31) {
- edbg("SEP Driver: sep request in\n");
- /* request */
- mask |= POLLOUT | POLLWRNORM;
- } else {
- edbg("SEP Driver: sep reply in\n");
- mask |= POLLIN | POLLRDNORM;
- }
- }
- dbg("SEP Driver:<-------- poll exit\n");
- return mask;
-}
-
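In the interrupt-driven build, user space waits on the device with poll(): POLLIN/POLLRDNORM means a SEP reply is pending and POLLOUT/POLLWRNORM means SEP has raised a request. A minimal user-space wait helper along those lines (illustrative only):

#include <poll.h>

/* Wait until SEP signals activity; returns 1 for a reply (POLLIN),
   0 for a request (POLLOUT), -1 on timeout or error. */
static int wait_for_sep(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;
	return (pfd.revents & POLLIN) ? 1 : 0;
}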
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* address of time as seen by the kernel */
-
-
- dbg("sep:sep_set_time start\n");
-
- do_gettimeofday(&time);
-
- /* set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
- edbg("SEP Driver:time_addr is %p\n", time_addr);
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: sep device
- *
- * Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: sep being signalled
- *
- * This function raises an interrupt to SEP to signal that it has a new
- * command from the host
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_command_handler start\n");
-
- mutex_lock(&sep_mutex);
- sep_set_time(sep);
-
- /* FIXME: flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
- /* update counter */
- sep->send_ct++;
- /* send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
- dbg("SEP Driver:<-------- sep_send_command_handler end\n");
- mutex_unlock(&sep_mutex);
- return;
-}
-
-/**
- * sep_send_reply_command_handler - kick off a command reply
- * @sep: sep being signalled
- *
- * This function raises an interrupt to SEP to signal that it has a new
- * command reply from the host
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_reply_command_handler start\n");
-
- /* flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
-
- mutex_lock(&sep_mutex);
- sep->send_ct++; /* update counter */
- /* send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- /* update both counters */
- sep->send_ct++;
- sep->reply_ct++;
- mutex_unlock(&sep_mutex);
- dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
- This function handles the allocate data pool memory request
- This function calculates the bus address of the
- allocated memory, and the offset of this area from the mapped address.
- Therefore, the FVOs in user space can calculate the exact virtual
- address of this allocated memory
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- struct sep_driver_alloc_t command_args;
-
- dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
- if (error)
- goto end_function;
-
- /* allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
- command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
- /* write the memory back to the user space */
- error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
- if (error)
- goto end_function;
-
- /* set the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
- dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
- return error;
-}
-
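The handler above is essentially a bump allocator over the data pool region of the shared area: it returns the current offset and bus address and advances data_pool_bytes_allocated. The same bookkeeping in isolation (a sketch with illustrative names):

#include <stddef.h>
#include <stdint.h>

struct data_pool {
	uint64_t base_bus;	/* bus address of the pool start */
	size_t   size;		/* total pool size in bytes */
	size_t   used;		/* bytes already handed out */
};

/* Bump-allocate n bytes; fills *off and *bus on success, -1 when full. */
static int pool_alloc(struct data_pool *p, size_t n,
		      size_t *off, uint64_t *bus)
{
	if (p->used + n > p->size)
		return -1;
	*off = p->used;			/* offset from the pool start */
	*bus = p->base_bus + p->used;	/* matching bus address */
	p->used += n;
	return 0;
}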
-/*
- This function handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- void *virt_address;
- unsigned long va;
- unsigned long app_in_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
- error = -EINVAL;
- goto end_function;
- }
- /* copy the application data */
- error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
- return error;
-}
-
-/*
- this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* virtual address of dest application buffer */
- unsigned long app_out_address;
- /* virtual address of the data pool */
- unsigned long va;
- void *virt_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
- /* FIXME: These are incomplete all over the driver: what about + len
- and when doing that also overflows */
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* copy the application data */
- error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
- return error;
-}
-
-/*
- This function releases all the physical pages of the application virtual
- buffer that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
- unsigned long count;
-
- if (dirtyFlag) {
- for (count = 0; count < num_pages; count++) {
- /* the out array was written, therefore the data was changed */
- if (!PageReserved(page_array_ptr[count]))
- SetPageDirty(page_array_ptr[count]);
- page_cache_release(page_array_ptr[count]);
- }
- } else {
- /* free in pages - the data was only read, therefore no update was done
- on those pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array_ptr[count]);
- }
-
- if (page_array_ptr)
- /* free the array */
- kfree(page_array_ptr);
-
- return 0;
-}
-
-/*
- This function locks all the physical pages of the kernel virtual buffer
- and constructs a basic lli array, where each entry holds the physical
- page address and the size of the application data held in that physical page
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the page of the end address of the kernel buffer */
- unsigned long end_page;
- /* the page of the start address of the kernel buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct sep_lli_entry_t *lli_array;
- /* next kernel address to map */
- unsigned long next_kernel_address;
- unsigned long count;
-
- dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = kernel_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lx\n", start_page);
- edbg("SEP Driver: end_page is %lx\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the start address of the first page - app data may not start at
- the beginning of the page */
- lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
- /* check whether all the data fits in the first page */
- if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* advance the address to the start of the next page */
- next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
- /* go from the second page to the one before the last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- next_kernel_address += PAGE_SIZE;
- }
-
- /* if more than one page was locked - update the size of the last page */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
- /* set the size of the last page */
- lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = 0;
-end_function:
- dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
- return error;
-}
-
-/*
- This function locks all the physical pages of the application virtual buffer
- and construct a basic lli array, where each entry holds the physical page
- address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct page **page_array;
- struct sep_lli_entry_t *lli_array;
- unsigned long count;
- int result;
-
- dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lu\n", start_page);
- edbg("SEP Driver: end_page is %lu\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- /* allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- edbg("SEP Driver: kmalloc for page_array failed\n");
-
- error = -ENOMEM;
- goto end_function;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
-
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- /* convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
- up_read(&current->mm->mmap_sem);
-
- /* check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* flush the cache */
- for (count = 0; count < num_pages; count++)
- flush_dcache_page(page_array[count]);
-
- /* set the start address of the first page - app data may not start at
- the beginning of the page */
- lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
- /* check whether all the data fits in the first page */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* go from the second page to the one before the last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
-
- /* if more than one page was locked - update the size of the last page */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
- /* set the size of the last page */
- lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", app_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
- count, lli_array[count].physical_address,
- count, lli_array[count].block_size);
- }
-
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = page_array;
- goto end_function;
-
-end_function_with_error2:
- /* release the cache */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array[count]);
- kfree(lli_array);
-end_function_with_error1:
- kfree(page_array);
-end_function:
- dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
- return error;
-}
-
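Both page-locking helpers above split a buffer the same way: the first LLI entry covers from the start offset to the end of the first page, the middle entries are whole pages, and the last entry is the tail in the final page. A small worked sketch of just that size arithmetic (illustrative, user-space C):

#include <stdio.h>

#define PG_SIZE 4096UL

/* First and last LLI block sizes for a buffer [addr, addr + len) */
static void lli_edge_sizes(unsigned long addr, unsigned long len,
			   unsigned long *first, unsigned long *last)
{
	unsigned long room_in_first = PG_SIZE - (addr & (PG_SIZE - 1));

	*first = (room_in_first >= len) ? len : room_in_first;
	/* tail in the last page; 0 means the buffer ends page-aligned */
	*last = (addr + len) & (PG_SIZE - 1);
}

int main(void)
{
	unsigned long first, last;

	/* e.g. a 10000-byte buffer starting 100 bytes into a page */
	lli_edge_sizes(0x1000 + 100, 10000, &first, &last);
	/* prints 3996 and 1908; the single middle page holds the
	   remaining 4096 bytes */
	printf("first block %lu bytes, last block %lu bytes\n", first, last);
	return 0;
}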
-
-/*
- this function calculates the size of data that can be inserted into the lli
- table from this array; the condition is that either the table is full
- (all entries are filled) or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
- unsigned long table_data_size = 0;
- unsigned long counter;
-
- /* calculate the data in the out lli table until we fill the whole
- table or the data has ended */
- for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
- return table_data_size;
-}
-
-/*
- this function builds one lli table from the lli_array according to
- the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
- unsigned long curr_table_data_size;
- /* counter of lli array entry */
- unsigned long array_counter;
-
- dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
- /* init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
- lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
- curr_table_data_size += lli_table_ptr->block_size;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
- /* update the size of block in the table */
- lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
- /* update the physical address in the lli array */
- lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
- /* update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
- } else
- /* advance to the next entry in the lli_array */
- array_counter++;
-
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* set the info entry to default */
- lli_table_ptr->physical_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
- edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
- dbg("SEP Driver:<-------- sep_build_lli_table end\n");
- return;
-}
-
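The tables built above are chained through their last ("info") entry: the callers below overwrite it so that physical_address points at the next table and block_size packs that table's entry count in the top byte and its data size in the low 24 bits (decoded again in sep_debug_print_lli_tables). A tiny encode/decode sketch of that packing:

#include <stdint.h>

/* Info-entry block_size word: bits 31..24 = number of entries,
   bits 23..0 = table data size in bytes. */
static uint32_t lli_info_pack(uint32_t num_entries, uint32_t data_size)
{
	return (num_entries << 24) | (data_size & 0xffffff);
}

static void lli_info_unpack(uint32_t word,
			    uint32_t *num_entries, uint32_t *data_size)
{
	*num_entries = (word >> 24) & 0xff;
	*data_size = word & 0xffffff;
}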
-/*
- this function goes over the list of created tables and
- prints all their data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
- unsigned long table_count;
- unsigned long entries_count;
-
- dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
- table_count = 1;
- while ((unsigned long) lli_table_ptr != 0xffffffff) {
- edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
- edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
- /* print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
- edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
- }
-
- /* point to the info entry */
- lli_table_ptr--;
-
- edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
- edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
- lli_table_ptr = (struct sep_lli_entry_t *)
- (lli_table_ptr->physical_address);
-
- edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
- if ((unsigned long) lli_table_ptr != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
- table_count++;
- }
- dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
- This function prepares only the input DMA table for synchronous symmetric
- operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_ptr,
- unsigned long *num_entries_ptr,
- unsigned long *table_data_size_ptr,
- bool isKernelVirtualAddress)
-{
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_entry_ptr;
- /* array of pointers to pages */
- struct sep_lli_entry_t *lli_array_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_entry;
- /* num entries in the virtual buffer */
- unsigned long sep_lli_entries;
- /* lli table pointer */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* the total data in one table */
- unsigned long table_data_size;
- /* number of entries in lli table */
- unsigned long num_entries_in_table;
- /* next table address */
- void *lli_table_alloc_addr;
- unsigned long result;
-
- dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
- edbg("SEP Driver:data_size is %lu\n", data_size);
- edbg("SEP Driver:block_size is %lu\n", block_size);
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->in_num_pages = 0;
-
- if (data_size == 0) {
- /* special case - create a 2-entry table with zero data */
- in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
- /* FIXME: Should the entry below not be for _bus */
- in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- in_lli_table_ptr->block_size = 0;
-
- in_lli_table_ptr++;
- in_lli_table_ptr->physical_address = 0xFFFFFFFF;
- in_lli_table_ptr->block_size = 0;
-
- *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- goto end_function;
- }
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
- else
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
- if (result)
- return result;
-
- edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = 0;
- sep_lli_entries = sep->in_num_pages;
-
- /* initiate to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* loop till all the entries in the in array are processed */
- while (current_entry < sep_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
- /* now calculate the table size so that it will be a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
- /* the array of the pages */
- kfree(lli_array_ptr);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
- return 0;
-
-}
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
- struct sep_lli_entry_t *lli_in_array,
- unsigned long sep_in_lli_entries,
- struct sep_lli_entry_t *lli_out_array,
- unsigned long sep_out_lli_entries,
- unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
- /* points to the area where next lli table can be allocated: keep void *
- as there is pointer scaling to fix otherwise */
- void *lli_table_alloc_addr;
- /* input lli table */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* output lli table */
- struct sep_lli_entry_t *out_lli_table_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_in_entry_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_out_entry_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_in_entry;
- /* points to the first entry to be processed in the lli_out_array */
- unsigned long current_out_entry;
- /* max size of the input table */
- unsigned long in_table_data_size;
- /* max size of the output table */
- unsigned long out_table_data_size;
- /* flag that signifies if this is the first table built from the arrays */
- unsigned long first_table_flag;
- /* the data size that should be in table */
- unsigned long table_data_size;
- /* number of entries in the input table */
- unsigned long num_entries_in_table;
- /* number of entries in the output table */
- unsigned long num_entries_out_table;
-
- dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
- /* initiate to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- current_in_entry = 0;
- current_out_entry = 0;
- first_table_flag = 1;
- info_in_entry_ptr = 0;
- info_out_entry_ptr = 0;
-
- /* loop till all the entries in the in array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* set the first output tables */
- out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
- /* calculate the maximum size of data for output table */
- out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
- edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
- edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
- /* check where the data is smallest */
- table_data_size = in_table_data_size;
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /* now calculate the table size so that it will be a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
- /* construct output lli table */
- sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
- /* if info entry is null - this is the first table built */
- if (info_in_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *in_num_entries_ptr = num_entries_in_table;
- *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
- edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
- /* update the info entry of the previous out table */
- info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
- edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
- edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
- edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
- /* print output tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
- dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
- return 0;
-}
-
-
-/*
- This function builds input and output DMA tables for synchronous
- symmetric operations (AES, DES). It also checks that each table
- is a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
- /* array of pointers to pages */
- struct sep_lli_entry_t *lli_in_array;
- /* array of pointers to pages */
- struct sep_lli_entry_t *lli_out_array;
- int result = 0;
-
- dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true) {
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
- goto end_function;
- }
- } else {
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
- }
-
- if (isKernelVirtualAddress == true) {
- result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- } else {
- result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- }
- edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
- edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
- edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
- /* call the function that creates the tables from the lli arrays */
- result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
- if (result) {
- edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error2;
- }
-
- /* fall through - free the lli entry arrays */
- dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
- dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
- dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
- kfree(lli_out_array);
-end_function_with_error1:
- kfree(lli_in_array);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
- return result;
-
-}
-
-/*
- this function handles the request for creation of the DMA tables
- for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- /* command arguments */
- struct sep_driver_build_sync_table_t command_args;
-
- dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
- if (error)
- goto end_function;
-
- edbg("app_in_address is %08lx\n", command_args.app_in_address);
- edbg("app_out_address is %08lx\n", command_args.app_out_address);
- edbg("data_size is %lu\n", command_args.data_in_size);
- edbg("block_size is %lu\n", command_args.block_size);
-
- /* check if we need to build only input table or input/output */
- if (command_args.app_out_address)
- /* prepare input and output tables */
- error = sep_prepare_input_output_dma_table(sep,
- command_args.app_in_address,
- command_args.app_out_address,
- command_args.data_in_size,
- command_args.block_size,
- &command_args.in_table_address,
- &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
- else
- /* prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- command_args.app_in_address,
- command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
- if (error)
- goto end_function;
- /* copy to user */
- if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for freeing the dma tables for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
- /* free input pages array */
- sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
- /* free output pages array if needed */
- if (sep->out_page_array)
- sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
- /* reset all the values */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
- sep->in_num_pages = 0;
- sep->out_num_pages = 0;
- dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
- return 0;
-}
-
-/*
- this function finds a space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
- unsigned long **table_address_ptr)
-{
- int error = 0;
- /* pointer to the id field of the flow dma table */
- unsigned long *start_table_ptr;
- /* Do not make start_addr unsigned long * unless fixing the offset
- computations ! */
- void *flow_dma_area_start_addr;
- unsigned long *flow_dma_area_end_addr;
- /* maximum table size in words */
- unsigned long table_size_in_words;
-
- /* find the start address of the flow DMA table area */
- flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* set end address of the flow table area */
- flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
- /* set table size in words */
- table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
- /* set the pointer to the start address of DMA area */
- start_table_ptr = flow_dma_area_start_addr;
-
- /* find the space for the next table */
- while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
- start_table_ptr += table_size_in_words;
-
- /* check if we reached the end of the flow tables area */
- if (start_table_ptr >= flow_dma_area_end_addr)
- error = -1;
- else
- *table_address_ptr = start_table_ptr;
-
- return error;
-}
-
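The search above walks the flow-table area in fixed strides of table_size_in_words: room for the maximum number of LLI entries plus two bookkeeping words (the entry count and the page-array pointer written by sep_prepare_one_flow_dma_table below), and a slot counts as free when its first word is clear apart from the top bit. The same stride scan in isolation (sizes are placeholders):

#include <stddef.h>

#define MAX_FLOW_ENTRIES 8	/* placeholder for the real config value */
#define WORDS_PER_ENTRY  2	/* physical_address + block_size */
#define SLOT_WORDS (MAX_FLOW_ENTRIES * WORDS_PER_ENTRY + 2)

/* Return the first free slot in [area, area + area_words), or NULL. */
static unsigned long *find_free_slot(unsigned long *area, size_t area_words)
{
	unsigned long *end = area + area_words;
	unsigned long *p;

	for (p = area; p + SLOT_WORDS <= end; p += SLOT_WORDS)
		if ((*p & 0x7FFFFFFF) == 0)	/* low 31 bits clear => free */
			return p;
	return NULL;
}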
-/*
- This function creates one DMA table for flow and returns its data,
- and pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
- unsigned long virt_buff_addr,
- unsigned long virt_buff_size,
- struct sep_lli_entry_t *table_data,
- struct sep_lli_entry_t **info_entry_ptr,
- struct sep_flow_context_t *flow_data_ptr,
- bool isKernelVirtualAddress)
-{
- int error;
- /* the range in pages */
- unsigned long lli_array_size;
- struct sep_lli_entry_t *lli_array;
- struct sep_lli_entry_t *flow_dma_table_entry_ptr;
- unsigned long *start_dma_table_ptr;
- /* total table data counter */
- unsigned long dma_table_data_count;
- /* pointer that will keep the pointer to the pages of the virtual buffer */
- struct page **page_array_ptr;
- unsigned long entry_count;
-
- /* find the space for the new table */
- error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
- if (error)
- goto end_function;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock kernel buffer in the memory */
- error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
- else
- /* lock user buffer in the memory */
- error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
- if (error)
- goto end_function;
-
- /* set the pointer to page array at the beginning of table - this table is
- now considered taken */
- *start_dma_table_ptr = lli_array_size;
-
- /* point to the place of the pages pointers of the table */
- start_dma_table_ptr++;
-
- /* set the pages pointer */
- *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
- /* set the pointer to the first entry */
- flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
- /* now create the entries for table */
- for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
- flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
- flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
- /* set the total data of a table */
- dma_table_data_count += lli_array[entry_count].block_size;
-
- flow_dma_table_entry_ptr++;
- }
-
- /* set the physical address */
- table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
- /* set the num_entries and total data size */
- table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
- /* set the info entry */
- flow_dma_table_entry_ptr->physical_address = 0xffffffff;
- flow_dma_table_entry_ptr->block_size = 0;
-
- /* set the pointer to info entry */
- *info_entry_ptr = flow_dma_table_entry_ptr;
-
- /* free the array of the lli entries */
- kfree(lli_array);
-end_function:
- return error;
-}
-
-
-
-/*
- This function creates a list of tables for flow and returns the data for
- the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
- unsigned long num_virtual_buffers,
- unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
- int error;
- unsigned long virt_buff_addr;
- unsigned long virt_buff_size;
- struct sep_lli_entry_t table_data;
- struct sep_lli_entry_t *info_entry_ptr;
- struct sep_lli_entry_t *prev_info_entry_ptr;
- unsigned long i;
-
- /* init vars */
- error = 0;
- prev_info_entry_ptr = 0;
-
- /* init the first table to default */
- table_data.physical_address = 0xffffffff;
- first_table_data_ptr->physical_address = 0xffffffff;
- table_data.block_size = 0;
-
- for (i = 0; i < num_virtual_buffers; i++) {
- /* get the virtual buffer address */
- error = get_user(virt_buff_addr, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* get the virtual buffer size */
- first_buff_addr++;
- error = get_user(virt_buff_size, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* advance the address to point to the next pair of address|size */
- first_buff_addr++;
-
- /* now prepare the one flow LLI table from the data */
- error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
- if (error)
- goto end_function;
-
- if (i == 0) {
- /* if this is the first table - save it to return to the user
- application */
- *first_table_data_ptr = table_data;
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- } else {
- /* not first table - the previous table info entry should
- be updated */
- prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- }
- }
-
- /* set the last table data */
- *last_table_data_ptr = table_data;
-end_function:
- return error;
-}
-
-/*
- this function goes over all the flow tables connected to the given
- table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
- /* id pointer */
- unsigned long *table_ptr;
- /* number of entries in the current table */
- unsigned long num_entries;
- unsigned long num_pages;
- struct page **pages_ptr;
- /* pointer to the info entry of the current table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- /* set the pointer to the first table */
- table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
- /* set the num of entries */
- num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
- & SEP_NUM_ENTRIES_MASK;
-
- /* go over all the connected tables */
- while (*table_ptr != 0xffffffff) {
- /* get number of pages */
- num_pages = *(table_ptr - 2);
-
- /* get the pointer to the pages */
- pages_ptr = (struct page **) (*(table_ptr - 1));
-
- /* free the pages */
- sep_free_dma_pages(pages_ptr, num_pages, 1);
-
- /* go to the info entry */
- info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
- table_ptr = (unsigned long *) info_entry_ptr->physical_address;
- num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
- }
-
- return;
-}
-
-/**
- * sep_find_flow_context - find a flow
- * @sep: the SEP we are working with
- * @flow_id: flow identifier
- *
- * Returns a pointer to the matching flow, or NULL if the flow does not
- * exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
- unsigned long flow_id)
-{
- int count;
- /*
- * always search for the flow with the default id first - once we have
- * started working on a flow there can be no situation in which two
- * flows carry the default flag
- */
- for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
- if (sep->flows[count].flow_id == flow_id)
- return &sep->flows[count];
- }
- return NULL;
-}
-
-
-/*
- this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = -ENOENT;
- struct sep_driver_build_flow_table_t command_args;
- /* first table - output */
- struct sep_lli_entry_t first_table_data;
- /* dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the previous DMA table */
- struct sep_lli_entry_t *prev_info_entry_ptr;
- /* pointer to the flow data structure */
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
- /* init variables */
- prev_info_entry_ptr = 0;
- first_table_data.physical_address = 0xffffffff;
-
- /* find the free structure for flow data */
- error = -EINVAL;
- flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
- if (error)
- goto end_function;
-
- /* create flow tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* check if flow is static */
- if (!command_args.flow_type)
- /* point the info entry of the last to the info entry of the first */
- last_table_data = first_table_data;
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
- if (error)
- goto end_function_with_error;
-
- /* all the flow created - update the flow entry with temp id */
- flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
- /* set the processing tables data in the context */
- if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
- flow_context_ptr->input_tables_in_process = first_table_data;
- else
- flow_context_ptr->output_tables_in_process = first_table_data;
-
- goto end_function;
-
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles add tables to flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- unsigned long num_entries;
- struct sep_driver_add_flow_table_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
- /* first dma table data */
- struct sep_lli_entry_t first_table_data;
- /* last dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the current DMA table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
- /* get input parameters */
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
- if (error)
- goto end_function;
-
- /* find the flow structure for the flow id */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* prepare the flow dma tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* now check if there is already an existing add table for this flow */
- if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
- /* this buffer was for input buffers */
- if (flow_context_ptr->input_tables_flag) {
- /* add table already exists - add the new tables to the end
- of the previous */
- num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_input_table;
- } else {
- /* set the input flag */
- flow_context_ptr->input_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_input_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_input_table = last_table_data;
- } else { /* this is output tables */
-
- /* this buffer was for input buffers */
- if (flow_context_ptr->output_tables_flag) {
- /* add table already exists - add the new tables to
- the end of the previous */
- num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_output_table;
- } else {
- /* set the input flag */
- flow_context_ptr->output_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_output_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_output_table = last_table_data;
- }
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
- return error;
-}
-
-/*
- this function add the flow add message to the specific flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_add_message_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
- if (error)
- goto end_function;
-
- /* check input */
- if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* find the flow context */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* copy the message into context */
- flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
- error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
- return error;
-}
-
-
-/*
- this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_static_pool_addr_t command_args;
-
- dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
- /* prepare the output parameters in the struct */
- command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
- command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
- dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
- return error;
-}
-
-/*
- this function gets the offset of the physical address from the start
- of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_get_mapped_offset_t command_args;
-
- dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error)
- goto end_function;
-
- if (command_args.physical_address < sep->shared_bus) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* prepare the output parameters in the struct */
- command_args.offset = command_args.physical_address - sep->shared_bus;
-
- edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
-end_function:
- dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
- return error;
-}
-
-
-/*
- This function handles the start command to SEP: it polls for a message
- from SEP and, on a fatal error indication, returns the error status
- read from GPR0
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
- unsigned long reg_val;
- unsigned long error = 0;
-
- dbg("SEP Driver:--------> sep_start_handler start\n");
-
- /* wait in polling for message from SEP */
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg_val);
-
- /* check the value */
- if (reg_val == 0x1)
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- dbg("SEP Driver:<-------- sep_start_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
- unsigned long message_word;
- unsigned long *message_ptr;
- struct sep_driver_init_t command_args;
- unsigned long counter;
- unsigned long error;
- unsigned long reg_val;
-
- dbg("SEP Driver:--------> sep_init_handler start\n");
- error = 0;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
-
- dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
-
- if (error)
- goto end_function;
-
- /* PATCH - configure the DMA to single-burst instead of multi-burst */
- /*sep_configure_dma_burst(); */
-
- dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
- message_ptr = (unsigned long *) command_args.message_addr;
-
- /* set the base address of the SRAM */
- sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
- for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
- get_user(message_word, message_ptr);
- /* write data to SRAM */
- sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
- edbg("SEP Driver:message_word is %lu\n", message_word);
- /* wait for write complete */
- sep_wait_sram_write(sep);
- }
- dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
- /* signal SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!(reg_val & 0xFFFFFFFD));
-
- dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
- /* check the value */
- if (reg_val == 0x1) {
- edbg("SEP Driver:init failed\n");
-
- error = sep_read_reg(sep, 0x8060);
- edbg("SEP Driver:sw monitor is %lu\n", error);
-
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver:error is %lu\n", error);
- }
-end_function:
- dbg("SEP Driver:<-------- sep_init_handler end\n");
- return error;
-
-}
-
-/*
- this function handles the cache and resident reallocation request
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
- unsigned long arg)
-{
- struct sep_driver_realloc_cache_resident_t command_args;
- int error;
-
- /* copy cache and resident to the their intended locations */
- error = sep_load_firmware(sep);
- if (error)
- return error;
-
- command_args.new_base_addr = sep->shared_bus;
-
- /* find the new base address according to the lowest address between
- cache, resident and shared area */
- if (sep->resident_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->resident_bus;
- if (sep->rar_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->rar_bus;
-
- /* set the return parameters */
- command_args.new_cache_addr = sep->rar_bus;
- command_args.new_resident_addr = sep->resident_bus;
-
- /* set the new shared area */
- command_args.new_shared_area_addr = sep->shared_bus;
-
- edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
- edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
- edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
- edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
- /* return to user */
- if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * sep_get_time_handler - time request from user space
- * @sep: sep we are to set the time for
- * @arg: pointer to user space arg buffer
- *
- * This function reports back the time and the address in the SEP
- * shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
- struct sep_driver_get_time_t command_args;
-
- mutex_lock(&sep_mutex);
- command_args.time_value = sep_set_time(sep);
- command_args.time_physical_address = (unsigned long)sep_time_address(sep);
- mutex_unlock(&sep_mutex);
- if (copy_to_user((void __user *)arg,
- &command_args, sizeof(struct sep_driver_get_time_t)))
- return -EFAULT;
- return 0;
-
-}
-
-/*
- This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
- dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
- /* lock the sep mutex */
- mutex_unlock(&sep_mutex);
-#endif
-
- dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
- return 0;
-}
-
-
-/**
- * sep_set_flow_id_handler - handle flow setting
- * @sep: the SEP we are configuring
- * @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
- unsigned long flow_id)
-{
- int error = 0;
- struct sep_flow_context_t *flow_data_ptr;
-
- /* find the flow data structure that was just used for creating new flow
- - its id should be default */
-
- mutex_lock(&sep_mutex);
- flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
- if (flow_data_ptr)
- flow_data_ptr->flow_id = flow_id; /* set flow id */
- else
- error = -EINVAL;
- mutex_unlock(&sep_mutex);
- return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- dbg("------------>SEP Driver: ioctl start\n");
-
- edbg("SEP Driver: cmd is %x\n", cmd);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* send command to SEP */
- sep_send_command_handler(sep);
- edbg("SEP Driver: after sep_send_command_handler\n");
- break;
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* send reply command to SEP */
- sep_send_reply_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCWRITEDATAPOLL:
- /* write data into memory pool */
- error = sep_write_into_data_pool_handler(sep, arg);
- break;
- case SEP_IOCREADDATAPOLL:
- /* read data from data pool into application memory */
- error = sep_read_from_data_pool_handler(sep, arg);
- break;
- case SEP_IOCCREATESYMDMATABLE:
- /* create dma table for synchronous operation */
- error = sep_create_sync_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCCREATEFLOWDMATABLE:
- /* create flow dma tables */
- error = sep_create_flow_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCFREEDMATABLEDATA:
- /* free the pages */
- error = sep_free_dma_table_data_handler(sep);
- break;
- case SEP_IOCSETFLOWID:
- /* set flow id */
- error = sep_set_flow_id_handler(sep, (unsigned long)arg);
- break;
- case SEP_IOCADDFLOWTABLE:
- /* add tables to the dynamic flow */
- error = sep_add_flow_tables_handler(sep, arg);
- break;
- case SEP_IOCADDFLOWMESSAGE:
- /* add message of add tables to flow */
- error = sep_add_flow_tables_message_handler(sep, arg);
- break;
- case SEP_IOCSEPSTART:
- /* start command to sep */
- error = sep_start_handler(sep);
- break;
- case SEP_IOCSEPINIT:
- /* init command to sep */
- error = sep_init_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* get the physical and virtual addresses of the static pool */
- error = sep_get_static_pool_addr_handler(sep, arg);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep, arg);
- break;
- case SEP_IOCREALLOCCACHERES:
- error = sep_realloc_cache_resident_handler(sep, arg);
- break;
- case SEP_IOCGETMAPPEDADDROFFSET:
- error = sep_get_physical_mapped_offset_handler(sep, arg);
- break;
- case SEP_IOCGETIME:
- error = sep_get_time_handler(sep, arg);
- break;
- default:
- error = -ENOTTY;
- break;
- }
- dbg("SEP Driver:<-------- ioctl end\n");
- return error;
-}
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
- struct sep_flow_context_t *flow_data_ptr;
-
- /* obtain the mutex */
- mutex_lock(&sep_mutex);
-
- /* get the pointer to context */
- flow_data_ptr = (struct sep_flow_context_t *) work;
-
- /* free all the current input tables in sep */
- sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
- /* free all the current output tables in SEP (if needed) */
- if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
- sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
- /* check if we have additional tables to be sent to SEP; only the
- input flag needs to be checked */
- if (flow_data_ptr->input_tables_flag) {
- /* copy the message to the shared RAM and signal SEP */
- memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
- }
- mutex_unlock(&sep_mutex);
-}
-/*
- interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error;
- unsigned long reg_val;
- unsigned long flow_id;
- struct sep_flow_context_t *flow_context_ptr;
- struct sep_device *sep = dev_id;
-
- int_error = IRQ_HANDLED;
-
- /* read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
- edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
- /* check if this is the flow interrupt */
- if (0 /*reg_val & (0x1 << 11) */ ) {
- /* read GPR0 to find out which flow is done */
- flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- /* find the context of the flow */
- flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
- if (flow_context_ptr == NULL)
- goto end_function_with_error;
-
- /* queue the work */
- INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
- queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
- } else {
- /* check if this is reply interrupt from SEP */
- if (reg_val & (0x1 << 13)) {
- /* update the counter of reply messages */
- sep->reply_ct++;
- /* wake up the waiting process */
- wake_up(&sep_event);
- } else {
- int_error = IRQ_NONE;
- goto end_function;
- }
- }
-end_function_with_error:
- /* clear the interrupt */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
- return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
- u32 reg;
-
- do {
- reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
- } while (reg);
-}
-
-/*
- PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
- /* request access to registers from SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
-
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
-
- /* set the DMA burst register to single burst */
- sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
- /* release the sep busy */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
-
-}
-
-#endif
-
-/*
- Function that is activated on the successful probe of the SEP device
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
- int counter;
- int size; /* size of memory for allocation */
-
- edbg("Sep pci probe starting\n");
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- edbg("error enabling pci device\n");
- goto end_function;
- }
-
- /* set the pci dev pointer */
- sep_dev = &sep_instance;
- sep = &sep_instance;
-
- edbg("sep->shared_addr = %p\n", sep->shared_addr);
- /* transaction counter that coordinates the transactions between SEP
- and HOST */
- sep->send_ct = 0;
- /* counter for the messages from sep */
- sep->reply_ct = 0;
- /* counter for the number of bytes allocated in the pool
- for the current transaction */
- sep->data_pool_bytes_allocated = 0;
-
- /* calculate the total size for allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- /* allocate the shared area */
- if (sep_map_and_alloc_shared_area(sep, size)) {
- error = -ENOMEM;
- /* allocation failed */
- goto end_function_error;
- }
- /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
- /* Note: this test section will need moving before it could ever
- work as the registers are not yet mapped ! */
- /* send the new SHARED MESSAGE AREA to the SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* poll for SEP response */
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
- while (retval != 0xffffffff && retval != sep->shared_bus)
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* check the return value (register) */
- if (retval != sep->shared_bus) {
- error = -ENOMEM;
- goto end_function_deallocate_sep_shared_area;
- }
-#endif
- /* init the flow contexts */
- for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
- sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
- sep->flow_wq = create_singlethread_workqueue("sepflowwq");
- if (sep->flow_wq == NULL) {
- error = -ENOMEM;
- edbg("sep_driver:flow queue creation failed\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver: create flow workqueue \n");
- sep->pdev = pci_dev_get(pdev);
-
- sep->reg_addr = pci_ioremap_bar(pdev, 0);
- if (!sep->reg_addr) {
- edbg("sep: ioremap of registers failed.\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
- /* load the rom code */
- sep_load_rom_code(sep);
-
- /* set up system base address and shared memory location */
- sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
- 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- &sep->rar_bus, GFP_KERNEL);
-
- if (!sep->rar_addr) {
- edbg("SEP Driver:can't allocate rar\n");
- goto end_function_uniomap;
- }
-
-
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
- edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
- /* clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- edbg("SEP Driver: about to call request_irq\n");
- /* get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
- if (error)
- goto end_function_free_res;
- return 0;
- edbg("SEP Driver: about to write IMR REG_ADDR");
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
- dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- sep->rar_addr, sep->rar_bus);
-#endif /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
- iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
- /* de-allocate shared area */
- sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
- sep_dev = NULL;
-end_function:
- return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe
- /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the files operations structure of the driver */
-static struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
- this function registers the driver to the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
- int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
- if (ret_val) {
- edbg("sep: major number allocation failed, retval is %d\n",
- ret_val);
- return ret_val;
- }
- /* init cdev */
- cdev_init(&sep_cdev, &sep_file_operations);
- sep_cdev.owner = THIS_MODULE;
-
- /* register the driver with the kernel */
- ret_val = cdev_add(&sep_cdev, sep_devno, 1);
- if (ret_val) {
- edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- }
- return ret_val;
-}
-
-
-/*--------------------------------------------------------------
- init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
- int ret_val = 0;
- dbg("SEP Driver:-------->Init start\n");
- /* FIXME: Probe can occur before we are ready to survive a probe */
- ret_val = pci_register_driver(&sep_pci_driver);
- if (ret_val) {
- edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
- goto end_function_unregister_from_fs;
- }
- /* register driver to fs */
- ret_val = sep_register_driver_to_fs();
- if (ret_val)
- goto end_function_unregister_pci;
- goto end_function;
-end_function_unregister_pci:
- pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
-end_function:
- dbg("SEP Driver:<-------- Init end\n");
- return ret_val;
-}
-
-
-/*-------------------------------------------------------------
- exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
- int size;
-
- dbg("SEP Driver:--------> Exit start\n");
-
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- /* calculate the total size for de-allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
- /* FIXME: We need to do this in the unload for the device */
- /* free shared area */
- if (sep_dev) {
- sep_unmap_and_free_shared_area(sep_dev, size);
- edbg("SEP Driver: free pages SEP SHARED AREA \n");
- iounmap((void *) sep_dev->reg_addr);
- edbg("SEP Driver: iounmap \n");
- }
- edbg("SEP Driver: release_mem_region \n");
- dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
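
For reference, here is a minimal user-space sketch of driving the ioctl
interface removed above. SEP_IOCGETIME and the sep_driver_get_time_t fields
come from the code; the device node path is an assumption, since the actual
/dev name depends on how the node is created for the "sep_sec_driver" char
device.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sep_driver_api.h"	/* SEP_IOCGETIME, struct sep_driver_get_time_t */

int main(void)
{
	struct sep_driver_get_time_t t;
	int fd = open("/dev/sep_sec_driver", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	/* ask the driver to stamp the time into the shared area and report it */
	if (ioctl(fd, SEP_IOCGETIME, &t) == 0)
		printf("SEP time %lu placed at bus address 0x%lx\n",
		       (unsigned long)t.time_value,
		       (unsigned long)t.time_physical_address);
	close(fd);
	return 0;
}
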
diff --git a/drivers/staging/sep/sep_ext_with_pci_driver.c b/drivers/staging/sep/sep_ext_with_pci_driver.c
new file mode 100644
index 0000000..68da00e
--- /dev/null
+++ b/drivers/staging/sep/sep_ext_with_pci_driver.c
@@ -0,0 +1,548 @@
+/*
+ *
+ * sep_ext_with_pci_driver.c - Security Processor Driver
+ * pci initialization functions
+ *
+ *
+ **************************************************************************
+ * Copyright 2009 (c) Discretix Technologies Ltd. *
+ * Copyright 2009 (c) Intel Corporation *
+ * *
+ * This software is protected by copyright, international treaties and *
+ * various patents. Any copy or reproduction of this Software as *
+ * permitted below, must include this Copyright Notice as well as any *
+ * other notices provided under such license. *
+ * *
+ * This program shall be governed by, and may be used and redistributed *
+ * under the terms and conditions of the GNU General Public License, *
+ * version 2, as published by the Free Software Foundation. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY liability and WARRANTY; without even the implied *
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+ * See the GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, please write to the Free Software *
+ * Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ **************************************************************************
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.01.08 Initial publish
+ *
+ */
+
+#define DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/semaphore.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/ioctl.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+
+#include <linux/rar/memrar.h>
+#include <linux/rar/rar_register.h>
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_driver_ext_api.h"
+
+/* NOTE - must be defined specific to the board */
+#define VENDOR_ID PCI_VENDOR_ID_INTEL
+
+/*-----------------------------
+ private functions
+--------------------------------*/
+
+/*
+ This function locks the area of the resident and cache SEP code
+*/
+int sep_lock_cache_resident_area(void)
+{
+ int error;
+ error = rar_lock(RAR_TYPE_IMAGE);
+ if (error) {
+ pr_debug("SEP Driver:cant lock RAR region %d\n",
+ error);
+ return error;
+ }
+ return 0;
+}
+
+
+/*
+ This function copies the cache and resident images from their source
+ location into destination memory, which is external to the Linux VM
+ and is given as a physical address
+*/
+int sep_copy_cache_resident_to_area(struct device_context *sep_context_ptr,
+ dma_addr_t *dst_new_cache_addr_ptr,
+ dma_addr_t *dst_new_resident_addr_ptr,
+ dma_addr_t *dst_new_dcache_addr_ptr)
+
+{
+ const struct firmware *fw;
+
+ /* firmware regions */
+ size_t cache_size;
+ void *cache_virtual_address;
+
+ size_t resident_size;
+ void *resident_virtual_address;
+
+ char *cache_name = "cache.image.bin";
+ char *res_name = "resident.image.bin";
+
+ /* error */
+ int error;
+
+ /*--------------------------------
+ CODE
+ -------------------------------------*/
+ error = 0;
+
+ pr_debug("SEP Driver:rar_virtual is %p\n",
+ sep_context_ptr->rar_virtual_address);
+ pr_debug("SEP Driver:rar_physical is %08lx\n",
+ (unsigned long)sep_context_ptr->rar_physical_address);
+
+ *dst_new_cache_addr_ptr = sep_context_ptr->rar_physical_address;
+ cache_virtual_address = sep_context_ptr->rar_virtual_address;
+
+ /* load cache */
+ error = request_firmware(&fw, cache_name, &sep_context_ptr->pdev->dev);
+ if (error) {
+ pr_debug("SEP Driver:cant request cache fw\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:cache data loc is %p\n",
+ (void *)fw->data);
+ pr_debug("SEP Driver:cache data size is %08lx\n",
+ (unsigned long)fw->size);
+
+ memcpy(cache_virtual_address, fw->data, fw->size);
+
+ cache_size = fw->size;
+
+ release_firmware(fw);
+
+ *dst_new_resident_addr_ptr = *dst_new_cache_addr_ptr+cache_size;
+ resident_virtual_address = cache_virtual_address+cache_size;
+
+ /* load resident */
+ error = request_firmware(&fw, res_name, &sep_context_ptr->pdev->dev);
+ if (error) {
+ pr_debug("SEP Driver:cant request res fw\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:res data loc is %p\n",
+ (void *)fw->data);
+ pr_debug("SEP Driver:res data size is %08x\n",
+ fw->size);
+
+ memcpy(resident_virtual_address, fw->data, fw->size);
+
+ resident_size = fw->size;
+
+ release_firmware(fw);
+
+ *dst_new_dcache_addr_ptr = *dst_new_resident_addr_ptr+resident_size;
+
+ pr_debug("SEP Driver:resident_addr (physical )is %08lx\n",
+ (unsigned long)*dst_new_resident_addr_ptr);
+ pr_debug("SEP Driver:cache_addr (physical) is %08lx\n",
+ (unsigned long)*dst_new_cache_addr_ptr);
+
+ pr_debug("SEP Driver:resident_addr (logical )is %08lx\n",
+ (unsigned long)resident_virtual_address);
+ pr_debug("SEP Driver:cache_addr (logical) is %08lx\n",
+ (unsigned long)cache_virtual_address);
+
+ pr_debug("SEP Driver:resident_size is %08lx\n",
+ (unsigned long)resident_size);
+ pr_debug("SEP Driver:cache_size is %08lx\n",
+ (unsigned long)cache_size);
+
+end_function:
+
+ return error;
+}
+
+/*
+ This function maps and allocates the shared area on the external
+ RAM (device). The size to allocate is taken from the context's
+ shared_area_size field; on success the kernel virtual address and
+ the bus address of the area are stored in the context's
+ shared_area_virt_addr and shared_area_bus_addr fields
+*/
+int sep_map_and_alloc_shared_area(struct device_context *sep_context_ptr)
+
+{
+
+ if (!sep_context_ptr) {
+ pr_debug(
+ "sep_driver:alloc_shared_area invalid parameter\n");
+ WARN_ON(!sep_context_ptr);
+ return -EINVAL;
+ }
+
+ sep_context_ptr->shared_area_virt_addr =
+ dma_alloc_coherent(&sep_context_ptr->pdev->dev,
+ sep_context_ptr->shared_area_size,
+ &sep_context_ptr->shared_area_bus_addr,
+ GFP_KERNEL);
+
+ if (!sep_context_ptr->shared_area_virt_addr) {
+ pr_debug(
+ "sep_driver:shared area dma_alloc_coherent failed\n");
+ return -ENOMEM;
+ }
+
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_area_virt_addr is %p\n",
+ sep_context_ptr->shared_area_virt_addr);
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_region_size is %08lx\n",
+ (unsigned long)sep_context_ptr->shared_area_size);
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_area_bus_addr is %08lx\n",
+ (unsigned long)sep_context_ptr->shared_area_bus_addr);
+
+ return 0;
+}
+
+/*
+ This function unmaps and frees the shared area on the
+ external RAM (device). The input parameter is a pointer to
+ the device context
+*/
+void sep_unmap_and_free_shared_area(struct device_context *sep_context_ptr)
+{
+ dma_free_coherent(&sep_context_ptr->pdev->dev,
+ sep_context_ptr->shared_area_size,
+ sep_context_ptr->shared_area_virt_addr,
+ sep_context_ptr->shared_area_bus_addr);
+}
+
+
+/*
+ This function returns the bus (physical) address inside the shared area
+ that corresponds to the given virtual address. The area can be either on
+ the external RAM device (ioremapped) or in system RAM; this
+ implementation is for the external RAM
+*/
+dma_addr_t sep_shared_area_virt_to_bus(struct device_context *sep_context_ptr,
+ void *virt_address)
+{
+ pr_debug("SEP Driver:sh virt to phys v %08lx\n",
+ (unsigned long)virt_address);
+ pr_debug("SEP Driver:sh virt to phys p %08lx\n",
+ (unsigned long)(sep_context_ptr->shared_area_bus_addr
+ + (virt_address - sep_context_ptr->shared_area_virt_addr)));
+
+ return sep_context_ptr->shared_area_bus_addr
+ + (size_t)(virt_address - sep_context_ptr->shared_area_virt_addr);
+}
+
+/*
+ This function returns the virtual address inside the shared area
+ that corresponds to the given physical (bus) address. The area can be
+ either on the external RAM device (ioremapped) or in system RAM; this
+ implementation is for the external RAM
+*/
+
+void *sep_shared_area_bus_to_virt(struct device_context *sep_context_ptr,
+ dma_addr_t bus_address)
+{
+ return sep_context_ptr->shared_area_virt_addr
+ + (size_t)(bus_address - sep_context_ptr->shared_area_bus_addr);
+}
+
+/*
+ * This function translates the RAR handle into the bus address
+ */
+int sep_rar_handle_to_bus(void *rar_handle , dma_addr_t *rar_bus)
+{
+ struct RAR_buffer buf;
+ static size_t const BUF_COUNT = 1;
+
+ pr_debug("SEP Driver - sep_rar_handle_to_bus %08lx\n",
+ (unsigned long)rar_handle);
+
+ if (rar_bus == NULL)
+ return -EINVAL;
+
+ buf.info.handle = (unsigned long)rar_handle;
+
+ if (rar_handle_to_bus(&buf, BUF_COUNT) != BUF_COUNT)
+ return -1;
+
+ *rar_bus = buf.bus_address;
+
+ return 0;
+}
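+
+/*
+ * Usage sketch (illustrative only; sep_dma_to_sep() is a hypothetical
+ * caller, not part of this driver):
+ *
+ *	dma_addr_t rar_bus;
+ *
+ *	if (sep_rar_handle_to_bus(rar_handle, &rar_bus))
+ *		return -EINVAL;
+ *	sep_dma_to_sep(rar_bus, num_bytes);
+ */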
+
+/*
+ function that is activated on the successful probe of the SEP device
+*/
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ /* error */
+ int error;
+
+ /* io memory (register area) */
+ static dma_addr_t iomem_start_physical;
+ static dma_addr_t iomem_end_physcal;
+ size_t io_memory_size;
+ void __iomem *iomem_start_virtual;
+
+ /*------------------------
+ CODE
+ ---------------------------*/
+
+ pr_debug("Sep pci probe starting\n");
+ error = 0;
+
+ if (sep_context.pdev != NULL) {
+ pr_debug("Only one device supported\n");
+ return -EBUSY;
+ }
+
+ /* enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ pr_debug("error enabling pci device\n");
+ return -ENODEV;
+ }
+
+ /* set the pci dev pointer */
+ sep_context.pdev = pdev;
+
+ /* get the io memory start address */
+ iomem_start_physical = pci_resource_start(pdev, 0);
+ if (!iomem_start_physical) {
+ pr_debug("SEP Driver error pci resource start\n");
+ goto end_function;
+ }
+
+ /* get the io memory end address */
+ iomem_end_physcal = pci_resource_end(pdev, 0);
+ if (!iomem_end_physcal) {
+ pr_debug("SEP Driver error pci resource end\n");
+ goto end_function;
+ }
+
+ io_memory_size = iomem_end_physcal -
+ iomem_start_physical + 1;
+
+ pr_debug("SEP Driver:iomem_start_physical is %08lx\n",
+ (unsigned long)iomem_start_physical);
+
+ pr_debug("SEP Driver:io_memory_end_phyaical_address is %08lx\n",
+ (unsigned long)iomem_end_physcal);
+
+ pr_debug("SEP Driver:io_memory_size is %08lx\n",
+ (unsigned long)io_memory_size);
+
+ iomem_start_virtual = pci_ioremap_bar(pdev, 0);
+ if (!iomem_start_virtual) {
+ pr_debug("SEP Driver pci_iomem failed\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:iomem_start_virtual is %p\n",
+ iomem_start_virtual);
+
+ sep_context.reg_addr = iomem_start_virtual;
+
+ /* call the register_rar function (this will use the sep_callback
+ * function to perform the rar related activities; the callback is
+ * used in case we are running before the rar_register driver is
+ * up). */
+ error = register_rar(&sep_callback, (void *)&sep_context);
+
+ if (error) {
+ pr_debug("SEP Driver: register_rar failed\n");
+ WARN_ON(error);
+ goto end_function;
+ }
+
+#if !SEP_DRIVER_POLLING_MODE
+
+ /* clear ICR register */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_ICR_REG_ADDR,
+ 0xFFFFFFFF);
+
+ /* set the IMR register - open only GPR 2 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_IMR_REG_ADDR,
+ (~(0x1 << 13)));
+
+ sep_context.sep_irq = pdev->irq;
+
+ pr_debug("SEP Driver: my irq is %d\n", sep_context.sep_irq);
+
+ error = request_irq(sep_context.sep_irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", &sep_context.reg_addr);
+
+ if (error) {
+ pr_debug("SEP Driver: Unable to request_irq\n");
+ goto end_function;
+ }
+
+ /* set the IMR register - open only GPR 2 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_IMR_REG_ADDR,
+ (~(0x1 << 13)));
+
+#endif /* Interrupt mode (!POLLING) */
+
+ goto end_ok_function;
+
+end_function:
+
+ if (sep_context.reg_addr) {
+ iounmap(sep_context.reg_addr);
+ sep_context.reg_addr = NULL;
+ }
+
+ if (sep_context.pdev) {
+ pci_disable_device(sep_context.pdev);
+ sep_context.pdev = NULL;
+ }
+
+end_ok_function:
+
+ return error;
+}
+
+/* following is the callback for the rar_register driver to call in case
+ * we attempted to register too soon
+ */
+
+int sep_callback(void *sep_context_pointer)
+{
+ int error;
+ struct device_context *my_sep_context;
+
+ my_sep_context = (struct device_context *)sep_context_pointer;
+
+ /* set up rar base address from rar_register driver*/
+ error = rar_get_address(RAR_TYPE_IMAGE,
+ &my_sep_context->rar_start_address,
+ &my_sep_context->rar_end_address);
+
+ if (error) {
+ pr_debug(
+ "SEP Driver:cant get RAR region - error is %d\n",
+ error);
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:retrieved RAR start is %08lx\n",
+ (unsigned long)my_sep_context->rar_start_address);
+ pr_debug("SEP Driver:retrieved RAR end is %08lx\n",
+ (unsigned long)my_sep_context->rar_end_address);
+
+ /* now make sure we 'own' this address range */
+ my_sep_context->rar_size =
+ (size_t)(my_sep_context->rar_end_address -
+ my_sep_context->rar_start_address + 1);
+
+ if (!request_mem_region(my_sep_context->rar_start_address,
+ my_sep_context->rar_size, "sep_sec_driver")) {
+ pr_debug(
+ "SEP Driver:request mem region failed for rar\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ my_sep_context->rar_virtual_address = ioremap_nocache(
+ my_sep_context->rar_start_address, my_sep_context->rar_size);
+ if (!my_sep_context->rar_virtual_address) {
+ pr_debug(
+ "SEP Driver:cant ioremap_nocache rar\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ my_sep_context->rar_physical_address =
+ my_sep_context->rar_start_address;
+
+ pr_debug("SEP Driver:rar_physical is %08lx\n",
+ (unsigned long)my_sep_context->rar_physical_address);
+
+ pr_debug("SEP Driver:rar_virtual is %p\n",
+ my_sep_context->rar_virtual_address);
+
+ goto end_ok_function;
+
+end_function:
+
+ if (my_sep_context->rar_virtual_address) {
+ iounmap(my_sep_context->rar_virtual_address);
+ my_sep_context->rar_virtual_address = NULL;
+ }
+
+ if (my_sep_context->reg_addr) {
+ iounmap(my_sep_context->reg_addr);
+ my_sep_context->reg_addr = NULL;
+ }
+
+ if (my_sep_context->pdev) {
+ pci_disable_device(my_sep_context->pdev);
+ my_sep_context->pdev = NULL;
+ }
+
+end_ok_function:
+
+ return error;
+}
+
+static struct pci_device_id sep_pci_id_tbl[] = {
+ { PCI_DEVICE(VENDOR_ID, 0x080c) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/* field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe
+};
+
+
+/*
+ this function registers the driver to
+ the device subsystem (either PCI, USB, etc.)
+*/
+int sep_register_driver_to_device(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
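
Here is a quick sketch of how the two shared-area translation helpers above
compose; sep_translation_example() is a hypothetical function added only for
illustration, and the 0x100 offset is arbitrary.

/* Illustrative only: round trip through the shared-area translation helpers */
static void sep_translation_example(struct device_context *ctx)
{
	/* a spot 0x100 bytes into the shared area */
	void *virt = ctx->shared_area_virt_addr + 0x100;

	/* the bus address sits at the same offset from the area's bus base ... */
	dma_addr_t bus = sep_shared_area_virt_to_bus(ctx, virt);

	/* ... so translating back recovers the original virtual address */
	WARN_ON(sep_shared_area_bus_to_virt(ctx, bus) != virt);
}
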
diff --git a/drivers/staging/sep/sep_main_mod.c b/drivers/staging/sep/sep_main_mod.c
new file mode 100644
index 0000000..744e287
--- /dev/null
+++ b/drivers/staging/sep/sep_main_mod.c
@@ -0,0 +1,3330 @@
+ /*
+ * sep_main_mod.c
+ *
+ **************************************************************************
+ * Copyright 2009 (c) Discretix Technologies Ltd. *
+ * Copyright 2009 (c) Intel Corporation *
+ * *
+ * This software is protected by copyright, international treaties and *
+ * various patents. Any copy or reproduction of this Software as *
+ * permitted below, must include this Copyright Notice as well as any *
+ * other notices provided under such license. *
+ * *
+ * This program shall be governed by, and may be used and redistributed *
+ * under the terms and conditions of the GNU General Public License, *
+ * version 2, as published by the Free Software Foundation. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY liability and WARRANTY; without even the implied *
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+ * See the GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, please write to the Free Software *
+ * Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ **************************************************************************
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.01.08 Initial publish
+ *
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/ioctl.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+
+#ifdef DX_CC52_SUPPORT
+#include <linux/netlink.h>
+#include <linux/connector.h>
+#include <linux/cn_proc.h>
+#endif
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_driver_ext_api.h"
+
+/*----------------------------------------
+ DEFINES
+-----------------------------------------*/
+
+
+
+/*--------------------------------------------
+ GLOBAL variables
+--------------------------------------------*/
+
+/* debug messages level */
+static int sep_debug = 0x0;
+module_param(sep_debug, int, 0);
+MODULE_PARM_DESC(sep_debug, "Flag to enable SEP debug messages");
+
+/* context of the device */
+struct device_context sep_context;
+
+
+/*---------------------------------------------
+ FUNCTIONS
+-----------------------------------------------*/
+/*
+ this function locks SEP for the caller by taking the mmap lock bit
+*/
+int sep_lock(void)
+{
+ /* get the lock */
+ wait_event_interruptible(sep_context.event ,
+ test_and_set_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag) == 0);
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
+/*
+ this function unlocks SEP
+*/
+void sep_unlock(void)
+{
+ /* check if mappings must be freed */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* release lock */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+ wake_up(&sep_context.event);
+}
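+
+/*
+ * Typical call pattern (illustrative only):
+ *
+ *	if (sep_lock())
+ *		return -EINTR;
+ *	... exchange messages with SEP through the shared area ...
+ *	sep_unlock();
+ */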
+
+/*
+ this function returns the address of the message shared area
+*/
+void *sep_map_shared_area(void)
+{
+ return sep_context.shared_area_virt_addr;
+}
+
+/*
+ calculates time and sets it at the predefined address
+*/
+static int sep_set_time(dma_addr_t *bus_ptr,
+ u32 *time_in_sec_ptr)
+{
+ /* time struct */
+ struct timeval time;
+
+ /* address of time in the kernel */
+ u32 *time_addr;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_set_time start\n");
+
+
+ do_gettimeofday(&time);
+
+ /* set value in the SYSTEM MEMORY offset */
+ time_addr = sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
+
+ edbg("SEP Driver:time.tv_sec is %x\n",
+ (u32)time.tv_sec);
+ edbg("SEP Driver:time_addr is %p\n",
+ time_addr);
+ edbg("SEP Driver:g_sep_shared_area_virt_addr is %p\n",
+ sep_context.shared_area_virt_addr);
+
+ /* set the output parameters if needed */
+ if (bus_ptr)
+ *bus_ptr = sep_shared_area_virt_to_bus(&sep_context,
+ time_addr);
+
+ if (time_in_sec_ptr)
+ *time_in_sec_ptr = time.tv_sec;
+
+ dbg("SEP Driver:<-------- sep_set_time end\n");
+
+ return 0;
+}
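+
+/*
+ * After sep_set_time() the system-time slot of the shared area holds two
+ * 32-bit words:
+ *
+ *	word 0: SEP_TIME_VAL_TOKEN
+ *	word 1: seconds since the epoch (time.tv_sec)
+ */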
+
+
+#ifdef DX_CC52_SUPPORT
+/*
+ open function for the singleton driver
+*/
+static int sep_singleton_open(struct inode *inode_ptr , struct file *file_ptr)
+{
+ /* return value */
+ int error;
+
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ error = 0;
+
+ dbg("SEP Driver:--------> sep_singleton_open start\n");
+
+ if (test_and_set_bit(0, &sep_context.singleton_access_flag))
+ error = -EBUSY;
+
+ dbg("SEP Driver:<-------- sep_singleton_open end\n");
+
+ return error;
+}
+
+/*
+ inserts the data into the caller id table
+*/
+static int sep_set_caller_id_handler(u32 arg)
+{
+ /* error */
+ int error;
+
+ /* counter */
+ int i;
+
+ /* command arguments */
+ struct sep_driver_set_caller_id_t command_args;
+
+ /*----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_set_caller_id_handler start\n");
+
+ error = 0;
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep_context.caller_id_table[i].pid == 0)
+ break;
+ }
+
+ if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
+ edbg("SEP Driver:i == SEP_CALLER_ID_TABLE_NUM_ENTRIES\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* copy the data */
+ if (copy_from_user(&command_args ,
+ (void *)arg ,
+ sizeof(struct sep_driver_set_caller_id_t))) {
+ edbg("SEP Driver:copy_from_user failed\n");
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (!command_args.pid ||
+ !command_args.callerIdAddress ||
+ !command_args.callerIdSizeInBytes) {
+
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ edbg("SEP Driver:pid is %x\n" ,
+ command_args.pid);
+ edbg("SEP Driver:callerIdSizeInBytes is %x\n" ,
+ command_args.callerIdSizeInBytes);
+
+ if (command_args.callerIdSizeInBytes >
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ sep_context.caller_id_table[i].pid = command_args.pid;
+
+ if (copy_from_user(sep_context.caller_id_table[i].callerIdHash,
+ command_args.callerIdAddress ,
+ command_args.callerIdSizeInBytes))
+ error = -EFAULT;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_set_caller_id_handler end\n");
+
+ return error;
+}
+
+/*
+ set the caller id (if it exists) of the current process that sent the request to SeP
+*/
+static int sep_set_current_caller_id(void)
+{
+ int i;
+
+ dbg("SEP Driver:--------> sep_set_current_caller_id start\n");
+
+ edbg("current process is %d\n", current->pid);
+
+ /* zero the previous value */
+ memset((void *)(sep_context.shared_area_virt_addr +
+ (SEP_DRIVER_SYSTEM_CALLER_ID_MEMORY_OFFSET_IN_BYTES)),
+ 0,
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep_context.caller_id_table[i].pid == current->pid) {
+ edbg("Caller Id found\n");
+
+ memcpy((void *)(sep_context.shared_area_virt_addr +
+ (SEP_DRIVER_SYSTEM_CALLER_ID_MEMORY_OFFSET_IN_BYTES)),
+ (void *)(sep_context.caller_id_table[i].callerIdHash),
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+ break;
+ }
+ }
+
+ dbg("SEP Driver:<-------- sep_set_current_caller_id end\n");
+
+ return 0;
+}
+
+
+/*
+ callback for the netlink connector registration
+*/
+static void sep_cn_callback(void *data)
+{
+ /* counter */
+ int counter;
+
+ /* message */
+ struct cn_msg *msg;
+
+ /* event */
+ struct proc_event *ev;
+
+ /* process id */
+ int pid;
+
+ /*--------------------
+ CODE
+ ----------------------*/
+
+ msg = data;
+
+ /* get the proc event pointer */
+ ev = (struct proc_event *)msg->data;
+
+ /* check the event */
+ switch (ev->what) {
+ case PROC_EVENT_EXEC:
+
+ /* get the pid */
+ pid = ev->event_data.exec.process_pid;
+
+ break;
+
+ case PROC_EVENT_EXIT:
+
+ /* get the pid */
+ pid = ev->event_data.exit.process_pid;
+
+ break;
+
+ default:
+ /* not interested in the rest of the events */
+ goto end_function;
+ }
+
+ /* check if the pid is relevant */
+ for (counter = 0;
+ counter < SEP_CALLER_ID_TABLE_NUM_ENTRIES;
+ counter++) {
+ /* if the pid is found - remove it from the table */
+ if (sep_context.caller_id_table[counter].pid == pid)
+ sep_context.caller_id_table[counter].pid = 0;
+ }
+
+
+end_function:
+
+ return;
+}
+
+/*
+ initializes the caller id functionality
+*/
+static int sep_init_caller_id(void)
+{
+ /* return value */
+ int ret_val;
+
+ /* counter */
+ int counter;
+
+ struct cb_id caller_id;
+
+
+ /*----------------------------
+ CODE
+ ---------------------------------*/
+
+ ret_val = 0;
+
+ /* init cb_id struct */
+ caller_id.idx = CN_IDX_PROC;
+ caller_id.val = CN_VAL_PROC;
+
+ /* init caller id table */
+ for (counter = 0; counter < SEP_CALLER_ID_TABLE_NUM_ENTRIES; counter++)
+ sep_context.caller_id_table[counter].pid = 0;
+
+
+ /* init access flag */
+ sep_context.singleton_access_flag = 0;
+
+ edbg("SEP Driver: caller id table init finished\n");
+
+ /* register to netlink connector */
+ /*ret_val = cn_add_callback(&caller_id, "sep_caller_id", &sep_cn_callback);*/
+
+
+ return ret_val;
+}
+
+#endif /*DX_CC52_SUPPORT*/
+
+
+/*
+ This function raises an interrupt to the SEP that signals that it has
+ a new command from the HOST
+*/
+static int sep_send_command_handler(void)
+{
+ int error;
+
+ u32 count;
+
+ dbg("SEP Driver:--------> sep_send_command_handler start\n");
+
+ error = 0;
+
+ /* check if message was already sent without poll */
+ if (test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* set bit that allows poll */
+ set_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ sep_set_time(NULL, NULL);
+
+#ifdef DX_CC52_SUPPORT
+ /* set the current caller id - if it exists */
+ sep_set_current_caller_id();
+#endif
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ /* update counter */
+ sep_context.host_to_sep_send_counter++;
+
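+ /* map the shared area for DMA so that the message just written by
+ the host is visible to the SEP before the doorbell interrupt below;
+ the mapping is torn down again once the reply is collected in the
+ poll path */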
+ sep_context.shared_area_dma_addr =
+ dma_map_single(sep_context.dev_ptr,
+ (void *)sep_context.shared_area_virt_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* send interrupt to SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+ dbg("SEP Driver:<-------- sep_send_command_handler end\n");
+
+end_function:
+
+ return error;
+}
+
+/*
+ this function notifies the SeP of the incoming message
+*/
+int sep_send_msg_rdy_cmd(void)
+{
+ return sep_send_command_handler();
+}
+
+
+/* poll(suspend), until reply from sep */
+int sep_driver_poll(u32 *source_ptr)
+{
+ /* error */
+ int error;
+
+ /* GPR2 register value */
+ u32 retVal2;
+
+ /* GPR3 register value */
+ u32 retVal3;
+
+ /*-----------------
+ CODE
+ -------------------*/
+
+ error = 0;
+
+ /* check if send command or send_reply were activated previously */
+ if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EIO;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_POLLING_MODE
+ do {
+ /* read gp2 */
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ /* read gp3 */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+ /* sleep between loops */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(50);
+
+ } while ((retVal3 == 0x0) &&
+ sep_context.host_to_sep_send_counter != (retVal2 & 0x7FFFFFFF));
+
+ sep_context.sep_to_host_reply_counter++;
+#else
+ /* poll, until reply from sep */
+ wait_event(sep_context.event,
+ (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter));
+
+#endif
+
+ /* check if error occurred during poll */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if (retVal3 != 0x0) {
+ edbg("SEP Driver: error during poll\n");
+ error = -EIO;
+ goto end_function;
+ }
+
+ if (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter) {
+
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ edbg("retVal2 is %x\n", retVal2);
+
+ /* clear the bit - in case it must be set
+ again by send_reply_command */
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
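+ /* GPR2 layout as used here: bit 31 marks a SEP request (as opposed
+ to a reply), bit 30 marks a printf request from the SEP, and the
+ low bits carry the reply counter */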
+ /* check printf request from sep */
+ if ((retVal2 >> 30) & 0x1) {
+
+ edbg("SEP Driver: sep printf request in\n");
+ *source_ptr = SEP_DRIVER_SRC_PRINTF;
+ goto end_function;
+ }
+
+ /* check if this is a sep reply or a request */
+ if (retVal2 >> 31) {
+ edbg("SEP Driver: sep request in\n");
+ *source_ptr = SEP_DRIVER_SRC_REQ;
+ /* request */
+ } else {
+ edbg("SEP Driver: sep reply in\n");
+ *source_ptr = SEP_DRIVER_SRC_REPLY;
+ }
+ }
+
+end_function:
+
+ return error;
+}
+
+/*----------------------------------------------------------------------
+ open function of the character driver - initializes the private data
+------------------------------------------------------------------------*/
+static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
+{
+ /* return value */
+ int error;
+
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ error = 0;
+
+ dbg("SEP Driver:--------> open start\n");
+
+ /* init the private data flag */
+ file_ptr->private_data = (void *)SEP_DRIVER_DISOWN_LOCK_FLAG;
+
+ dbg("SEP Driver:<-------- open end\n");
+
+ return error;
+}
+
+
+/*------------------------------------------------------------
+ release function
+-------------------------------------------------------------*/
+static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
+{
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_release start\n");
+
+ /* check that all the dma resources were freed */
+ sep_free_dma_table_data_handler();
+
+ /* unlock on release only if file pointer still holds the lock flag */
+ if ((u32)file_ptr->private_data == SEP_DRIVER_OWN_LOCK_FLAG) {
+
+ /* check if mappings must be freed */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* check that all the dma resources were freed */
+ sep_free_dma_table_data_handler();
+
+ /* release lock */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+ }
+
+ dbg("SEP Driver:<-------- sep_release end\n");
+
+ return 0;
+}
+
+
+
+
+/*---------------------------------------------------------------
+ mmap function - this function maps the message shared area
+-----------------------------------------------------------------*/
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ /* physical addr */
+ dma_addr_t phys_addr;
+
+ /* error */
+ int error = 0;
+
+ /*-----------------------
+ CODE
+ -------------------------*/
+
+ dbg("SEP Driver:--------> mmap start\n");
+
+ /* lock the access to SeP */
+ wait_event_interruptible(sep_context.event,
+ test_and_set_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag) == 0);
+ if (signal_pending(current)) {
+ error = -EINTR;
+ goto end_function;
+ }
+
+ /* set the flag for LOCK */
+ filp->private_data = (void *)SEP_DRIVER_OWN_LOCK_FLAG;
+
+ /* zero the pools */
+ sep_context.data_pool_bytes_allocated = 0;
+
+ /* check that the size of the mapped range does not exceed the size
+ of the message shared area */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ edbg("SEP Driver mmap requested size is more than allowed\n");
+ error = -EAGAIN;
+
+ goto end_function_with_error;
+ }
+
+ edbg("SEP Driver:g_sep_shared_area_virt_addr is %p\n",
+ sep_context.shared_area_virt_addr);
+
+ /* get physical address */
+ phys_addr = sep_context.shared_area_bus_addr;
+
+ edbg("SEP Driver: phys_addr is %08x\n", (u32)phys_addr);
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ phys_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ edbg("SEP Driver remap_page_range failed\n");
+ error = -EAGAIN;
+
+ goto end_function_with_error;
+ }
+
+ goto end_function;
+
+end_function_with_error:
+
+ /* clear the bit */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+
+end_function:
+
+ dbg("SEP Driver:<-------- mmap end\n");
+
+ return error;
+}
+
+
+/*-----------------------------------------------
+ poll function
+*----------------------------------------------*/
+static u32 sep_poll(struct file *filp, poll_table *wait)
+{
+ u32 count;
+
+ u32 mask = 0;
+
+ /* GPR2 register */
+ u32 retVal2;
+
+ /* GPR3 register */
+ u32 retVal3;
+
+ /*----------------------------------------------
+ CODE
+ -------------------------------------------------*/
+
+ dbg("SEP Driver:--------> poll start\n");
+
+ retVal2 = retVal3 = 0;
+
+ /* check that only process that lock the driver may call the poll */
+ if ((u32)filp->private_data != SEP_DRIVER_OWN_LOCK_FLAG) {
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* check if send command or send_reply were activated previously */
+ if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ mask = POLLERR;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_POLLING_MODE
+
+ while ((retVal3 == 0x0) &&
+ (sep_context.host_to_sep_send_counter != (retVal2 & 0x3FFFFFFF))) {
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ }
+
+ sep_context.sep_to_host_reply_counter++;
+#else
+ /* add the event to the polling wait table */
+ poll_wait(filp, &sep_context.event, wait);
+
+#endif
+
+ edbg("sep_host_to_sep_send_counter is %x\n",
+ sep_context.host_to_sep_send_counter);
+ edbg("sep_sep_to_host_reply_counter is %x\n",
+ sep_context.sep_to_host_reply_counter);
+
+ /* check if error occurred during poll */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if (retVal3 != 0x0) {
+ edbg("SEP Driver: error during poll\n");
+ mask |= POLLERR;
+
+ goto end_function;
+ }
+
+ /* check if the data is ready */
+ if (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter) {
+
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Sep Mesg Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ for (count = 0; count < 10 * 4; count += 4)
+ edbg("Debug Data Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr +
+ 0x1800 +
+ count)));
+
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ edbg("retVal2 is %x\n", retVal2);
+
+ /* clear the bit - in case it must be set
+ again by send_reply_command */
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
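+ /* translate the GPR2 status into a poll mask: a printf request
+ maps to POLLHUP, a SEP request to POLLOUT|POLLWRNORM and a SEP
+ reply to POLLIN|POLLRDNORM */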
+ /* check printf request from sep */
+ if ((retVal2 >> 30) & 0x1) {
+ edbg("SEP Driver: sep printf request in\n");
+ mask |= POLLHUP;
+
+ goto end_function;
+ }
+
+ /* check if this is a sep reply or a request */
+ if (retVal2 >> 31) {
+ edbg("SEP Driver: sep request in\n");
+ /* request */
+ mask |= POLLOUT | POLLWRNORM;
+ } else {
+ edbg("SEP Driver: sep reply in\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- poll exit\n");
+ return mask;
+}
+
+/*
+ This function raises an interrupt to the SEP to signal that it has a
+ new reply message from the HOST
+*/
+static int sep_send_reply_command_handler(void)
+{
+ /* count */
+ u32 count;
+
+ /* error */
+ int error;
+
+
+ dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
+
+ error = 0;
+
+ /* check if message was already sent without poll */
+ if (test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ /* set bit that allows poll */
+ set_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* update both counters */
+ sep_context.host_to_sep_send_counter++;
+
+ sep_context.sep_to_host_reply_counter++;
+
+ /* flush shared area */
+ sep_context.shared_area_dma_addr =
+ dma_map_single(sep_context.dev_ptr,
+ (void *)sep_context.shared_area_virt_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* send the interrupt to SEP */
+ SEP_WRITE_REGISTER(&sep_context,
+ HW_HOST_HOST_SEP_GPR2_REG_ADDR,
+ sep_context.host_to_sep_send_counter);
+
+ /* update host to sep counter */
+ sep_context.host_to_sep_send_counter++;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
+
+ return error;
+}
+
+/*
+ This function handles the allocate data pool memory request.
+ It calculates and returns the bus address of the allocated memory
+ and the offset of this area from the mapped address, so that the
+ FVOs in user space can calculate the exact virtual address of this
+ allocated memory
+*/
+static int sep_allocate_data_pool_memory_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command parameters */
+ struct sep_driver_alloc_t command_args;
+
+ /*-------------------------
+ CODE
+ ----------------------------*/
+
+ dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
+
+ error = 0;
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_alloc_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
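+ /* the data pool is managed as a simple bump allocator inside the
+ shared area; allocations are not freed individually - the counter
+ is only reset when the area is mmap-ed again */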
+ /* check that there is enough room left in the data pool */
+ if ((SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES -
+ sep_context.data_pool_bytes_allocated) <
+ command_args.num_bytes) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* set the offset and the bus address of the allocation */
+ command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep_context.data_pool_bytes_allocated;
+ command_args.bus_address = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep_context.data_pool_bytes_allocated;
+
+ /* write the memory back to the user space */
+ if (copy_to_user((void *)arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_alloc_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* set the allocation */
+ sep_context.data_pool_bytes_allocated += command_args.num_bytes;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
+
+ return error;
+}
+
+
+
+
+
+/*
+ this function handles the request for creation of the DMA tables
+ for the synchronous symmetric operations (AES, DES, HASH).
+ it returns the physical addresses of the created DMA tables to user
+ space, which inserts them as parameters into the HOST-SEP message.
+ these pointers are not interpreted by the user application in any way.
+*/
+static int sep_create_sync_dma_tables_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_build_sync_table_t command_args;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_build_sync_table_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ edbg("app_in_address is %08lx\n", command_args.app_in_address);
+ edbg("app_out_address is %08lx\n", command_args.app_out_address);
+ edbg("data_size is %x\n", command_args.data_in_size);
+ edbg("block_size is %x\n", command_args.block_size);
+
+ /* validate user parameters */
+ if (!command_args.app_in_address) {
+
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* check if we need to build only input table or input/output */
+ if (command_args.app_out_address) {
+
+ /* prepare input/output tables. */
+ error = sep_prepare_input_output_dma_table(
+ command_args.app_in_address,
+ command_args.app_out_address,
+ command_args.data_in_size,
+ command_args.block_size,
+ &command_args.in_table_address,
+ &command_args.out_table_address,
+ &command_args.in_table_num_entries,
+ &command_args.out_table_num_entries,
+ &command_args.table_data_size,
+ command_args.isKernelVirtualAddress);
+
+ } else {
+ /* prepare input tables */
+ error = sep_prepare_input_dma_table(
+ command_args.app_in_address,
+ command_args.data_in_size,
+ command_args.block_size,
+ &command_args.in_table_address,
+ &command_args.in_table_num_entries,
+ &command_args.table_data_size,
+ command_args.isKernelVirtualAddress);
+ }
+
+ if (error)
+ goto end_function;
+
+ /* copy to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_build_sync_table_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
+
+ return error;
+}
+
+/*
+ This API handles the end transaction request
+*/
+static int sep_end_transaction_handler(struct file *file_ptr)
+{
+ /*----------------------------
+ CODE
+ -----------------------------*/
+
+ dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
+
+ /* check that poll was called before */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* check that all the dma resources were freed */
+ sep_free_dma_table_data_handler();
+
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+
+ file_ptr->private_data = (void *)SEP_DRIVER_DISOWN_LOCK_FLAG;
+
+ dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
+
+ return 0;
+}
+
+static int sep_start_handler(void)
+{
+ /* reg val */
+ u32 reg_val;
+
+ /* error */
+ u32 error;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_start_handler start\n");
+
+ error = 0;
+
+ /* wait in polling for message from SEP */
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!reg_val);
+
+ /* check the value */
+ if (reg_val == 0x1) {
+ /* fatal error - read error status from GPR0 */
+ error = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ goto end_function;
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_start_handler end\n");
+
+ return error;
+}
+
+
+/*
+ this function handles the request for SEP initialization
+*/
+static int sep_init_handler(void *arg)
+{
+ /* word from message */
+ u32 message_word;
+
+ /* message ptr */
+ u32 *message_ptr;
+
+ /* command arguments */
+ struct sep_driver_init_t command_args;
+
+ /* counter */
+ u32 counter;
+
+ /* error */
+ int error;
+
+ /* reg val */
+ u32 reg_val;
+
+ /*-------------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_init_handler start\n");
+
+ error = 0;
+
+ /* this API may be run only by root */
+ if (!capable(CAP_SYS_ADMIN)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ if (copy_from_user(&command_args, arg,
+ sizeof(struct sep_driver_init_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* validate user parameters */
+ if (!command_args.message_addr || !command_args.sep_sram_addr) {
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ message_ptr = command_args.message_addr;
+
+ edbg("SEP Driver: before write1\n");
+ /* set the base address of the SRAM */
+ SEP_WRITE_REGISTER(&sep_context,
+ HW_SRAM_ADDR_REG_ADDR,
+ command_args.sep_sram_addr);
+
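+ /* stream the boot message into SEP SRAM one word at a time via the
+ SRAM data register, waiting for each write to complete */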
+ for (counter = 0;
+ counter < command_args.message_size_in_words;
+ counter++, message_ptr++) {
+ get_user(message_word, message_ptr);
+
+ edbg("SEP Driver: before write2\n")
+ /* write data to SRAM */
+ SEP_WRITE_REGISTER(&sep_context, HW_SRAM_DATA_REG_ADDR,
+ message_word);
+
+ edbg("SEP Driver:message_word is %x\n", message_word);
+
+ /* wait for write complete */
+ SEP_WAIT_SRAM_WRITE_COMPLETE(&sep_context);
+ }
+
+ /* signal SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
+
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!(reg_val & 0xFFFFFFFD));
+
+ /* check the value */
+ if (reg_val == 0x1) {
+ edbg("SEP Driver:init failed\n");
+
+ error = SEP_READ_REGISTER(&sep_context, 0x8060);
+ edbg("SEP Driver:sw monitor is %x\n", error);
+
+ /* fatal error - read error status from GPR0 */
+ error = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ edbg("SEP Driver:error is %x\n", error);
+ goto end_function;
+ }
+
+ edbg("SEP Driver: signal end CC_INIT, reg_val is %d\n", reg_val);
+
+ /* signal sep to zero the GPR3 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
+ /* poll gpr3 till it is 0 */
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (reg_val != 0);
+
+ #ifndef __LITTLE_ENDIAN
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_ENDIAN_REG_ADDR, 0x1/*BE*/);
+ #endif
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_init_handler end\n");
+
+ return error;
+
+}
+
+/*
+ this function handles the request cache and resident reallocation
+*/
+static int sep_realloc_cache_resident_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* reg val */
+ u32 reg_val;
+
+ /* physical cache addr */
+ dma_addr_t cache_bus_address;
+
+ /* physical resident addr */
+ dma_addr_t resident_bus_address;
+
+ /* physical d-cache addr */
+ dma_addr_t dcache_bus_address;
+
+ /* command arguments */
+ struct sep_driver_realloc_cache_resident_t command_args;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ /* check if SEP already initialized */
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+ /* check that old boot, warm boot or cold-warm boot has finished */
+ if (reg_val != 0x2) {
+ error = SEP_ALREADY_INITIALIZED_ERR;
+ goto end_function;
+ }
+
+ /* this API may be run only by root */
+ if (!capable(CAP_SYS_ADMIN)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_realloc_cache_resident_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+
+ /* copy cache and resident to their intended locations */
+ error = sep_copy_cache_resident_to_area(&sep_context,
+ &cache_bus_address,
+ &resident_bus_address,
+ &dcache_bus_address);
+
+ if (error)
+ goto end_function;
+
+#if SEP_DRIVER_LOCK_RAR_MODE
+
+ error = sep_copy_ext_cache_to_area(command_args.extcache_addr,
+ command_args.extcache_size_in_bytes,
+ 0);
+
+ if (error)
+ goto end_function;
+
+
+#endif
+
+ /* lock the area (if needed) */
+ error = sep_lock_cache_resident_area();
+ if (error)
+ goto end_function;
+
+ command_args.new_base_addr = sep_context.shared_area_bus_addr;
+
+ /* find the new base address as the lowest address among the
+ cache, resident and shared areas */
+ if (resident_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = resident_bus_address;
+
+ if (cache_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = cache_bus_address;
+
+ if (dcache_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = dcache_bus_address;
+
+ /* set the return parameters */
+ command_args.new_cache_addr = cache_bus_address;
+ command_args.new_resident_addr = resident_bus_address;
+ command_args.new_dcache_addr = dcache_bus_address;
+
+
+ /* set the new shared area */
+ command_args.new_shared_area_addr = sep_context.shared_area_bus_addr;
+
+ edbg("SEP Driver:command_args.new_shared_area_addr is %08x\n",
+ (u32)command_args.new_shared_area_addr);
+ edbg("SEP Driver:command_args.new_base_addr is %08x\n",
+ (u32)command_args.new_base_addr);
+ edbg("SEP Driver:command_args.new_resident_addr is %08x\n",
+ (u32)command_args.new_resident_addr);
+ edbg("SEP Driver:command_args.new_cache_addr is %08x\n",
+ (u32)command_args.new_cache_addr);
+
+ /* return to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_realloc_cache_resident_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+
+/*
+ This function will retrieve the RAR buffer physical addresses, type
+ and size corresponding to the RAR handles provided in the buffers vector.
+*/
+static int sep_rar_prepare_output_msg_handler(void *arg)
+{
+ /* error return code */
+ int error;
+
+ /* command args */
+ struct sep_driver_rar_handle_to_bus command_args;
+
+ /* bus address */
+ dma_addr_t rar_bus;
+
+ /* holds the RAR address in the system memory offset */
+ u32 *rar_addr;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_rar_prepare_output_msg_handler start\n");
+
+ error = 0;
+ rar_bus = 0;
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_rar_handle_to_bus))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* call to translation function only if user handle is not NULL */
+ if (command_args.rar_handle) {
+ error = sep_rar_handle_to_bus(command_args.rar_handle, &rar_bus);
+ if (error)
+ goto end_function;
+ }
+
+ edbg("SEP Driver: rar_addr_bus = %x\n", (u32)rar_bus);
+
+ /* set value in the SYSTEM MEMORY offset */
+ rar_addr = (u32 *)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
+ /* copy the physical address to the System Area.
+ The SEP will follow this address */
+ rar_addr[0] = SEP_RAR_VAL_TOKEN;
+ rar_addr[1] = rar_bus;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_rar_prepare_output_msg_handler end\n");
+
+ return error;
+}
+
+/*
+ this function returns the physical and virtual addresses of the static pool
+*/
+static int sep_get_static_pool_addr_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_static_pool_addr_t command_args;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
+
+ error = 0;
+
+ /*prepare the output parameters in the struct */
+ command_args.static_bus_address = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+ command_args.static_virt_address = sep_context.shared_area_virt_addr +
+ SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+
+ edbg(
+ "SEP Driver:physical_static_address is %08x, virtual_static_address %p\n",
+ (u32)command_args.static_bus_address,
+ command_args.static_virt_address);
+
+ /* send the parameters to user application */
+ if (copy_to_user(arg,
+ &command_args,
+ sizeof(struct sep_driver_static_pool_addr_t)))
+ error = -EFAULT;
+
+
+ dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
+
+ return error;
+}
+
+/*
+ this function gets the offset of the bus address from the start
+ of the mapped area
+*/
+static int sep_get_physical_mapped_offset_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_get_mapped_offset_t command_args;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
+
+ error = 0;
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_get_mapped_offset_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (!command_args.bus_address) {
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (command_args.bus_address < sep_context.shared_area_bus_addr) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /*prepare the output parameters in the struct */
+ command_args.offset = command_args.bus_address -
+ sep_context.shared_area_bus_addr;
+
+ edbg("SEP Driver:physical_address is %08x, offset is %x\n",
+ (u32)command_args.bus_address,
+ command_args.offset);
+
+ /* send the parameters to user application */
+ if (copy_to_user(arg,
+ &command_args,
+ sizeof(struct sep_driver_get_mapped_offset_t)))
+ error = -EFAULT;
+
+
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
+
+ return error;
+}
+
+/*
+ this function handles the request for get time
+*/
+static int sep_get_time_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_get_time_t command_args;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ error = sep_set_time(&command_args.time_bus_address,
+ &command_args.time_value);
+ if (error)
+ goto end_function;
+
+ /* return to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_get_time_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+
+#ifdef DX_CC52_SUPPORT
+/*
+ this function handles the request for ext (3rd party) cache reallocation
+*/
+static int sep_realloc_ext_cache_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* physical ext cache addr */
+ dma_addr_t ext_cache_bus_address;
+
+ /* command arguments */
+ struct sep_driver_realloc_ext_cache_t command_args;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_realloc_ext_cache_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_LOCK_RAR_MODE
+
+ /* if we are in RAR mode - then the ext cache was already copied
+ and we just need to receive back its physical address */
+ command_args.ext_cache_size_in_bytes = 0;
+
+#endif
+ /* copy cache and resident to the their intended locations */
+ error = sep_copy_ext_cache_to_area(command_args.ext_cache_addr,
+ command_args.ext_cache_size_in_bytes,
+ &ext_cache_bus_address);
+ if (error)
+ goto end_function;
+
+
+
+ command_args.new_ext_cache_addr = ext_cache_bus_address;
+
+ edbg("SEP Driver:command_args.new_ext_cache_addr is %x\n",
+ command_args.new_ext_cache_addr);
+
+ /* return to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_realloc_ext_cache_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+#endif /*#ifdef DX_CC52_SUPPORT*/
+
+static long sep_ioctl(
+ struct file *filp,
+ u32 cmd,
+ unsigned long arg)
+{
+
+ /* error */
+ long error;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+ error = 0;
+
+ dbg("SEP Driver:--------> ioctl start\n");
+
+
+ edbg("SEP Driver: cmd is %x\n", cmd);
+
+ /* check the lock flag - does not allow any other process
+ to call ioctl on the driver */
+ if ((u32)filp->private_data != SEP_DRIVER_OWN_LOCK_FLAG) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* lock access to ioctl */
+ mutex_lock(&sep_context.ioctl_mutex);
+
+ /* check that the command is for sep device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function_unlock;
+ }
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+
+ /* send command to SEP */
+ error = sep_send_command_handler();
+
+ edbg("SEP Driver: after sep_send_command_handler\n");
+
+ break;
+
+ case SEP_IOCSENDSEPRPLYCOMMAND:
+
+ /* send reply command to SEP */
+ error = sep_send_reply_command_handler();
+
+ break;
+
+ case SEP_IOCALLOCDATAPOLL:
+
+ /* allocate data pool */
+ error = sep_allocate_data_pool_memory_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCCREATESYMDMATABLE:
+
+ /* create dma table for synchronous operation */
+ error = sep_create_sync_dma_tables_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCFREEDMATABLEDATA:
+
+ /* free the pages */
+ error = sep_free_dma_table_data_handler();
+
+ break;
+
+ case SEP_IOCSEPSTART:
+
+ /* start command to sep */
+ error = sep_start_handler();
+ break;
+
+ case SEP_IOCSEPINIT:
+
+ /* init command to sep */
+ error = sep_init_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCGETSTATICPOOLADDR:
+
+ /* get the physical and virtual addresses of the static pool */
+ error = sep_get_static_pool_addr_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCENDTRANSACTION:
+
+ error = sep_end_transaction_handler(filp);
+
+ break;
+
+ case SEP_IOCREALLOCCACHERES:
+
+ error = sep_realloc_cache_resident_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCGETMAPPEDADDROFFSET:
+
+ error = sep_get_physical_mapped_offset_handler((void *)arg);
+
+ break;
+
+#ifdef DX_CC52_SUPPORT
+ case SEP_IOCREALLOCEXTCACHE:
+
+ error = sep_realloc_ext_cache_handler((void *)arg);
+
+ break;
+#endif
+
+ case SEP_IOCGETIME:
+
+ error = sep_get_time_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCRARPREPAREMESSAGE:
+
+ error = sep_rar_prepare_output_msg_handler((void *)arg);
+
+ break;
+
+ default:
+ edbg("SEP Driver: **** NO COMMAND ****\n");
+
+ error = -ENOTTY;
+ break;
+ }
+
+end_function_unlock:
+
+ mutex_unlock(&sep_context.ioctl_mutex);
+
+end_function:
+
+ dbg("SEP Driver:<-------- ioctl end\n");
+
+ return error;
+}
+
+#ifdef DX_CC52_SUPPORT
+
+
+/*
+ singleton ioctl
+*/
+static long sep_singleton_ioctl(struct file *filp,
+ u32 cmd,
+ unsigned long arg)
+{
+
+ /* error */
+ long error;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+ error = 0;
+
+ dbg("SEP Driver:--------> sep_singleton_ioctl start\n");
+
+ edbg("SEP Driver: cmd is %x\n", cmd);
+
+ /* check that the command is for sep device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ switch (cmd) {
+ case SEP_IOCTLSETCALLERID:
+ error = sep_set_caller_id_handler(arg);
+ break;
+
+ default:
+ error = sep_ioctl(filp, cmd, arg);
+ break;
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_singleton_ioctl end\n");
+
+ return error;
+}
+
+/* file operation for singleton sep operations */
+static const struct file_operations singleton_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_singleton_ioctl,
+ .poll = sep_poll,
+ .open = sep_singleton_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+};
+
+#endif /*DX_CC52_SUPPORT*/
+
+
+/* file operation for normal sep operations */
+static const struct file_operations sep_file_operarions = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+
+};
+
+/*
+ this function registers the driver to the file system
+*/
+static int sep_register_driver_to_fs(void)
+{
+ /* return value */
+ int ret_val;
+
+ /* major number */
+ int major;
+
+ /*---------------------
+ CODE
+ -----------------------*/
+
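+ /* with DX_CC52_SUPPORT two minor numbers are reserved: minor 0 for
+ the main SEP device and minor 1 for the singleton (caller id)
+ device */
+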
+#ifdef DX_CC52_SUPPORT
+ ret_val = alloc_chrdev_region(&sep_context.device_number,
+ 0,
+ 2,
+ DRIVER_NAME);
+#else
+ ret_val = alloc_chrdev_region(&sep_context.device_number,
+ 0,
+ 1,
+ DRIVER_NAME);
+#endif
+ if (ret_val) {
+ edbg("sep_driver:major number allocation failed,\
+ retval is %d\n",
+ ret_val);
+ goto end_function;
+ }
+
+ /* get the major number */
+ major = MAJOR(sep_context.device_number);
+
+ /* init cdev */
+ cdev_init(&sep_context.cdev, &sep_file_operarions);
+ sep_context.cdev.owner = THIS_MODULE;
+
+ /* register the driver with the kernel */
+ ret_val = cdev_add(&sep_context.cdev, sep_context.device_number, 1);
+
+ if (ret_val) {
+ edbg("sep_driver:cdev_add failed, retval is %d\n",
+ ret_val);
+ goto end_function_unregister_devnum;
+ }
+
+#ifndef DX_CC52_SUPPORT
+ goto end_function;
+#endif
+#ifdef DX_CC52_SUPPORT
+
+ sep_context.singleton_device_number = MKDEV(major, 1);
+
+ /* init singleton cdev */
+ cdev_init(&sep_context.singleton_cdev, &singleton_file_operations);
+ sep_context.singleton_cdev.owner = THIS_MODULE;
+
+ /* register the driver with the kernel fs */
+ ret_val = cdev_add(&sep_context.singleton_cdev,
+ sep_context.singleton_device_number,
+ 1);
+ if (ret_val) {
+ edbg("sep_driver:cdev_add 2 failed, retval is %d\n",
+ ret_val);
+ goto end_function_unregister_first_dev;
+ }
+
+ goto end_function;
+
+end_function_unregister_first_dev:
+
+ /* delete the first device */
+ cdev_del(&sep_context.cdev);
+
+#endif
+
+end_function_unregister_devnum:
+
+#ifdef DX_CC52_SUPPORT
+ /* unregister dev numbers */
+ unregister_chrdev_region(sep_context.device_number, 2);
+#else
+ unregister_chrdev_region(sep_context.device_number, 1);
+#endif
+
+end_function:
+
+ return ret_val;
+}
+
+/*
+ this function unregisters driver from fs
+*/
+static void sep_unregister_driver_from_fs(void)
+{
+ /*-------------------
+ CODE
+ ---------------------*/
+
+ cdev_del(&sep_context.cdev);
+
+#ifdef DX_CC52_SUPPORT
+ cdev_del(&sep_context.singleton_cdev);
+
+ /* unregister dev numbers */
+ unregister_chrdev_region(sep_context.device_number, 2);
+#else
+ unregister_chrdev_region(sep_context.device_number, 1);
+#endif
+
+}
+
+/*
+ reconfig the shared area between HOST and SEP - needed in case
+ the DX_CC_Init function was called before OS loading
+*/
+static int sep_reconfig_shared_area(void)
+{
+ int ret_val;
+
+ /*----------------------
+ CODE
+ --------------------------*/
+
+ ret_val = 0;
+
+#if SEP_DRIVER_RECONFIG_MESSAGE_AREA
+
+ /* send the new SHARED MESSAGE AREA to the SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR1_REG_ADDR,
+ sep_context.phys_shared_area_addr);
+
+ /* poll for SEP response */
+ ret_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+ while (ret_val != 0xffffffff &&
+ ret_val != sep_context.phys_shared_area_addr) {
+ ret_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+ }
+
+ /* check the return value (register) */
+ if (ret_val != sep_context.phys_shared_area_addr)
+ ret_val = -ENOMEM;
+
+
+#endif
+
+ return ret_val;
+}
+
+static void sep_init_context(void)
+{
+ /*-------------
+ CODE
+ ----------------*/
+
+ /* zero fields */
+ sep_context.num_lli_tables_created = 0;
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+ sep_context.in_num_pages = 0;
+ sep_context.out_num_pages = 0;
+ sep_context.in_map_array = 0;
+ sep_context.out_map_array = 0;
+ sep_context.in_map_num_entries = 0;
+ sep_context.out_map_num_entries = 0;
+ sep_context.shared_area_dma_addr = 0;
+
+
+ /* init ioctl mutex */
+ mutex_init(&sep_context.ioctl_mutex);
+
+ /* calculate the shared_area_size */
+ sep_context.shared_area_size =
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+}
+
+
+/*--------------------------------------------------------------
+ init function
+----------------------------------------------------------------*/
+static int __init sep_init(void)
+{
+ /* return value */
+ int ret_val;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+
+ dbg("SEP Driver:-------->Init start\n");
+
+ ret_val = 0;
+
+ ret_val = sep_register_driver_to_device();
+ if (ret_val) {
+ edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n",
+ ret_val);
+ goto end_function_unregister_from_fs;
+ }
+
+ /* initialize the context fields */
+ sep_init_context();
+
+ /* allocate the shared area */
+ if (sep_map_and_alloc_shared_area(&sep_context)) {
+ ret_val = -ENOMEM;
+ /* allocation failed */
+ goto end_function;
+ }
+
+ /* initialize wait queue */
+ init_waitqueue_head(&sep_context.event);
+
+#ifdef DX_CC52_SUPPORT
+ /* init the caller id */
+ ret_val = sep_init_caller_id();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+#endif
+
+ /* reconfig the shared area - needed in case that OS
+ was initialized after the DX_CC_Init */
+ ret_val = sep_reconfig_shared_area();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+ /* register driver to fs */
+ ret_val = sep_register_driver_to_fs();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+ goto end_function;
+
+end_function_deallocate_sep_shared_area:
+
+ /* de-allocate shared area */
+ sep_unmap_and_free_shared_area(&sep_context);
+
+end_function:
+
+ dbg("SEP Driver:<-------- Init end\n");
+
+ return ret_val;
+}
+
+
+
+
+/*-------------------------------------------------------------
+ exit function
+--------------------------------------------------------------*/
+static void __exit sep_exit(void)
+{
+ /*-----------------------------
+ CODE
+ --------------------------------*/
+
+ dbg("SEP Driver:--------> Exit start\n");
+
+ /* unregister from fs */
+ sep_unregister_driver_from_fs();
+
+ /* free shared area */
+ sep_unmap_and_free_shared_area(&sep_context);
+
+ iounmap(sep_context.rar_virtual_address);
+ iounmap((void *)sep_context.reg_addr);
+
+ sep_context.rar_virtual_address = NULL;
+ sep_context.reg_addr = NULL;
+
+ dbg("SEP Driver:<-------- Exit end\n");
+}
+
+
+/*
+ interrupt handler function
+*/
+irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ /* int error */
+ irqreturn_t int_error;
+
+ /* reg value */
+ u32 reg_val;
+
+ /*-----------------------------
+ CODE
+ -----------------------------*/
+
+ int_error = IRQ_HANDLED;
+
+ /* read the IRR register to check if this is SEP interrupt */
+ reg_val = SEP_READ_REGISTER(&sep_context, HW_HOST_IRR_REG_ADDR);
+ edbg("SEP Interrupt - reg is %08x\n", reg_val);
+
+
+ /* check if this is reply interrupt from SEP */
+ if (reg_val & (0x1 << 13)) {
+ /* update the counter of reply messages */
+ sep_context.sep_to_host_reply_counter++;
+
+ /* wake up the waiting process */
+ wake_up(&sep_context.event);
+ } else {
+ int_error = IRQ_NONE;
+ goto end_function;
+ }
+
+ /* clear the interrupt */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_ICR_REG_ADDR, reg_val);
+
+end_function:
+
+ return int_error;
+}
+
+/*
+ this function calculates the size of data that can be inserted into the lli
+ table from this array; the condition is that either the table is full
+ (all entries are filled), or there are no more entries in the lli array
+*/
+u32 sep_calculate_lli_table_max_size(
+ struct sep_lli_entry_t *lli_in_array_ptr,
+ u32 num_array_entries)
+{
+ /* table data size */
+ u32 table_data_size;
+
+ /* counter */
+ u32 counter;
+
+ /*---------------------
+ CODE
+ ----------------------*/
+
+ table_data_size = 0;
+
+ /* calculate the data in the lli table till we fill the whole
+ table or till the data has ended */
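+ /* the last slot of every table is reserved for the info entry,
+ hence the SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1 bound below */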
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+ return table_data_size;
+}
+
+/*
+ this function builds one lli table from the lli_array according to
+ the given size of data
+*/
+static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr,
+ struct sep_lli_entry_t *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* current table data size */
+ u32 curr_table_data_size;
+
+ /* counter of lli array entry */
+ u32 array_counter;
+
+ /*-----------------------
+ CODE
+ ---------------------------*/
+
+ dbg("SEP Driver:--------> sep_build_lli_table start\n");
+
+ /* init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
+ *num_table_entries_ptr = 1;
+
+ edbg("SEP Driver:table_data_size is %x\n", table_data_size);
+
+ /* fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ SEP_CHANGE_ENDIANNESS(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ SEP_CHANGE_ENDIANNESS(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ edbg("SEP Driver:lli_table_ptr is %08x\n", (u32)lli_table_ptr);
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ edbg("SEP Driver:curr_table_data_size > table_data_size\n");
+
+ /* update the size of block in the table */
+ lli_table_ptr->block_size -=
+ SEP_CHANGE_ENDIANNESS((curr_table_data_size - table_data_size));
+
+ /* update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size);
+
+ /* update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* advance to the next entry in the lli_array */
+ array_counter++;
+
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* move to the next entry in table */
+ lli_table_ptr++;
+ }
+
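+ /* the last entry of each table is an info entry: its bus_address
+ chains to the next table (0xffffffff terminates the chain) and its
+ block_size packs the next table's entry count in the top byte and
+ the table data size in the low 24 bits */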
+ /* set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ edbg("SEP Driver:lli_table_ptr is %08x\n", (u32)lli_table_ptr);
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+
+ /* set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+ edbg("SEP Driver:*num_processed_entries_ptr is %x\n",
+ *num_processed_entries_ptr);
+
+
+ dbg("SEP Driver:<-------- sep_build_lli_table end\n");
+
+ return;
+}
+
+/*
+ this function goes over the list of the created tables and
+ prints all the data
+*/
+static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr,
+ u32 num_table_entries,
+ u32 table_data_size)
+{
+ u32 table_count;
+
+ u32 entries_count;
+ /*-----------------------------
+ CODE
+ -------------------------------*/
+
+ dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
+
+ table_count = 1;
+ while ((u32)lli_table_ptr != 0xffffffff) {
+ edbg("SEP Driver: lli table %08x, table_data_size is %x\n",
+ table_count,
+ table_data_size);
+ edbg("SEP Driver: num_table_entries is %x\n",
+ num_table_entries);
+
+ /* print entries of the table (without info entry) */
+ for (entries_count = 0;
+ entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ edbg("SEP Driver:lli_table_ptr address is %08x\n",
+ (u32)lli_table_ptr);
+ edbg("SEP Driver:phys address is %08x\
+ block size is %x\n",
+ lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+
+ /* point to the info entry */
+ lli_table_ptr--;
+
+ edbg("SEP Driver:phys lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+ edbg("SEP Driver:phys lli_table_ptr->physical_address\
+ is %08x\n",
+ lli_table_ptr->bus_address);
+
+
+ table_data_size =
+ SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size) & 0xffffff;
+
+ num_table_entries =
+ (SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size) >> 24) & 0xff;
+
+ lli_table_ptr = (struct sep_lli_entry_t *)
+ (SEP_CHANGE_ENDIANNESS(lli_table_ptr->bus_address));
+
+ edbg("SEP Driver:phys table_data_size is %x\
+ num_table_entries is %x lli_table_ptr is%x\n",
+ table_data_size, num_table_entries, (u32)lli_table_ptr);
+
+ if ((u32)lli_table_ptr != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context,
+ (u32)lli_table_ptr);
+ table_count++;
+ }
+
+ dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
+}
+
+
+static void sep_prepare_empty_lli_table(dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ /* lli table pointer */
+ struct sep_lli_entry_t *lli_table_ptr;
+
+ /*---------------------
+ CODE
+ ------------------------*/
+
+ /* find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry_t *)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
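+ /* an empty table is a single zero-length data entry followed by
+ the 0xFFFFFFFF info terminator, hence two entries in total */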
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* set the output parameter value */
+ *lli_table_addr_ptr = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* update the number of created tables */
+ sep_context.num_lli_tables_created++;
+
+}
+
+/*
+ This function creates the input and output dma tables for
+ symmetric operations (AES/DES) according to the block size from LLI arrays
+*/
+static int sep_construct_dma_tables_from_lli(
+ struct sep_lli_entry_t *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry_t *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ /* points to the area where next lli table can be allocated */
+ u32 lli_table_alloc_addr;
+
+ /* input lli table */
+ struct sep_lli_entry_t *in_lli_table_ptr;
+
+ /* output lli table */
+ struct sep_lli_entry_t *out_lli_table_ptr;
+
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_in_entry_ptr;
+
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_out_entry_ptr;
+
+ /* points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry;
+
+ /* points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry;
+
+ /* max size of the input table */
+ u32 in_table_data_size;
+
+ /* max size of the output table */
+ u32 out_table_data_size;
+
+ /* flag that signifies whether these are the first tables built from the arrays */
+ u32 first_table_flag;
+
+ /* the data size that should be in table */
+ u32 table_data_size;
+
+ /* number of entries in the input table */
+ u32 num_entries_in_table;
+
+ /* number of entries in the output table */
+ u32 num_entries_out_table;
+
+ /*---------------------
+ CODE
+ ------------------------*/
+
+ dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
+
+ /* initiate to point after the message area */
+ lli_table_alloc_addr = (u32)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ (sep_context.num_lli_tables_created *
+ (sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+
+ current_in_entry = 0;
+ current_out_entry = 0;
+ first_table_flag = 1;
+ info_in_entry_ptr = 0;
+ info_out_entry_ptr = 0;
+
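+ /* each iteration carves one input table and one output table out
+ of the synchronic DMA tables area; the info entries of the
+ previously built pair are then patched to chain to the new pair */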
+ /* loop till all the entries in the in array are processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* set the new input and output tables */
+ in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* set the first output tables */
+ out_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ /* update the number of the lli tables created */
+ sep_context.num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry));
+
+ /* calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry));
+
+ edbg("SEP Driver:in_table_data_size is %x\n", in_table_data_size);
+ edbg("SEP Driver:out_table_data_size is %x\n", out_table_data_size);
+
+ /* check where the data is smallest */
+ table_data_size = in_table_data_size;
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ /* now round the table size down to a multiple of the block size */
+ table_data_size = (table_data_size / block_size) * block_size;
+
+ edbg("SEP Driver:table_data_size is %x\n", table_data_size);
+
+ /* construct input lli table */
+ sep_build_lli_table(&lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* construct output lli table */
+ sep_build_lli_table(&lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* if info entry is null - this is the first table built */
+ if (info_in_entry_ptr == 0) {
+ /* set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(&sep_context, in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(&sep_context, out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ edbg("SEP Driver:output lli_table_in_ptr is %08x\n",
+ (u32)*lli_table_in_ptr);
+ edbg("SEP Driver:output lli_table_out_ptr is %08x\n",
+ (u32)*lli_table_out_ptr);
+ } else {
+ /* update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) | (table_data_size);
+
+ /* update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(&sep_context,
+ out_lli_table_ptr);
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) | (table_data_size);
+ }
+
+ /* save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
+
+ edbg("SEP Driver:output num_entries_out_table is %x\n",
+ (u32)num_entries_out_table);
+ edbg("SEP Driver:output info_in_entry_ptr is %x\n",
+ (u32)info_in_entry_ptr);
+ edbg("SEP Driver:output info_out_entry_ptr is %x\n",
+ (u32)info_out_entry_ptr);
+ }
+
+ /* print input tables */
+ sep_debug_print_lli_tables(
+ (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context, *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+
+ /* print output tables */
+ sep_debug_print_lli_tables(
+ (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context, *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+
+ dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
+
+ return 0;
+}
+
+
+/*
+ This function maps the kernel virtual buffer for DMA and constructs a
+ basic lli array with a single entry that holds the bus address of the
+ buffer and its size
+*/
+int sep_lock_kernel_pages(u32 kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry_t **lli_array_ptr,
+ int in_out_flag)
+
+{
+ /* error */
+ int error;
+
+ /* array of lli */
+ struct sep_lli_entry_t *lli_array;
+
+ /* map array */
+ struct sep_dma_map *map_array;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
+
+ error = 0;
+
+ edbg("SEP Driver: kernel_virt_addr is %08x\n", kernel_virt_addr);
+ edbg("SEP Driver: data_size is %x\n", data_size);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * 1, GFP_ATOMIC);
+ if (!lli_array) {
+ edbg("SEP Driver: kmalloc for lli_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ map_array = kmalloc(sizeof(struct sep_dma_map) * 1 , GFP_ATOMIC);
+ if (!map_array) {
+ edbg("SEP Driver: kmalloc for map_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error;
+ }
+
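+ /* the kernel buffer is assumed to be physically contiguous (for example
+ kmalloc memory), so a single dma_map_single call and a one entry lli
+ array are enough to describe it */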
+ map_array[0].dma_addr =
+ dma_map_single(sep_context.dev_ptr ,
+ (void *)kernel_virt_addr,
+ data_size,
+ DMA_BIDIRECTIONAL);
+ map_array[0].size = data_size;
+
+
+ /* set the start address of the first page - app data may not start at
+ the beginning of the page */
+ lli_array[0].bus_address = (u32)map_array[0].dma_addr;
+ lli_array[0].block_size = map_array[0].size;
+
+ /* debug print */
+ edbg("lli_array[0].bus_address is %08x, \
+ lli_array[0].block_size is %x\n",
+ lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* set the output parameters */
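+ /* note that no struct page array exists for a kernel buffer, so the
+ page array pointer is left NULL and only the DMA mapping is undone
+ when the buffers are freed */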
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep_context.in_num_pages = 1;
+ sep_context.in_page_array = 0;
+ sep_context.in_map_array = map_array;
+ sep_context.in_map_num_entries = 1;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep_context.out_num_pages = 1;
+ sep_context.out_page_array = 0;
+ sep_context.out_map_array = map_array;
+ sep_context.out_map_num_entries = 1;
+ }
+
+ goto end_function;
+
+end_function_with_error:
+
+ kfree(lli_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
+
+ return error;
+}
+
+/*
+ This function locks all the physical pages of the application virtual buffer
+ and constructs a basic lli array, where each entry holds the physical page
+ address and the size of the application data held in that physical page
+*/
+int sep_lock_user_pages(u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry_t **lli_array_ptr,
+ int in_out_flag)
+
+{
+ /* error */
+ int error;
+
+ /* the page of the end address of the user space buffer */
+ u32 end_page;
+
+ /* the page of the start address of the user space buffer */
+ u32 start_page;
+
+ /* the range in pages */
+ u32 num_pages;
+
+ /* array of pointers to pages */
+ struct page **page_array;
+
+ /* array of lli */
+ struct sep_lli_entry_t *lli_array;
+
+ /* map array */
+ struct sep_dma_map *map_array;
+
+ /* count */
+ u32 count;
+
+ /* result */
+ int result;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_lock_user_pages start\n");
+
+ error = 0;
+
+ /* set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ edbg("SEP Driver: app_virt_addr is %08x\n", app_virt_addr);
+ edbg("SEP Driver: data_size is %x\n", data_size);
+ edbg("SEP Driver: start_page is %x\n", start_page);
+ edbg("SEP Driver: end_page is %x\n", end_page);
+ edbg("SEP Driver: num_pages is %x\n", num_pages);
+
+ edbg("SEP Driver: starting page_array malloc\n");
+
+ /* allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ edbg("SEP Driver: kmalloc for page_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ edbg("SEP Driver: kmalloc for map_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages,
+ GFP_ATOMIC);
+ if (!lli_array) {
+ edbg("SEP Driver: kmalloc for lli_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+ edbg("SEP Driver: starting get_user_pages\n");
+
+ /* convert the application virtual address into a set of physical pages */
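+ /* pin the user pages in memory for read and write access (write = 1,
+ force = 0); pointers to the pinned struct pages are returned in
+ page_array */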
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current,
+ current->mm,
+ app_virt_addr,
+ num_pages,
+ 1,
+ 0,
+ page_array,
+ 0);
+ up_read(&current->mm->mmap_sem);
+
+ /* check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ edbg("SEP Driver: not all pages locked by get_user_pages\n");
+
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ edbg("SEP Driver: get_user_pages succeeded\n");
+
+ /* fill the array using page array data and map the pages - this action
+ will also flush the cache as needed */
+ for (count = 0; count < num_pages; count++) {
+ /* fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(sep_context.dev_ptr, page_array[count],
+ 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ map_array[count].size = PAGE_SIZE;
+
+ /* fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ edbg("lli_array[%x].bus_address is %08x, \
+ lli_array[%x].block_size is %x\n",
+ count, lli_array[count].bus_address,
+ count,
+ lli_array[count].block_size);
+ }
+
+ /* check the offset for the first page -
+ data may not start at the beginning of the page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+ /* check that not all the data is in the first page only */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ edbg("lli_array[0].bus_address is %08x, \
+ lli_array[0].block_size is %x\n",
+ lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ edbg("lli_array[%x].bus_address is %08x, \
+ lli_array[%x].block_size is %x\n",
+ num_pages - 1, lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+
+ /* set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep_context.in_num_pages = num_pages;
+ sep_context.in_page_array = page_array;
+ sep_context.in_map_array = map_array;
+ sep_context.in_map_num_entries = num_pages;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep_context.out_num_pages = num_pages;
+ sep_context.out_page_array = page_array;
+ sep_context.out_map_array = map_array;
+ sep_context.out_map_num_entries = num_pages;
+ }
+
+ goto end_function;
+
+end_function_with_error3:
+
+ /* free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+
+ kfree(map_array);
+
+end_function_with_error1:
+
+ /* free page array */
+ kfree(page_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
+
+ return error;
+}
+
+/*
+ This function prepares only the input DMA table for synchronous symmetric
+ operations (HASH)
+*/
+int sep_prepare_input_dma_table(unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool isKernelVirtualAddress)
+
+{
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_entry_ptr;
+
+ /* array of lli entries built from the locked pages */
+ struct sep_lli_entry_t *lli_array_ptr;
+
+ /* points to the first entry to be processed in the lli_in_array */
+ u32 current_entry;
+
+ /* num entries in the virtual buffer */
+ u32 sep_lli_entries;
+
+ /* lli table pointer */
+ struct sep_lli_entry_t *in_lli_table_ptr;
+
+ /* the total data in one table */
+ u32 table_data_size;
+
+ /* number of entries in lli table */
+ u32 num_entries_in_table;
+
+ /* next table address */
+ u32 lli_table_alloc_addr;
+
+ /* result */
+ u32 result;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
+
+ edbg("SEP Driver:data_size is %x\n", data_size);
+ edbg("SEP Driver:block_size is %x\n", block_size);
+
+ /* initialize the pages pointers */
+ sep_context.in_page_array = 0;
+ sep_context.in_num_pages = 0;
+
+ /* set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (u32)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
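+ /* tables live in the synchronic DMA tables region of the shared area;
+ each table occupies a fixed slot of SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
+ entries and num_lli_tables_created indexes the next free slot */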
+
+ if (data_size == 0) {
+
+ /* special case - create an empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(lli_table_ptr,
+ num_entries_ptr,
+ table_data_size_ptr);
+
+ goto end_function;
+ }
+
+ /* check if the pages are in Kernel Virtual Address layout */
+ if (isKernelVirtualAddress == true)
+ /* lock the pages of the kernel buffer and translate them to pages */
+ result = sep_lock_kernel_pages(app_virt_addr,
+ data_size,
+ &lli_array_ptr,
+ SEP_DRIVER_IN_FLAG);
+ else
+ /* lock the pages of the user buffer and translate them to pages */
+ result = sep_lock_user_pages(app_virt_addr,
+ data_size,
+ &lli_array_ptr,
+ SEP_DRIVER_IN_FLAG);
+
+ if (result)
+ return result;
+
+ edbg("SEP Driver:output sep_in_num_pages is %x\n",
+ sep_context.in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = 0;
+ sep_lli_entries = sep_context.in_num_pages;
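+ /* one lli entry was built for every locked page, so the source array
+ holds in_num_pages entries */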
+
+ /* loop till all the entries in the in array are processed */
+ while (current_entry < sep_lli_entries) {
+ /* set the new input and output tables */
+ in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* update the number of created tables */
+ sep_context.num_lli_tables_created++;
+
+ /* calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry));
+
+ /* now round the table size down so that it is a multiple of the block size */
+ table_data_size = (table_data_size / block_size) * block_size;
+
+ edbg("SEP Driver:output table_data_size is %x\n", table_data_size);
+
+ /* construct input lli table */
+ sep_build_lli_table(&lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ if (info_entry_ptr == 0) {
+ /* set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ edbg("SEP Driver:output lli_table_in_ptr is %08x\n",
+ (u32)*lli_table_ptr);
+ } else {
+ /* update the info entry of the previous in table */
+ info_entry_ptr->bus_address = sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ info_entry_ptr->block_size = ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+
+ /* save the pointer to the info entry of the current tables */
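+ /* the info entry is always the last entry of a table; it is overwritten
+ with the address of the next table if another table is chained later */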
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+
+ /* print input tables */
+ sep_debug_print_lli_tables((struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context, *lli_table_ptr),
+ *num_entries_ptr,
+ *table_data_size_ptr);
+
+ /* the array of the pages */
+ kfree(lli_array_ptr);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
+
+ return 0;
+
+}
+
+/*
+ This function builds input and output DMA tables for synchronous
+ symmetric operations (AES, DES, HASH). It also makes sure that each
+ table holds a multiple of the block size
+*/
+int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool isKernelVirtualAddress)
+
+{
+ /* array of lli entries for the input buffer */
+ struct sep_lli_entry_t *lli_in_array;
+
+ /* array of lli entries for the output buffer */
+ struct sep_lli_entry_t *lli_out_array;
+
+ /* result */
+ int result;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
+
+ result = 0;
+
+ if (data_size == 0) {
+ /* prepare empty table for input and output */
+ sep_prepare_empty_lli_table(lli_table_in_ptr,
+ in_num_entries_ptr,
+ table_data_size_ptr);
+
+
+ sep_prepare_empty_lli_table(lli_table_out_ptr,
+ out_num_entries_ptr,
+ table_data_size_ptr);
+
+ goto end_function;
+ }
+
+ /* initialize the pages pointers */
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+
+ /* check if the pages are in Kernel Virtual Address layout */
+ if (isKernelVirtualAddress == true) {
+ /* lock the pages of the kernel buffer and
+ translate them to pages */
+ result = sep_lock_kernel_pages(app_virt_in_addr,
+ data_size,
+ &lli_in_array,
+ SEP_DRIVER_IN_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_kernel_pages for input \
+ virtual buffer failed\n");
+ goto end_function;
+ }
+ } else {
+ /* lock the pages of the user buffer and
+ translate them to pages */
+ result = sep_lock_user_pages(app_virt_in_addr,
+ data_size,
+ &lli_in_array,
+ SEP_DRIVER_IN_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_user_pages for input virtual\
+ buffer failed\n");
+ goto end_function;
+ }
+ }
+
+ if (isKernelVirtualAddress == true) {
+ result = sep_lock_kernel_pages(app_virt_out_addr,
+ data_size,
+ &lli_out_array,
+ SEP_DRIVER_OUT_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_kernel_pages \
+ for output virtual buffer failed\n");
+ goto end_function_with_error1;
+ }
+ } else {
+ result = sep_lock_user_pages(app_virt_out_addr,
+ data_size,
+ &lli_out_array,
+ SEP_DRIVER_OUT_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_user_pages \
+ for output virtual buffer failed\n");
+ goto end_function_with_error1;
+ }
+ }
+
+
+ edbg("sep_in_num_pages is %x\n", sep_context.in_num_pages);
+ edbg("sep_out_num_pages is %x\n", sep_context.out_num_pages);
+ edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+
+ /* call the function that creates the tables from the lli arrays */
+ result = sep_construct_dma_tables_from_lli(lli_in_array,
+ sep_context.in_num_pages,
+ lli_out_array,
+ sep_context.out_num_pages,
+ block_size,
+ lli_table_in_ptr,
+ lli_table_out_ptr,
+ in_num_entries_ptr,
+ out_num_entries_ptr,
+ table_data_size_ptr);
+ if (result) {
+ edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
+ goto end_function_with_error2;
+ }
+
+ /* fall through - free the lli entry arrays */
+
+ edbg("in_num_entries_ptr is %08x\n", *in_num_entries_ptr);
+ edbg("out_num_entries_ptr is %08x\n", *out_num_entries_ptr);
+ edbg("table_data_size_ptr is %08x\n", *table_data_size_ptr);
+
+
+end_function_with_error2:
+
+ kfree(lli_out_array);
+
+end_function_with_error1:
+
+ kfree(lli_in_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table\
+ end result = %d\n", (int)result);
+
+ return result;
+
+}
+
+
+/* this function frees all preallocated dma resources */
+int sep_free_dma_table_data_handler(void)
+{
+ int count;
+
+ /*-------------------------
+ CODE
+ -----------------------------*/
+
+ dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
+
+ /* free input map array */
+ if (sep_context.in_map_array) {
+ for (count = 0; count < sep_context.in_num_pages; count++) {
+ dma_unmap_page(sep_context.dev_ptr ,
+ sep_context.in_map_array[count].dma_addr,
+ sep_context.in_map_array[count].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ kfree(sep_context.in_map_array);
+ }
+
+ /* free output map array */
+ if (sep_context.out_map_array) {
+ for (count = 0; count < sep_context.out_num_pages; count++) {
+ dma_unmap_page(sep_context.dev_ptr ,
+ sep_context.out_map_array[count].dma_addr,
+ sep_context.out_map_array[count].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ kfree(sep_context.out_map_array);
+ }
+
+ /* free page cache for input */
+ if (sep_context.in_page_array) {
+ for (count = 0; count < sep_context.in_num_pages; count++)
+ page_cache_release(sep_context.in_page_array[count]);
+
+ kfree(sep_context.in_page_array);
+
+ }
+
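+ /* the output pages may have been written by the SEP DMA engine, so
+ mark them dirty before releasing to make sure the data is kept */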
+ if (sep_context.out_page_array) {
+ for (count = 0; count < sep_context.out_num_pages; count++) {
+ if (!PageReserved(sep_context.out_page_array[count]))
+ SetPageDirty(sep_context.out_page_array[count]);
+ page_cache_release(sep_context.out_page_array[count]);
+ }
+
+ kfree(sep_context.out_page_array);
+ }
+
+ /* reset all the values */
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+ sep_context.in_num_pages = 0;
+ sep_context.out_num_pages = 0;
+ sep_context.num_lli_tables_created = 0;
+ sep_context.in_map_array = 0;
+ sep_context.out_map_array = 0;
+ sep_context.in_map_num_entries = 0;
+ sep_context.out_map_num_entries = 0;
+
+
+ dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
+
+ return 0;
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
--
1.6.0.4
From: Mark Allyn <mark.a.allyn@intel.com>
Date: Wed, 28 Apr 2010 14:39:30 -0700
Subject: RFC [PATCH 3/4] staging: sep: update program files for SEP December 2009 release

Signed-off-by: Mark A. Allyn <mark.a.allyn@intel.com>
---
drivers/staging/sep/sep_driver.c | 2715 --------------------
drivers/staging/sep/sep_ext_with_pci_driver.c | 548 ++++
drivers/staging/sep/sep_main_mod.c | 3330 +++++++++++++++++++++++++
3 files changed, 3878 insertions(+), 2715 deletions(-)
delete mode 100644 drivers/staging/sep/sep_driver.c
create mode 100644 drivers/staging/sep/sep_ext_with_pci_driver.c
create mode 100644 drivers/staging/sep/sep_main_mod.c

diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index 8888073..0000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2715 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define CRYS_SEP_ROM_length 0x4000
-#define CRYS_SEP_ROM_start_address 0x8000C000UL
-#define CRYS_SEP_ROM_start_address_offset 0xC000UL
-#define SEP_ROM_BANK_register 0x80008420UL
-#define SEP_ROM_BANK_register_offset 0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
- /* Index variables */
- unsigned long i, k, j;
- u32 reg;
- u32 error;
- u32 warning;
-
- /* Loading ROM from SEP_ROM_image.h file */
- k = sizeof(CRYS_SEP_ROM);
-
- edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
- edbg("SEP Driver: k is %lu\n", k);
- edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
- edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
- for (i = 0; i < 4; i++) {
- /* write bank */
- sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
- for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
- sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
- k = k - 4;
-
- if (k == 0) {
- j = CRYS_SEP_ROM_length;
- i = 4;
- }
- }
- }
-
- /* reset the SEP */
- sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
- /* poll for SEP ROM boot finish */
- do
- reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg);
-
- edbg("SEP Driver: ROM polling ended\n");
-
- switch (reg) {
- case 0x1:
- /* fatal error - read erro status from GPRO */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver: ROM polling case 1\n");
- break;
- case 0x4:
- /* Cold boot ended successfully */
- case 0x8:
- /* Warmboot ended successfully */
- case 0x10:
- /* ColdWarm boot ended successfully */
- error = 0;
- case 0x2:
- /* Boot First Phase ended */
- warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- case 0x20:
- edbg("SEP Driver: ROM polling case %d\n", reg);
- break;
- }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int , 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
- mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- * sep_load_firmware - copy firmware cache/resident
- * @sep: device we are loading
- *
- * This functions copies the cache and resident from their source
- * location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
- const struct firmware *fw;
- char *cache_name = "sep/cache.image.bin";
- char *res_name = "sep/resident.image.bin";
- int error;
-
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
- /* load cache */
- error = request_firmware(&fw, cache_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request cache fw\n");
- return error;
- }
- edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
- memcpy(sep->rar_addr, (void *)fw->data, fw->size);
- sep->cache_size = fw->size;
- release_firmware(fw);
-
- sep->resident_bus = sep->rar_bus + sep->cache_size;
- sep->resident_addr = sep->rar_addr + sep->cache_size;
-
- /* load resident */
- error = request_firmware(&fw, res_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request res fw\n");
- return error;
- }
- edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
- memcpy(sep->resident_addr, (void *) fw->data, fw->size);
- sep->resident_size = fw->size;
- release_firmware(fw);
-
- edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
- sep->resident_addr, (unsigned long long)sep->resident_bus,
- sep->rar_addr, (unsigned long long)sep->rar_bus);
- return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- *
- * Allocate a shared buffer in host memory that can be used by both the
- * kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
- unsigned long size)
-{
- /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- /* set the bus address of the shared area */
- edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
- size, sep->shared_addr, (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- *
- * Free the shared area allocated to the security processor. The
- * processor must have finished with this and any final posted
- * writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
- dma_free_coherent(&sep->pdev->dev, size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_virt_to_bus - convert bus/virt addresses
- *
- * Returns the bus address inside the shared area according
- * to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
- edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
- virt_address);
- return pa;
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-
-/**
- * sep_try_open - attempt to open a SEP device
- * @sep: device to attempt to open
- *
- * Atomically attempt to get ownership of a SEP device.
- * Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
- if (!test_and_set_bit(0, &sep->in_use))
- return 1;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of sep device
- * @filp: file handle to sep device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node. Must also release the memory data pool
- * allocations.
- *
- * Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
- if (sep_dev == NULL)
- return -ENODEV;
-
- /* check the blocking mode */
- if (filp->f_flags & O_NDELAY) {
- if (sep_try_open(sep_dev) == 0)
- return -EAGAIN;
- } else
- if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
- return -EINTR;
-
- /* Bind to the device, we only have one which makes it easy */
- filp->private_data = sep_dev;
- /* release data pool allocations */
- sep_dev->data_pool_bytes_allocated = 0;
- return 0;
-}
-
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaenous opens that means this method is called when the
- * final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
- /* Ensure any blocked open progresses */
- clear_bit(0, &sep->in_use);
- wake_up(&sep_event);
- return 0;
-}
-
-/*---------------------------------------------------------------
- map function - this functions maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
-
- dbg("-------->SEP Driver: mmap start\n");
-
- /* check that the size of the mapped range is as the size of the message
- shared area */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- edbg("SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
- return -EAGAIN;
- }
-
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- /* get bus address */
- bus_addr = sep->shared_bus;
-
- edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- edbg("SEP Driver remap_page_range failed\n");
- printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
- return -EAGAIN;
- }
-
- dbg("SEP Driver:<-------- mmap end\n");
-
- return 0;
-}
-
-
-/*-----------------------------------------------
- poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
- unsigned long count;
- unsigned int mask = 0;
- unsigned long retval = 0; /* flow id */
- struct sep_device *sep = filp->private_data;
-
- dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
- while (sep->send_ct != (retval & 0x7FFFFFFF)) {
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
- }
-
- sep->reply_ct++;
-#else
- /* add the event to the polling wait table */
- poll_wait(filp, &sep_event, wait);
-
-#endif
-
- edbg("sep->send_ct is %lu\n", sep->send_ct);
- edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
- /* check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- edbg("retval is %lu\n", retval);
- /* check if the this is sep reply or request */
- if (retval >> 31) {
- edbg("SEP Driver: sep request in\n");
- /* request */
- mask |= POLLOUT | POLLWRNORM;
- } else {
- edbg("SEP Driver: sep reply in\n");
- mask |= POLLIN | POLLRDNORM;
- }
- }
- dbg("SEP Driver:<-------- poll exit\n");
- return mask;
-}
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* address of time as seen by the kernel */
-
-
- dbg("sep:sep_set_time start\n");
-
- do_gettimeofday(&time);
-
- /* set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
- edbg("SEP Driver:time_addr is %p\n", time_addr);
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: sep device
- *
- * Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_command_handler start\n");
-
- mutex_lock(&sep_mutex);
- sep_set_time(sep);
-
- /* FIXME: flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
- /* update counter */
- sep->send_ct++;
- /* send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
- dbg("SEP Driver:<-------- sep_send_command_handler end\n");
- mutex_unlock(&sep_mutex);
- return;
-}
-
-/**
- * sep_send_reply_command_handler - kick off a command reply
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_reply_command_handler start\n");
-
- /* flash cache */
- flush_cache_all();
-
- sep_dump_message(sep);
-
- mutex_lock(&sep_mutex);
- sep->send_ct++; /* update counter */
- /* send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- /* update both counters */
- sep->send_ct++;
- sep->reply_ct++;
- mutex_unlock(&sep_mutex);
- dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
- This function handles the allocate data pool memory request
- This function returns calculates the bus address of the
- allocated memory, and the offset of this area from the mapped address.
- Therefore, the FVOs in user space can calculate the exact virtual
- address of this allocated memory
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- struct sep_driver_alloc_t command_args;
-
- dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
- if (error)
- goto end_function;
-
- /* allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
- command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
- /* write the memory back to the user space */
- error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
- if (error)
- goto end_function;
-
- /* set the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
- dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
- return error;
-}
-
-/*
- This function handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- void *virt_address;
- unsigned long va;
- unsigned long app_in_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
- error = -EINVAL;
- goto end_function;
- }
- /* copy the application data */
- error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
- return error;
-}
-
-/*
- this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* virtual address of dest application buffer */
- unsigned long app_out_address;
- /* virtual address of the data pool */
- unsigned long va;
- void *virt_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
- /* FIXME: These are incomplete all over the driver: what about + len
- and when doing that also overflows */
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* copy the application data */
- error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
- return error;
-}
-
-/*
- This function releases all the application virtual buffer physical pages,
- that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
- unsigned long count;
-
- if (dirtyFlag) {
- for (count = 0; count < num_pages; count++) {
- /* the out array was written, therefore the data was changed */
- if (!PageReserved(page_array_ptr[count]))
- SetPageDirty(page_array_ptr[count]);
- page_cache_release(page_array_ptr[count]);
- }
- } else {
- /* free in pages - the data was only read, therefore no update was done
- on those pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array_ptr[count]);
- }
-
- if (page_array_ptr)
- /* free the array */
- kfree(page_array_ptr);
-
- return 0;
-}
-
-/*
- This function locks all the physical pages of the kernel virtual buffer
- and construct a basic lli array, where each entry holds the physical
- page address and the size that application data holds in this physical pages
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct sep_lli_entry_t *lli_array;
- /* next kernel address to map */
- unsigned long next_kernel_address;
- unsigned long count;
-
- dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = kernel_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lx\n", start_page);
- edbg("SEP Driver: end_page is %lx\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the start address of the first page - app data may start not at
- the beginning of the page */
- lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* advance the address to the start of the next page */
- next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
- /* go from the second page to the prev before last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- next_kernel_address += PAGE_SIZE;
- }
-
- /* if more then 1 pages locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
- /* set the size of the last page */
- lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = 0;
-end_function:
- dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
- return 0;
-}
-
-/*
- This function locks all the physical pages of the application virtual buffer
- and construct a basic lli array, where each entry holds the physical page
- address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct page **page_array;
- struct sep_lli_entry_t *lli_array;
- unsigned long count;
- int result;
-
- dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lu\n", start_page);
- edbg("SEP Driver: end_page is %lu\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- /* allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- edbg("SEP Driver: kmalloc for page_array failed\n");
-
- error = -ENOMEM;
- goto end_function;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
-
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- /* convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
- up_read(&current->mm->mmap_sem);
-
- /* check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* flush the cache */
- for (count = 0; count < num_pages; count++)
- flush_dcache_page(page_array[count]);
-
- /* set the start address of the first page - app data may start not at
- the beginning of the page */
- lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* go from the second page to the prev before last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
-
- /* if more then 1 pages locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
- /* set the size of the last page */
- lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", app_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
- count, lli_array[count].physical_address,
- count, lli_array[count].block_size);
- }
-
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = page_array;
- goto end_function;
-
-end_function_with_error2:
- /* release the cache */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array[count]);
- kfree(lli_array);
-end_function_with_error1:
- kfree(page_array);
-end_function:
- dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
- return 0;
-}
-
-
-/*
- this function calculates the size of data that can be inserted into the lli
- table from this array the condition is that either the table is full
- (all etnries are entered), or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
- unsigned long table_data_size = 0;
- unsigned long counter;
-
- /* calculate the data in the out lli table if till we fill the whole
- table or till the data has ended */
- for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
- return table_data_size;
-}
-
-/*
- this functions builds ont lli table from the lli_array according to
- the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
- unsigned long curr_table_data_size;
- /* counter of lli array entry */
- unsigned long array_counter;
-
- dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
- /* init currrent table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
- lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
- curr_table_data_size += lli_table_ptr->block_size;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
- /* update the size of block in the table */
- lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
- /* update the physical address in the lli array */
- lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
- /* update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
- } else
- /* advance to the next entry in the lli_array */
- array_counter++;
-
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* set the info entry to default */
- lli_table_ptr->physical_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
- edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
- dbg("SEP Driver:<-------- sep_build_lli_table end\n");
- return;
-}
-
-/*
- this function goes over the list of the print created tables and
- prints all the data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
- unsigned long table_count;
- unsigned long entries_count;
-
- dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
- table_count = 1;
- while ((unsigned long) lli_table_ptr != 0xffffffff) {
- edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
- edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
- /* print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
- edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
- }
-
- /* point to the info entry */
- lli_table_ptr--;
-
- edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
- edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
- lli_table_ptr = (struct sep_lli_entry_t *)
- (lli_table_ptr->physical_address);
-
- edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
- if ((unsigned long) lli_table_ptr != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
- table_count++;
- }
- dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
- This function prepares only input DMA table for synhronic symmetric
- operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_ptr,
- unsigned long *num_entries_ptr,
- unsigned long *table_data_size_ptr,
- bool isKernelVirtualAddress)
-{
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_entry_ptr;
- /* array of pointers ot page */
- struct sep_lli_entry_t *lli_array_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_entry;
- /* num entries in the virtual buffer */
- unsigned long sep_lli_entries;
- /* lli table pointer */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* the total data in one table */
- unsigned long table_data_size;
- /* number of entries in lli table */
- unsigned long num_entries_in_table;
- /* next table address */
- void *lli_table_alloc_addr;
- unsigned long result;
-
- dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
- edbg("SEP Driver:data_size is %lu\n", data_size);
- edbg("SEP Driver:block_size is %lu\n", block_size);
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->in_num_pages = 0;
-
- if (data_size == 0) {
- /* special case - created 2 entries table with zero data */
- in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
- /* FIXME: Should the entry below not be for _bus */
- in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- in_lli_table_ptr->block_size = 0;
-
- in_lli_table_ptr++;
- in_lli_table_ptr->physical_address = 0xFFFFFFFF;
- in_lli_table_ptr->block_size = 0;
-
- *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- goto end_function;
- }
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
- else
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
- if (result)
- return result;
-
- edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = 0;
- sep_lli_entries = sep->in_num_pages;
-
- /* initiate to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* loop till all the entries in in array are not processed */
- while (current_entry < sep_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
- /* now round the table size down to a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
- /* free the lli entry array built from the pages */
- kfree(lli_array_ptr);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
- return 0;
-
-}
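
For reference, the info entry written above packs the entry count into the
high bits of block_size and the table's data size into the low bits. A
minimal stand-alone sketch of that packing; the shift and masks below are
assumptions standing in for SEP_NUM_ENTRIES_OFFSET_IN_BITS,
SEP_NUM_ENTRIES_MASK and SEP_TABLE_DATA_SIZE_MASK from the driver headers:

#include <stdio.h>

/* Assumed stand-ins for the driver's header constants. */
#define ENTRIES_SHIFT	24		/* matches the (num_entries << 24) above */
#define ENTRIES_MASK	0x7FUL		/* assumed width of the entry count */
#define DATA_SIZE_MASK	0x00FFFFFFUL	/* assumed width of the data size */

static unsigned long pack_info(unsigned long num_entries,
			       unsigned long table_data_size)
{
	return (num_entries << ENTRIES_SHIFT) | table_data_size;
}

int main(void)
{
	unsigned long info = pack_info(5, 4096);

	/* unpack the same way the handlers below read block_size */
	printf("entries=%lu data_size=%lu\n",
	       (info >> ENTRIES_SHIFT) & ENTRIES_MASK,
	       info & DATA_SIZE_MASK);
	return 0;
}
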
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size, from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
- struct sep_lli_entry_t *lli_in_array,
- unsigned long sep_in_lli_entries,
- struct sep_lli_entry_t *lli_out_array,
- unsigned long sep_out_lli_entries,
- unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
- /* points to the area where next lli table can be allocated: keep void *
- as there is pointer scaling to fix otherwise */
- void *lli_table_alloc_addr;
- /* input lli table */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* output lli table */
- struct sep_lli_entry_t *out_lli_table_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_in_entry_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_out_entry_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_in_entry;
- /* points to the first entry to be processed in the lli_out_array */
- unsigned long current_out_entry;
- /* max size of the input table */
- unsigned long in_table_data_size;
- /* max size of the output table */
- unsigned long out_table_data_size;
- /* flag that signifies whether this is the first table built from the arrays */
- unsigned long first_table_flag;
- /* the data size that should be in table */
- unsigned long table_data_size;
- /* number of entries in the input table */
- unsigned long num_entries_in_table;
- /* number of entries in the output table */
- unsigned long num_entries_out_table;
-
- dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
- /* initialize to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- current_in_entry = 0;
- current_out_entry = 0;
- first_table_flag = 1;
- info_in_entry_ptr = 0;
- info_out_entry_ptr = 0;
-
- /* loop until all the entries in the input array have been processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* set the first output tables */
- out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
- /* calculate the maximum size of data for output table */
- out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
- edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
- edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
- /* check where the data is smallest */
- table_data_size = in_table_data_size;
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /* now round the table size down to a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
- /* construct output lli table */
- sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
- /* if info entry is null - this is the first table built */
- if (info_in_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *in_num_entries_ptr = num_entries_in_table;
- *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
- edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
- /* update the info entry of the previous out table */
- info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
- edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
- edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
- edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
- /* print output tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
- dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
- return 0;
-}
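
The size matching done in each pass above reduces to taking the smaller of
the input and output capacities and rounding it down to a whole number of
cipher blocks. A small sketch of that arithmetic (the helper name is
illustrative, not driver code):

/* Sketch only: mirrors the min-then-round-down step above. */
static unsigned long usable_table_size(unsigned long in_size,
				       unsigned long out_size,
				       unsigned long block_size)
{
	unsigned long size = (in_size < out_size) ? in_size : out_size;

	/* round down to a multiple of the block size */
	return (size / block_size) * block_size;
}

For example, with in_size 5000, out_size 4600 and a 16-byte block this
yields 4592.
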
-
-
-/*
- This function builds input and output DMA tables for synchronous
- symmetric operations (AES, DES). It also ensures that each table's
- data size is a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
- /* array of lli entries for the input pages */
- struct sep_lli_entry_t *lli_in_array;
- /* array of lli entries for the output pages */
- struct sep_lli_entry_t *lli_out_array;
- int result = 0;
-
- dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true) {
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
- goto end_function;
- }
- } else {
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
- }
-
- if (isKernelVirtualAddress == true) {
- result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- } else {
- result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- }
- edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
- edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
- edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
- /* call the function that creates the tables from the lli arrays */
- result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
- if (result) {
- edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error2;
- }
-
- /* fall through - free the lli entry arrays */
- dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
- dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
- dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
- kfree(lli_out_array);
-end_function_with_error1:
- kfree(lli_in_array);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
- return result;
-
-}
-
-/*
- this function handles the request for creation of the DMA tables
- for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- /* command arguments */
- struct sep_driver_build_sync_table_t command_args;
-
- dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
- if (error)
- goto end_function;
-
- edbg("app_in_address is %08lx\n", command_args.app_in_address);
- edbg("app_out_address is %08lx\n", command_args.app_out_address);
- edbg("data_size is %lu\n", command_args.data_in_size);
- edbg("block_size is %lu\n", command_args.block_size);
-
- /* check if we need to build only input table or input/output */
- if (command_args.app_out_address)
- /* prepare input and output tables */
- error = sep_prepare_input_output_dma_table(sep,
- command_args.app_in_address,
- command_args.app_out_address,
- command_args.data_in_size,
- command_args.block_size,
- &command_args.in_table_address,
- &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
- else
- /* prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- command_args.app_in_address,
- command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
- if (error)
- goto end_function;
- /* copy to user */
- if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for freeing the dma table data for synchronous operations
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
- /* free input pages array */
- sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
- /* free output pages array if needed */
- if (sep->out_page_array)
- sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
- /* reset all the values */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
- sep->in_num_pages = 0;
- sep->out_num_pages = 0;
- dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
- return 0;
-}
-
-/*
- this function finds space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
- unsigned long **table_address_ptr)
-{
- int error = 0;
- /* pointer to the id field of the flow dma table */
- unsigned long *start_table_ptr;
- /* Do not make start_addr unsigned long * unless fixing the offset
- computations ! */
- void *flow_dma_area_start_addr;
- unsigned long *flow_dma_area_end_addr;
- /* maximum table size in words */
- unsigned long table_size_in_words;
-
- /* find the start address of the flow DMA table area */
- flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* set end address of the flow table area */
- flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
- /* set table size in words */
- table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
- /* set the pointer to the start address of DMA area */
- start_table_ptr = flow_dma_area_start_addr;
-
- /* find the space for the next table */
- while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
- start_table_ptr += table_size_in_words;
-
- /* check if we reached the end of the flow tables area */
- if (start_table_ptr >= flow_dma_area_end_addr)
- error = -1;
- else
- *table_address_ptr = start_table_ptr;
-
- return error;
-}
-
-/*
- This function creates one DMA table for flow and returns its data,
- and pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
- unsigned long virt_buff_addr,
- unsigned long virt_buff_size,
- struct sep_lli_entry_t *table_data,
- struct sep_lli_entry_t **info_entry_ptr,
- struct sep_flow_context_t *flow_data_ptr,
- bool isKernelVirtualAddress)
-{
- int error;
- /* the number of entries (pages) in the lli array */
- unsigned long lli_array_size;
- struct sep_lli_entry_t *lli_array;
- struct sep_lli_entry_t *flow_dma_table_entry_ptr;
- unsigned long *start_dma_table_ptr;
- /* total table data counter */
- unsigned long dma_table_data_count;
- /* pointer that will keep the pointer to the pages of the virtual buffer */
- struct page **page_array_ptr;
- unsigned long entry_count;
-
- /* find the space for the new table */
- error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
- if (error)
- goto end_function;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock kernel buffer in the memory */
- error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
- else
- /* lock user buffer in the memory */
- error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
- if (error)
- goto end_function;
-
- /* store the number of lli entries at the beginning of the table - this table
- is now considered taken */
- *start_dma_table_ptr = lli_array_size;
-
- /* point to the place of the pages pointers of the table */
- start_dma_table_ptr++;
-
- /* set the pages pointer */
- *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
- /* set the pointer to the first entry */
- flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
- /* now create the entries for table */
- for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
- flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
- flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
- /* set the total data of a table */
- dma_table_data_count += lli_array[entry_count].block_size;
-
- flow_dma_table_entry_ptr++;
- }
-
- /* set the physical address */
- table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
- /* set the num_entries and total data size */
- table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
- /* set the info entry */
- flow_dma_table_entry_ptr->physical_address = 0xffffffff;
- flow_dma_table_entry_ptr->block_size = 0;
-
- /* set the pointer to info entry */
- *info_entry_ptr = flow_dma_table_entry_ptr;
-
- /* free the array of lli entries */
- kfree(lli_array);
-end_function:
- return error;
-}
-
-
-
-/*
- This function creates a list of tables for flow and returns the data for
- the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
- unsigned long num_virtual_buffers,
- unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
- int error;
- unsigned long virt_buff_addr;
- unsigned long virt_buff_size;
- struct sep_lli_entry_t table_data;
- struct sep_lli_entry_t *info_entry_ptr;
- struct sep_lli_entry_t *prev_info_entry_ptr;
- unsigned long i;
-
- /* init vars */
- error = 0;
- prev_info_entry_ptr = 0;
-
- /* init the first table to default */
- table_data.physical_address = 0xffffffff;
- first_table_data_ptr->physical_address = 0xffffffff;
- table_data.block_size = 0;
-
- for (i = 0; i < num_virtual_buffers; i++) {
- /* get the virtual buffer address */
- error = get_user(virt_buff_addr, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* get the virtual buffer size */
- first_buff_addr++;
- error = get_user(virt_buff_size, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* advance the address to point to the next pair of address|size */
- first_buff_addr++;
-
- /* now prepare the one flow LLI table from the data */
- error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
- if (error)
- goto end_function;
-
- if (i == 0) {
- /* if this is the first table - save it to return to the user
- application */
- *first_table_data_ptr = table_data;
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- } else {
- /* not first table - the previous table info entry should
- be updated */
- prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- }
- }
-
- /* set the last table data */
- *last_table_data_ptr = table_data;
-end_function:
- return error;
-}
-
-/*
- this function goes over all the flow tables connected to the given
- table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
- /* id pointer */
- unsigned long *table_ptr;
- /* number of entries in the current table */
- unsigned long num_entries;
- unsigned long num_pages;
- struct page **pages_ptr;
- /* pointer to the info entry of the current table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- /* set the pointer to the first table */
- table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
- /* set the num of entries */
- num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
- & SEP_NUM_ENTRIES_MASK;
-
- /* go over all the connected tables */
- while (*table_ptr != 0xffffffff) {
- /* get number of pages */
- num_pages = *(table_ptr - 2);
-
- /* get the pointer to the pages */
- pages_ptr = (struct page **) (*(table_ptr - 1));
-
- /* free the pages */
- sep_free_dma_pages(pages_ptr, num_pages, 1);
-
- /* go to the info entry */
- info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
- table_ptr = (unsigned long *) info_entry_ptr->physical_address;
- num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
- }
-
- return;
-}
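
The walk above relies on the layout produced by sep_prepare_one_flow_dma_table():
two bookkeeping words (the page count, then the page-array pointer) sit
immediately before the LLI entries, which is why the code reads
*(table_ptr - 2) and *(table_ptr - 1). An illustrative view of that layout;
the structs are a sketch, since the driver builds the table word by word:

struct lli_entry {			/* stand-in for struct sep_lli_entry_t */
	unsigned long physical_address;
	unsigned long block_size;
};

struct flow_dma_table {			/* illustrative only */
	unsigned long num_pages;	/* read back via *(table_ptr - 2) */
	unsigned long page_array;	/* read back via *(table_ptr - 1) */
	struct lli_entry entry[];	/* data entries; the last one is the info
					   entry chaining to the next table, with
					   0xffffffff terminating the list */
};
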
-
-/**
- * sep_find_flow_context - find a flow
- * @sep: the SEP we are working with
- * @flow_id: flow identifier
- *
- * Returns a pointer to the matching flow, or NULL if the flow does not
- * exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
- unsigned long flow_id)
-{
- int count;
- /*
- * always search for the flow with the default id first - once we
- * have already started working on a flow there can never be two
- * flows carrying the default flag
- */
- for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
- if (sep->flows[count].flow_id == flow_id)
- return &sep->flows[count];
- }
- return NULL;
-}
-
-
-/*
- this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = -ENOENT;
- struct sep_driver_build_flow_table_t command_args;
- /* first table - output */
- struct sep_lli_entry_t first_table_data;
- /* dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the previous DMA table */
- struct sep_lli_entry_t *prev_info_entry_ptr;
- /* pointer to the flow data structure */
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
- /* init variables */
- prev_info_entry_ptr = 0;
- first_table_data.physical_address = 0xffffffff;
-
- /* find the free structure for flow data */
- error = -EINVAL;
- flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
- if (error)
- goto end_function;
-
- /* create flow tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* check if flow is static */
- if (!command_args.flow_type)
- /* point the info entry of the last to the info entry of the first */
- last_table_data = first_table_data;
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
- if (error)
- goto end_function_with_error;
-
- /* the whole flow was created - update the flow entry with the temp id */
- flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
- /* set the processing tables data in the context */
- if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
- flow_context_ptr->input_tables_in_process = first_table_data;
- else
- flow_context_ptr->output_tables_in_process = first_table_data;
-
- goto end_function;
-
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles add tables to flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- unsigned long num_entries;
- struct sep_driver_add_flow_table_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
- /* first dma table data */
- struct sep_lli_entry_t first_table_data;
- /* last dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the current DMA table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
- /* get input parameters */
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
- if (error)
- goto end_function;
-
- /* find the flow structure for the flow id */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* prepare the flow dma tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* now check if there is already an existing add table for this flow */
- if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
- /* this buffer was for input buffers */
- if (flow_context_ptr->input_tables_flag) {
- /* add table already exists - add the new tables to the end
- of the previous */
- num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_input_table;
- } else {
- /* set the input flag */
- flow_context_ptr->input_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_input_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_input_table = last_table_data;
- } else { /* this is output tables */
-
- /* this buffer was for output buffers */
- if (flow_context_ptr->output_tables_flag) {
- /* add table already exists - add the new tables to
- the end of the previous */
- num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_output_table;
- } else {
- /* set the output flag */
- flow_context_ptr->output_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_output_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_output_table = last_table_data;
- }
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
- return error;
-}
-
-/*
- this function adds the flow add message to the specified flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_add_message_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
- if (error)
- goto end_function;
-
- /* check input */
- if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* find the flow context */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* copy the message into context */
- flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
- error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
- return error;
-}
-
-
-/*
- this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_static_pool_addr_t command_args;
-
- dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
- /*prepare the output parameters in the struct */
- command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
- command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
- dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
- return error;
-}
-
-/*
- this function gets the offset of the physical address from the start
- of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_get_mapped_offset_t command_args;
-
- dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error)
- goto end_function;
-
- if (command_args.physical_address < sep->shared_bus) {
- error = -EINVAL;
- goto end_function;
- }
-
- /*prepare the output parameters in the struct */
- command_args.offset = command_args.physical_address - sep->shared_bus;
-
- edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
-end_function:
- dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
- return error;
-}
-
-
-/*
- This function handles the SEP start request: it polls for the startup
- message from SEP and returns the error status if the start failed
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
- unsigned long reg_val;
- unsigned long error = 0;
-
- dbg("SEP Driver:--------> sep_start_handler start\n");
-
- /* wait in polling for message from SEP */
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg_val);
-
- /* check the value */
- if (reg_val == 0x1)
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- dbg("SEP Driver:<-------- sep_start_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
- unsigned long message_word;
- unsigned long *message_ptr;
- struct sep_driver_init_t command_args;
- unsigned long counter;
- unsigned long error;
- unsigned long reg_val;
-
- dbg("SEP Driver:--------> sep_init_handler start\n");
- error = 0;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
-
- dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
-
- if (error)
- goto end_function;
-
- /* PATCH - configure the DMA to single-burst instead of multi-burst */
- /*sep_configure_dma_burst(); */
-
- dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
- message_ptr = (unsigned long *) command_args.message_addr;
-
- /* set the base address of the SRAM */
- sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
- for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
- get_user(message_word, message_ptr);
- /* write data to SRAM */
- sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
- edbg("SEP Driver:message_word is %lu\n", message_word);
- /* wait for write complete */
- sep_wait_sram_write(sep);
- }
- dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
- /* signal SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!(reg_val & 0xFFFFFFFD));
-
- dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
- /* check the value */
- if (reg_val == 0x1) {
- edbg("SEP Driver:init failed\n");
-
- error = sep_read_reg(sep, 0x8060);
- edbg("SEP Driver:sw monitor is %lu\n", error);
-
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver:error is %lu\n", error);
- }
-end_function:
- dbg("SEP Driver:<-------- sep_init_handler end\n");
- return error;
-
-}
-
-/*
- this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
- unsigned long arg)
-{
- struct sep_driver_realloc_cache_resident_t command_args;
- int error;
-
- /* copy cache and resident to their intended locations */
- error = sep_load_firmware(sep);
- if (error)
- return error;
-
- command_args.new_base_addr = sep->shared_bus;
-
- /* find the new base address according to the lowest address between
- cache, resident and shared area */
- if (sep->resident_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->resident_bus;
- if (sep->rar_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->rar_bus;
-
- /* set the return parameters */
- command_args.new_cache_addr = sep->rar_bus;
- command_args.new_resident_addr = sep->resident_bus;
-
- /* set the new shared area */
- command_args.new_shared_area_addr = sep->shared_bus;
-
- edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
- edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
- edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
- edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
- /* return to user */
- if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * sep_get_time_handler - time request from user space
- * @sep: sep we are to set the time for
- * @arg: pointer to user space arg buffer
- *
- * This function reports back the time and the address in the SEP
- * shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
- struct sep_driver_get_time_t command_args;
-
- mutex_lock(&sep_mutex);
- command_args.time_value = sep_set_time(sep);
- command_args.time_physical_address = (unsigned long)sep_time_address(sep);
- mutex_unlock(&sep_mutex);
- if (copy_to_user((void __user *)arg,
- &command_args, sizeof(struct sep_driver_get_time_t)))
- return -EFAULT;
- return 0;
-
-}
-
-/*
- This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
- dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
- /* unlock the sep mutex */
- mutex_unlock(&sep_mutex);
-#endif
-
- dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
- return 0;
-}
-
-
-/**
- * sep_set_flow_id_handler - handle flow setting
- * @sep: the SEP we are configuring
- * @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
- unsigned long flow_id)
-{
- int error = 0;
- struct sep_flow_context_t *flow_data_ptr;
-
- /* find the flow data structure that was just used for creating new flow
- - its id should be default */
-
- mutex_lock(&sep_mutex);
- flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
- if (flow_data_ptr)
- flow_data_ptr->flow_id = flow_id; /* set flow id */
- else
- error = -EINVAL;
- mutex_unlock(&sep_mutex);
- return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- dbg("------------>SEP Driver: ioctl start\n");
-
- edbg("SEP Driver: cmd is %x\n", cmd);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* send command to SEP */
- sep_send_command_handler(sep);
- edbg("SEP Driver: after sep_send_command_handler\n");
- break;
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* send reply command to SEP */
- sep_send_reply_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCWRITEDATAPOLL:
- /* write data into memory pool */
- error = sep_write_into_data_pool_handler(sep, arg);
- break;
- case SEP_IOCREADDATAPOLL:
- /* read data from data pool into application memory */
- error = sep_read_from_data_pool_handler(sep, arg);
- break;
- case SEP_IOCCREATESYMDMATABLE:
- /* create dma tables for synchronous operation */
- error = sep_create_sync_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCCREATEFLOWDMATABLE:
- /* create flow dma tables */
- error = sep_create_flow_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCFREEDMATABLEDATA:
- /* free the pages */
- error = sep_free_dma_table_data_handler(sep);
- break;
- case SEP_IOCSETFLOWID:
- /* set flow id */
- error = sep_set_flow_id_handler(sep, (unsigned long)arg);
- break;
- case SEP_IOCADDFLOWTABLE:
- /* add tables to the dynamic flow */
- error = sep_add_flow_tables_handler(sep, arg);
- break;
- case SEP_IOCADDFLOWMESSAGE:
- /* add message of add tables to flow */
- error = sep_add_flow_tables_message_handler(sep, arg);
- break;
- case SEP_IOCSEPSTART:
- /* start command to sep */
- error = sep_start_handler(sep);
- break;
- case SEP_IOCSEPINIT:
- /* init command to sep */
- error = sep_init_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* get the physical and virtual addresses of the static pool */
- error = sep_get_static_pool_addr_handler(sep, arg);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep, arg);
- break;
- case SEP_IOCREALLOCCACHERES:
- error = sep_realloc_cache_resident_handler(sep, arg);
- break;
- case SEP_IOCGETMAPPEDADDROFFSET:
- error = sep_get_physical_mapped_offset_handler(sep, arg);
- break;
- case SEP_IOCGETIME:
- error = sep_get_time_handler(sep, arg);
- break;
- default:
- error = -ENOTTY;
- break;
- }
- dbg("SEP Driver:<-------- ioctl end\n");
- return error;
-}
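
For orientation, a hedged sketch of how a user-space caller might drive the
synchronous-table path above. The device node name is an assumption, and the
ioctl number and argument struct are assumed to come from sep_driver_api.h:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sep_driver_api.h"	/* SEP_IOCCREATESYMDMATABLE and the arg struct */

/* Hypothetical helper; "/dev/sep_sec_driver" is an assumed node name. */
int build_sync_tables(struct sep_driver_build_sync_table_t *args)
{
	int ret;
	int fd = open("/dev/sep_sec_driver", O_RDWR);

	if (fd < 0)
		return -1;

	/* caller fills app_in_address, data_in_size, block_size, ... */
	ret = ioctl(fd, SEP_IOCCREATESYMDMATABLE, args);
	close(fd);
	return ret;
}
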
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
- struct sep_flow_context_t *flow_data_ptr;
-
- /* obtain the mutex */
- mutex_lock(&sep_mutex);
-
- /* get the pointer to context */
- flow_data_ptr = (struct sep_flow_context_t *) work;
-
- /* free all the current input tables in sep */
- sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
- /* free all the current output tables in SEP (if needed) */
- if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
- sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
- /* check if we have additional tables to be sent to SEP; only the input
- flag needs to be checked */
- if (flow_data_ptr->input_tables_flag) {
- /* copy the message to the shared RAM and signal SEP */
- memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
- }
- mutex_unlock(&sep_mutex);
-}
-/*
- interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error;
- unsigned long reg_val;
- unsigned long flow_id;
- struct sep_flow_context_t *flow_context_ptr;
- struct sep_device *sep = dev_id;
-
- int_error = IRQ_HANDLED;
-
- /* read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
- edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
- /* check if this is the flow interrupt */
- if (0 /*reg_val & (0x1 << 11) */ ) {
- /* read GPR0 to find out which flow is done */
- flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- /* find the context of the flow */
- flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
- if (flow_context_ptr == NULL)
- goto end_function_with_error;
-
- /* queue the work */
- INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
- queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
- } else {
- /* check if this is a reply interrupt from SEP */
- if (reg_val & (0x1 << 13)) {
- /* update the counter of reply messages */
- sep->reply_ct++;
- /* wake up the waiting process */
- wake_up(&sep_event);
- } else {
- int_error = IRQ_NONE;
- goto end_function;
- }
- }
-end_function_with_error:
- /* clear the interrupt */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
- return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
- u32 reg;
-
- do {
- reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
- } while (reg);
-}
-
-/*
- PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
- /* request access to registers from SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
-
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
-
- /* set the DMA burst register to single burst */
- sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
- /* release the sep busy */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
-
-}
-
-#endif
-
-/*
- Function that is activated on the successful probe of the SEP device
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
- int counter;
- int size; /* size of memory for allocation */
-
- edbg("Sep pci probe starting\n");
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- edbg("error enabling pci device\n");
- goto end_function;
- }
-
- /* set the pci dev pointer */
- sep_dev = &sep_instance;
- sep = &sep_instance;
-
- edbg("sep->shared_addr = %p\n", sep->shared_addr);
- /* transaction counter that coordinates the transactions between SEP
- and HOST */
- sep->send_ct = 0;
- /* counter for the messages from sep */
- sep->reply_ct = 0;
- /* counter for the number of bytes allocated in the pool
- for the current transaction */
- sep->data_pool_bytes_allocated = 0;
-
- /* calculate the total size for allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- /* allocate the shared area */
- if (sep_map_and_alloc_shared_area(sep, size)) {
- error = -ENOMEM;
- /* allocation failed */
- goto end_function_error;
- }
- /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
- /* Note: this test section will need moving before it could ever
- work as the registers are not yet mapped ! */
- /* send the new SHARED MESSAGE AREA to the SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* poll for SEP response */
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
- while (retval != 0xffffffff && retval != sep->shared_bus)
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* check the return value (register) */
- if (retval != sep->shared_bus) {
- error = -ENOMEM;
- goto end_function_deallocate_sep_shared_area;
- }
-#endif
- /* init the flow contexts */
- for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
- sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
- sep->flow_wq = create_singlethread_workqueue("sepflowwq");
- if (sep->flow_wq == NULL) {
- error = -ENOMEM;
- edbg("sep_driver:flow queue creation failed\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver: create flow workqueue \n");
- sep->pdev = pci_dev_get(pdev);
-
- sep->reg_addr = pci_ioremap_bar(pdev, 0);
- if (!sep->reg_addr) {
- edbg("sep: ioremap of registers failed.\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
- /* load the rom code */
- sep_load_rom_code(sep);
-
- /* set up system base address and shared memory location */
- sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
- 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- &sep->rar_bus, GFP_KERNEL);
-
- if (!sep->rar_addr) {
- edbg("SEP Driver:can't allocate rar\n");
- goto end_function_uniomap;
- }
-
-
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
- edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
- /* clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- edbg("SEP Driver: about to call request_irq\n");
- /* get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
- if (error)
- goto end_function_free_res;
- return 0;
- edbg("SEP Driver: about to write IMR REG_ADDR");
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
- dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- sep->rar_addr, sep->rar_bus);
-#endif /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
- iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
- /* de-allocate shared area */
- sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
- sep_dev = NULL;
-end_function:
- return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe
- /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
- /* the file operations structure of the driver */
-static struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
- this function registers the driver with the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
- int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
- if (ret_val) {
- edbg("sep: major number allocation failed, retval is %d\n",
- ret_val);
- return ret_val;
- }
- /* init cdev */
- cdev_init(&sep_cdev, &sep_file_operations);
- sep_cdev.owner = THIS_MODULE;
-
- /* register the driver with the kernel */
- ret_val = cdev_add(&sep_cdev, sep_devno, 1);
- if (ret_val) {
- edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- }
- return ret_val;
-}
-
-
-/*--------------------------------------------------------------
- init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
- int ret_val = 0;
- dbg("SEP Driver:-------->Init start\n");
- /* FIXME: Probe can occur before we are ready to survive a probe */
- ret_val = pci_register_driver(&sep_pci_driver);
- if (ret_val) {
- edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
- goto end_function_unregister_from_fs;
- }
- /* register driver to fs */
- ret_val = sep_register_driver_to_fs();
- if (ret_val)
- goto end_function_unregister_pci;
- goto end_function;
-end_function_unregister_pci:
- pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
-end_function:
- dbg("SEP Driver:<-------- Init end\n");
- return ret_val;
-}
-
-
-/*-------------------------------------------------------------
- exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
- int size;
-
- dbg("SEP Driver:--------> Exit start\n");
-
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- /* calculate the total size for de-allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
- /* FIXME: We need to do this in the unload for the device */
- /* free shared area */
- if (sep_dev) {
- sep_unmap_and_free_shared_area(sep_dev, size);
- edbg("SEP Driver: free pages SEP SHARED AREA \n");
- iounmap((void *) sep_dev->reg_addr);
- edbg("SEP Driver: iounmap \n");
- }
- edbg("SEP Driver: release_mem_region \n");
- dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_ext_with_pci_driver.c b/drivers/staging/sep/sep_ext_with_pci_driver.c
new file mode 100644
index 0000000..68da00e
--- /dev/null
+++ b/drivers/staging/sep/sep_ext_with_pci_driver.c
@@ -0,0 +1,548 @@
+/*
+ *
+ * sep_ext_with_pci_driver.c - Security Processor Driver
+ * pci initialization functions
+ *
+ *
+ **************************************************************************
+ * Copyright 2009 © Discretix Technologies Ltd. *
+ * Copyright 2009 © Intel Corporation *
+ * *
+ * This software is protected by copyright, international treaties and *
+ * various patents. Any copy or reproduction of this Software as *
+ * permitted below, must include this Copyright Notice as well as any *
+ * other notices provided under such license. *
+ * *
+ * This program shall be governed by, and may be used and redistributed *
+ * under the terms and conditions of the GNU General Public License, *
+ * version 2, as published by the Free Software Foundation. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY liability and WARRANTY; without even the implied *
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+ * See the GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, please write to the Free Software *
+ * Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ **************************************************************************
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.01.08 Initial publish
+ *
+ */
+
+#define DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/semaphore.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/ioctl.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+
+#include <linux/rar/memrar.h>
+#include <linux/rar/rar_register.h>
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_driver_ext_api.h"
+
+/* NOTE - must be defined specific to the board */
+#define VENDOR_ID PCI_VENDOR_ID_INTEL
+
+/*-----------------------------
+ private functions
+--------------------------------*/
+
+/*
+ This function locks the area of the resident and cache SEP code
+*/
+int sep_lock_cache_resident_area(void)
+{
+ int error;
+ error = rar_lock(RAR_TYPE_IMAGE);
+ if (error) {
+ pr_debug("SEP Driver:cant lock RAR region %d\n",
+ error);
+ return error;
+ }
+ return 0;
+}
+
+
+/*
+ This function copies the cache and resident images from their source
+ location into destination memory, which is external to the Linux VM
+ and is given as a physical address
+*/
+int sep_copy_cache_resident_to_area(struct device_context *sep_context_ptr,
+ dma_addr_t *dst_new_cache_addr_ptr,
+ dma_addr_t *dst_new_resident_addr_ptr,
+ dma_addr_t *dst_new_dcache_addr_ptr)
+
+{
+ const struct firmware *fw;
+
+ /* firmware regions */
+ size_t cache_size;
+ void *cache_virtual_address;
+
+ size_t resident_size;
+ void *resident_virtual_address;
+
+ char *cache_name = "cache.image.bin";
+ char *res_name = "resident.image.bin";
+
+ /* error */
+ int error;
+
+ /*--------------------------------
+ CODE
+ -------------------------------------*/
+ error = 0;
+
+ pr_debug("SEP Driver:rar_virtual is %p\n",
+ sep_context_ptr->rar_virtual_address);
+ pr_debug("SEP Driver:rar_physical is %08lx\n",
+ (unsigned long)sep_context_ptr->rar_physical_address);
+
+ *dst_new_cache_addr_ptr = sep_context_ptr->rar_physical_address;
+ cache_virtual_address = sep_context_ptr->rar_virtual_address;
+
+ /* load cache */
+ error = request_firmware(&fw, cache_name, &sep_context_ptr->pdev->dev);
+ if (error) {
+ pr_debug("SEP Driver:cant request cache fw\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:cache data loc is %p\n",
+ (void *)fw->data);
+ pr_debug("SEP Driver:cache data size is %08lx\n",
+ (unsigned long)fw->size);
+
+ memcpy(cache_virtual_address, fw->data, fw->size);
+
+ cache_size = fw->size;
+
+ release_firmware(fw);
+
+ *dst_new_resident_addr_ptr = *dst_new_cache_addr_ptr+cache_size;
+ resident_virtual_address = cache_virtual_address+cache_size;
+
+ /* load resident */
+ error = request_firmware(&fw, res_name, &sep_context_ptr->pdev->dev);
+ if (error) {
+ pr_debug("SEP Driver:cant request res fw\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:res data loc is %p\n",
+ (void *)fw->data);
+ pr_debug("SEP Driver:res data size is %08x\n",
+ fw->size);
+
+ memcpy(resident_virtual_address, fw->data, fw->size);
+
+ resident_size = fw->size;
+
+ release_firmware(fw);
+
+ *dst_new_dcache_addr_ptr = *dst_new_resident_addr_ptr+resident_size;
+
+ pr_debug("SEP Driver:resident_addr (physical )is %08lx\n",
+ (unsigned long)*dst_new_resident_addr_ptr);
+ pr_debug("SEP Driver:cache_addr (physical) is %08lx\n",
+ (unsigned long)*dst_new_cache_addr_ptr);
+
+ pr_debug("SEP Driver:resident_addr (logical )is %08lx\n",
+ (unsigned long)resident_virtual_address);
+ pr_debug("SEP Driver:cache_addr (logical) is %08lx\n",
+ (unsigned long)cache_virtual_address);
+
+ pr_debug("SEP Driver:resident_size is %08lx\n",
+ (unsigned long)resident_size);
+ pr_debug("SEP Driver:cache_size is %08lx\n",
+ (unsigned long)cache_size);
+
+end_function:
+
+ return error;
+}
+
+/*
+ This function allocates and maps the shared area for the device.
+ The size to allocate is taken from sep_context_ptr->shared_area_size;
+ on success the kernel virtual address and the bus address of the area
+ are stored back into the device context
+*/
+int sep_map_and_alloc_shared_area(struct device_context *sep_context_ptr)
+
+{
+
+ if (!sep_context_ptr) {
+ pr_debug(
+ "sep_driver:alloc_shared_area invalid parameter\n");
+ WARN_ON(!sep_context_ptr);
+ return -1;
+ }
+
+ sep_context_ptr->shared_area_virt_addr =
+ dma_alloc_coherent(&sep_context_ptr->pdev->dev,
+ sep_context_ptr->shared_area_size,
+ &sep_context_ptr->shared_area_bus_addr,
+ GFP_KERNEL);
+
+ if (!sep_context_ptr->shared_area_virt_addr) {
+ pr_debug(
+ "sep_driver:shared memory dma_alloc_coherent failed\n");
+ return -1;
+ }
+
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_area_virt_addr is %p\n",
+ sep_context_ptr->shared_area_virt_addr);
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_region_size is %08lx\n",
+ (unsigned long)sep_context_ptr->shared_area_size);
+ pr_debug(
+ "SEP Driver:sep_context_ptr->shared_area_bus_addr is %08lx\n",
+ (unsigned long)sep_context_ptr->shared_area_bus_addr);
+
+ return 0;
+}
+
+/*
+ This function unmaps and frees the shared area.
+ The input parameter is a pointer to the device context
+*/
+void sep_unmap_and_free_shared_area(struct device_context *sep_context_ptr)
+{
+ dma_free_coherent(&sep_context_ptr->pdev->dev,
+ sep_context_ptr->shared_area_size,
+ sep_context_ptr->shared_area_virt_addr,
+ sep_context_ptr->shared_area_bus_addr);
+}
+
+
+/*
+ This function returns the bus address inside the shared area that
+ corresponds to the given virtual address. The area can be either on the
+ external RAM device (ioremapped) or in system RAM; this implementation
+ is for the external RAM
+*/
+dma_addr_t sep_shared_area_virt_to_bus(struct device_context *sep_context_ptr,
+ void *virt_address)
+{
+ pr_debug("SEP Driver:sh virt to phys v %08lx\n",
+ (unsigned long)virt_address);
+ pr_debug("SEP Driver:sh virt to phys p %08lx\n",
+ (unsigned long)(sep_context_ptr->shared_area_bus_addr
+ + (virt_address - sep_context_ptr->shared_area_virt_addr)));
+
+ return sep_context_ptr->shared_area_bus_addr
+ + (size_t)(virt_address - sep_context_ptr->shared_area_virt_addr);
+}
+
+/*
+ This function returns the virtual address inside the shared area that
+ corresponds to the given bus address. The area can be either on the
+ external RAM device (ioremapped) or in system RAM; this implementation
+ is for the external RAM
+*/
+
+void *sep_shared_area_bus_to_virt(struct device_context *sep_context_ptr,
+ dma_addr_t bus_address)
+{
+ return sep_context_ptr->shared_area_virt_addr
+ + (size_t)(bus_address - sep_context_ptr->shared_area_bus_addr);
+}
+
+/*
+ * This function translates the RAR handle into the bus address
+ */
+int sep_rar_handle_to_bus(void *rar_handle , dma_addr_t *rar_bus)
+{
+ struct RAR_buffer buf;
+ static size_t const BUF_COUNT = 1;
+
+ pr_debug("SEP Driver - sep_rar_handle_to_bus %08lx\n",
+ (unsigned long)rar_handle);
+
+ if (rar_bus == NULL)
+ return -EINVAL;
+
+ buf.info.handle = (unsigned long)rar_handle;
+
+ if (rar_handle_to_bus(&buf, BUF_COUNT) != BUF_COUNT)
+ return -1;
+
+ *rar_bus = buf.bus_address;
+
+ return 0;
+}
+
+/*
+ function that is activated on the successful probe of the SEP device
+*/
+static int __devinit sep_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ /* error */
+ int error;
+
+ /* io memory (register area) */
+ static dma_addr_t iomem_start_physical;
+ static dma_addr_t iomem_end_physical;
+ size_t io_memory_size;
+ void __iomem *iomem_start_virtual;
+
+ /*------------------------
+ CODE
+ ---------------------------*/
+
+ pr_debug("Sep pci probe starting\n");
+ error = 0;
+
+ if (sep_context.pdev != NULL) {
+ pr_debug("Only one device supported\n");
+ return -EBUSY;
+ }
+
+ /* enable the device */
+ error = pci_enable_device(pdev);
+ if (error) {
+ pr_debug("error enabling pci device\n");
+ return -ENODEV;
+ }
+
+ /* set the pci dev pointer */
+ sep_context.pdev = pdev;
+
+ /* get the io memory start address */
+ iomem_start_physical = pci_resource_start(pdev, 0);
+ if (!iomem_start_physical) {
+ pr_debug("SEP Driver error pci resource start\n");
+ goto end_function;
+ }
+
+ /* get the io memory end address */
+ iomem_end_physical = pci_resource_end(pdev, 0);
+ if (!iomem_end_physical) {
+ pr_debug("SEP Driver error pci resource end\n");
+ goto end_function;
+ }
+
+ io_memory_size = iomem_end_physical -
+ iomem_start_physical + 1;
+
+ pr_debug("SEP Driver:iomem_start_physical is %08lx\n",
+ (unsigned long)iomem_start_physical);
+
+ pr_debug("SEP Driver:iomem_end_physical is %08lx\n",
+ (unsigned long)iomem_end_physical);
+
+ pr_debug("SEP Driver:io_memory_size is %08lx\n",
+ (unsigned long)io_memory_size);
+
+ iomem_start_virtual = pci_ioremap_bar(pdev, 0);
+ if (!iomem_start_virtual) {
+ pr_debug("SEP Driver pci_iomem failed\n");
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:iomem_start_virtual is %p\n",
+ iomem_start_virtual);
+
+ sep_context.reg_addr = iomem_start_virtual;
+
+ /* call the register_rar function (this will use the sep_callback
+ * function to perform the rar related activities; this callback
+ * is also used in case we are running prior to the rar_register
+ * driver running). */
+ error = register_rar(&sep_callback, (void *)&sep_context);
+
+ if (error) {
+ pr_debug("SEP Driver: register_rar failed\n");
+ WARN_ON(error);
+ goto end_function;
+ }
+
+#if !SEP_DRIVER_POLLING_MODE
+
+ /* clear ICR register */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_ICR_REG_ADDR,
+ 0xFFFFFFFF);
+
+ /* set the IMR register - open only GPR 2 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_IMR_REG_ADDR,
+ (~(0x1 << 13)));
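+ /* writing ~(1 << 13) to the IMR masks every interrupt source except
+ bit 13, which on this device appears to be the SEP-to-host GPR2
+ interrupt used to signal a reply */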
+
+ sep_context.sep_irq = pdev->irq;
+
+ pr_debug("SEP Driver: my irq is %d\n", sep_context.sep_irq);
+
+ error = request_irq(sep_context.sep_irq, sep_inthandler, IRQF_SHARED,
+ "sep_driver", &sep_context.reg_addr);
+
+ if (error) {
+ pr_debug("SEP Driver: Unable to request_irq\n");
+ goto end_function;
+ }
+
+ /* set the IMR register - open only GPR 2 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_IMR_REG_ADDR,
+ (~(0x1 << 13)));
+
+#endif /* Interrupt mode (!POLLING) */
+
+ goto end_ok_function;
+
+end_function:
+
+ if (sep_context.reg_addr) {
+ iounmap(sep_context.reg_addr);
+ sep_context.reg_addr = NULL;
+ }
+
+ if (sep_context.pdev) {
+ pci_disable_device(sep_context.pdev);
+ sep_context.pdev = NULL;
+ }
+
+end_ok_function:
+
+ return error;
+}
+
+/* following is the callback for the rar_driver to call in case we attempt
+ * to register too soon
+ */
+
+int sep_callback(void *sep_context_pointer)
+{
+ int error;
+ struct device_context *my_sep_context;
+
+ my_sep_context = (struct device_context *)sep_context_pointer;
+
+ /* set up rar base address from rar_register driver*/
+ error = rar_get_address(RAR_TYPE_IMAGE,
+ &my_sep_context->rar_start_address,
+ &my_sep_context->rar_end_address);
+
+ if (error) {
+ pr_debug(
+ "SEP Driver:cant get RAR region - error is %d\n",
+ error);
+ goto end_function;
+ }
+
+ pr_debug("SEP Driver:retrieved RAR start is %08lx\n",
+ (unsigned long)my_sep_context->rar_start_address);
+ pr_debug("SEP Driver:retrieved RAR end is %08lx\n",
+ (unsigned long)my_sep_context->rar_end_address);
+
+ /* now make sure we 'own' this address range */
+ my_sep_context->rar_size =
+ (size_t)(my_sep_context->rar_end_address -
+ my_sep_context->rar_start_address + 1);
+
+ if (!request_mem_region(my_sep_context->rar_start_address,
+ my_sep_context->rar_size, "sep_sec_driver")) {
+ pr_debug(
+ "SEP Driver:request mem region failed for rar\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ my_sep_context->rar_virtual_address = ioremap_nocache(
+ my_sep_context->rar_start_address, my_sep_context->rar_size);
+ if (!my_sep_context->rar_virtual_address) {
+ pr_debug(
+ "SEP Driver:cant ioremap_nocache rar\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ my_sep_context->rar_physical_address =
+ my_sep_context->rar_start_address;
+
+ pr_debug("SEP Driver:rar_physical is %08lx\n",
+ (unsigned long)my_sep_context->rar_physical_address);
+
+ pr_debug("SEP Driver:rar_virtual is %p\n",
+ my_sep_context->rar_virtual_address);
+
+ goto end_ok_function;
+
+end_function:
+
+ if (my_sep_context->rar_virtual_address) {
+ iounmap(my_sep_context->rar_virtual_address);
+ my_sep_context->rar_virtual_address = NULL;
+ }
+
+ if (my_sep_context->reg_addr) {
+ iounmap(my_sep_context->reg_addr);
+ my_sep_context->reg_addr = NULL;
+ }
+
+ if (my_sep_context->pdev) {
+ pci_disable_device(my_sep_context->pdev);
+ my_sep_context->pdev = NULL;
+ }
+
+end_ok_function:
+
+ return error;
+}
+
+static struct pci_device_id sep_pci_id_tbl[] = {
+ { PCI_DEVICE(VENDOR_ID, 0x080c) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/* field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+ .name = "sep_sec_driver",
+ .id_table = sep_pci_id_tbl,
+ .probe = sep_probe
+};
+
+
+/*
+ this function registers the driver to
+ the device subsystem (PCI, USB, etc.)
+*/
+int sep_register_driver_to_device(void)
+{
+ return pci_register_driver(&sep_pci_driver);
+}
+
diff --git a/drivers/staging/sep/sep_main_mod.c b/drivers/staging/sep/sep_main_mod.c
new file mode 100644
index 0000000..744e287
--- /dev/null
+++ b/drivers/staging/sep/sep_main_mod.c
@@ -0,0 +1,3330 @@
+ /*
+ * sep_main_mod.c
+ *
+ **************************************************************************
+ * Copyright 2009 © Discretix Technologies Ltd. *
+ * Copyright 2009 © Intel Corporation *
+ * *
+ * This software is protected by copyright, international treaties and *
+ * various patents. Any copy or reproduction of this Software as *
+ * permitted below, must include this Copyright Notice as well as any *
+ * other notices provided under such license. *
+ * *
+ * This program shall be governed by, and may be used and redistributed *
+ * under the terms and conditions of the GNU General Public License, *
+ * version 2, as published by the Free Software Foundation. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY liability and WARRANTY; without even the implied *
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+ * See the GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, please write to the Free Software *
+ * Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ **************************************************************************
+ *
+ * CONTACTS:
+ *
+ * Mark Allyn mark.a.allyn@intel.com
+ *
+ * CHANGES:
+ *
+ * 2010.01.08 Initial publish
+ *
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/ioctl.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+
+#ifdef DX_CC52_SUPPORT
+#include <linux/netlink.h>
+#include <linux/connector.h>
+#include <linux/cn_proc.h>
+#endif
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_driver_ext_api.h"
+
+/*----------------------------------------
+ DEFINES
+-----------------------------------------*/
+
+
+
+/*--------------------------------------------
+ GLOBAL variables
+--------------------------------------------*/
+
+/* debug messages level */
+static int sep_debug = 0x0;
+module_param(sep_debug, int, 0);
+MODULE_PARM_DESC(sep_debug, "Flag to enable SEP debug messages");
+
+/* context of the device */
+struct device_context sep_context;
+
+
+/*---------------------------------------------
+ FUNCTIONS
+-----------------------------------------------*/
+/*
+ this function locks SEP access by taking the mmap lock bit, sleeping
+ on the event queue if the lock is already held
+*/
+int sep_lock(void)
+{
+ /* get the lock */
+ wait_event_interruptible(sep_context.event ,
+ test_and_set_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag) == 0);
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
+/*
+ this function unlocks SEP
+*/
+void sep_unlock(void)
+{
+ /* check if mappings must be freed */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* release lock */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+ wake_up(&sep_context.event);
+}
+
+/*
+ this function returns the address of the message shared area
+*/
+void *sep_map_shared_area(void)
+{
+ return sep_context.shared_area_virt_addr;
+}
+
+/*
+ calculates time and sets it at the predefined address
+*/
+static int sep_set_time(dma_addr_t *bus_ptr,
+ u32 *time_in_sec_ptr)
+{
+ /* time struct */
+ struct timeval time;
+
+ /* address of time in the kernel */
+ u32 *time_addr;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_set_time start\n");
+
+
+ do_gettimeofday(&time);
+
+ /* set value in the SYSTEM MEMORY offset */
+ time_addr = sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+
+ time_addr[0] = SEP_TIME_VAL_TOKEN;
+ time_addr[1] = time.tv_sec;
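+ /* the time slot in the system area is two 32-bit words: a token
+ marking the entry as valid, followed by the time in seconds */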
+
+ edbg("SEP Driver:time.tv_sec is %x\n",
+ (u32)time.tv_sec);
+ edbg("SEP Driver:time_addr is %p\n",
+ time_addr);
+ edbg("SEP Driver:g_sep_shared_area_virt_addr is %p\n",
+ sep_context.shared_area_virt_addr);
+
+ /* set the output parameters if needed */
+ if (bus_ptr)
+ *bus_ptr = sep_shared_area_virt_to_bus(&sep_context,
+ time_addr);
+
+ if (time_in_sec_ptr)
+ *time_in_sec_ptr = time.tv_sec;
+
+ dbg("SEP Driver:<-------- sep_set_time end\n");
+
+ return 0;
+}
+
+
+#ifdef DX_CC52_SUPPORT
+/*
+ open function for the singleton driver
+*/
+static int sep_singleton_open(struct inode *inode_ptr , struct file *file_ptr)
+{
+ /* return value */
+ int error;
+
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ error = 0;
+
+ dbg("SEP Driver:--------> sep_singleton_open start\n");
+
+ if (test_and_set_bit(0, &sep_context.singleton_access_flag))
+ error = -EBUSY;
+
+ dbg("SEP Driver:<-------- sep_singleton_open end\n");
+
+ return error;
+}
+
+/*
+ inserts the data into the caller id table
+*/
+static int sep_set_caller_id_handler(u32 arg)
+{
+ /* error */
+ int error;
+
+ /* counter */
+ int i;
+
+ /* command arguments */
+ struct sep_driver_set_caller_id_t command_args;
+
+ /*----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_set_caller_id_handler start\n");
+
+ error = 0;
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep_context.caller_id_table[i].pid == 0)
+ break;
+ }
+
+ if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
+ edbg("SEP Driver:i == SEP_CALLER_ID_TABLE_NUM_ENTRIES\n");
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* copy the data */
+ if (copy_from_user(&command_args ,
+ (void *)arg ,
+ sizeof(struct sep_driver_set_caller_id_t))) {
+ edbg("SEP Driver:copy_from_user failed\n");
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (!command_args.pid ||
+ !command_args.callerIdAddress ||
+ !command_args.callerIdSizeInBytes) {
+
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ edbg("SEP Driver:pid is %x\n" ,
+ command_args.pid);
+ edbg("SEP Driver:callerIdSizeInBytes is %x\n" ,
+ command_args.callerIdSizeInBytes);
+
+ if (command_args.callerIdSizeInBytes >
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ sep_context.caller_id_table[i].pid = command_args.pid;
+
+ if (copy_from_user(sep_context.caller_id_table[i].callerIdHash,
+ command_args.callerIdAddress ,
+ command_args.callerIdSizeInBytes))
+ error = -EFAULT;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_set_caller_id_handler end\n");
+
+ return error;
+}
+
+/*
+ set the caller id (if it exists) of the current process that sends a request to SeP
+*/
+static int sep_set_current_caller_id(void)
+{
+ int i;
+
+ dbg("SEP Driver:--------> sep_set_current_caller_id start\n");
+
+ edbg("current process is %d\n", current->pid);
+
+ /* zero the previous value */
+ memset((void *)(sep_context.shared_area_virt_addr +
+ (SEP_DRIVER_SYSTEM_CALLER_ID_MEMORY_OFFSET_IN_BYTES)),
+ 0,
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+
+ for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+ if (sep_context.caller_id_table[i].pid == current->pid) {
+ edbg("Caller Id found\n");
+
+ memcpy((void *)(sep_context.shared_area_virt_addr +
+ (SEP_DRIVER_SYSTEM_CALLER_ID_MEMORY_OFFSET_IN_BYTES)),
+ (void *)(sep_context.caller_id_table[i].callerIdHash),
+ SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+ break;
+ }
+ }
+
+ dbg("SEP Driver:<-------- sep_set_current_caller_id end\n");
+
+ return 0;
+}
+
+
+/*
+ callback for the netlink connector registration
+*/
+static void sep_cn_callback(void *data)
+{
+ /* counter */
+ int counter;
+
+ /* message */
+ struct cn_msg *msg;
+
+ /* event */
+ struct proc_event *ev;
+
+ /* process id */
+ int pid;
+
+ /*--------------------
+ CODE
+ ----------------------*/
+
+ msg = data;
+
+ /* get the proc event pointer */
+ ev = (struct proc_event *)msg->data;
+
+ /* check the event */
+ switch (ev->what) {
+ case PROC_EVENT_EXEC:
+
+ /* get the pid */
+ pid = ev->event_data.exec.process_pid;
+
+ break;
+
+ case PROC_EVENT_EXIT:
+
+ /* get the pid */
+ pid = ev->event_data.exit.process_pid;
+
+ break;
+
+ default:
+ /* not interested in the rest of the events */
+ goto end_function;
+ }
+
+ /* check if the pid is relevant */
+ for (counter = 0;
+ counter < SEP_CALLER_ID_TABLE_NUM_ENTRIES;
+ counter++) {
+ /* if the pid is found - remove it from the table */
+ if (sep_context.caller_id_table[counter].pid == pid)
+ sep_context.caller_id_table[counter].pid = 0;
+ }
+
+
+end_function:
+
+ return;
+}
+
+/*
+ initializes the caller id functionality
+*/
+static int sep_init_caller_id(void)
+{
+ /* return value */
+ int ret_val;
+
+ /* counter */
+ int counter;
+
+ struct cb_id caller_id;
+
+
+ /*----------------------------
+ CODE
+ ---------------------------------*/
+
+ ret_val = 0;
+
+ /* init cb_id struct */
+ caller_id.idx = CN_IDX_PROC;
+ caller_id.val = CN_VAL_PROC;
+
+ /* init caller id table */
+ for (counter = 0; counter < SEP_CALLER_ID_TABLE_NUM_ENTRIES; counter++)
+ sep_context.caller_id_table[counter].pid = 0;
+
+
+ /* init access flag */
+ sep_context.singleton_access_flag = 0;
+
+ edbg("SEP Driver: caller id table init finished\n");
+
+ /* register to netlink connector */
+ /*ret_val = cn_add_callback(&caller_id, "sep_caller_id", &sep_cn_callback);*/
+
+
+ return ret_val;
+}
+
+#endif /*DX_CC52_SUPPORT*/
+
+
+/*
+ This function raises an interrupt to SEP to signal that the HOST has a
+ new command for it
+*/
+static int sep_send_command_handler(void)
+{
+ int error;
+
+ u32 count;
+
+ dbg("SEP Driver:--------> sep_send_command_handler start\n");
+
+ error = 0;
+
+ /* check if message was already sent without poll */
+ if (test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* set bit that allows poll */
+ set_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ sep_set_time(0, 0);
+
+#ifdef DX_CC52_SUPPORT
+ /* set the current caller id - if it exists */
+ sep_set_current_caller_id();
+#endif
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ /* update counter */
+ sep_context.host_to_sep_send_counter++;
+
+ sep_context.shared_area_dma_addr =
+ dma_map_single(sep_context.dev_ptr,
+ (void *)sep_context.shared_area_virt_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
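+ /* the shared area stays mapped towards the device until the reply is
+ collected; poll, unlock, release and end-transaction unmap it again */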
+
+ /* send interrupt to SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+ dbg("SEP Driver:<-------- sep_send_command_handler end\n");
+
+end_function:
+
+ return error;
+}
+
+/*
+ this function notifies the SeP of the incoming message
+*/
+int sep_send_msg_rdy_cmd(void)
+{
+ return sep_send_command_handler();
+}
+
+
+/* poll(suspend), until reply from sep */
+int sep_driver_poll(u32 *source_ptr)
+{
+ /* error */
+ int error;
+
+ /* gpr2 register value */
+ u32 retVal2;
+
+ /* gpr3 register value */
+ u32 retVal3;
+
+ /*-----------------
+ CODE
+ -------------------*/
+
+ error = 0;
+
+ /* check if send command or send_reply were activated previously */
+ if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EIO;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_POLLING_MODE
+ do {
+ /* read gp2 */
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ /* read gp3 */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+ /* sleep between loops */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(50);
+
+ } while ((retVal3 == 0x0) &&
+ sep_context.host_to_sep_send_counter != (retVal2 & 0x7FFFFFFF));
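+ /* the loop above exits either when GPR3 becomes non-zero (error
+ indication from SEP) or when the reply counter in GPR2 catches up
+ with the send counter */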
+
+ sep_context.sep_to_host_reply_counter++;
+#else
+ /* poll, until reply from sep */
+ wait_event(sep_context.event,
+ (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter));
+
+#endif
+
+ /* check if error occurred during poll */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if (retVal3 != 0x0) {
+ edbg("SEP Driver: error during poll\n");
+ error = -EIO;
+ goto end_function;
+ }
+
+ if (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter) {
+
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ edbg("retVal2 is %x\n", retVal2);
+
+ /* clear the bit - in case it must be set
+ again by send_reply_command */
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* check printf request from sep */
+ if ((retVal2 >> 30) & 0x1) {
+
+ edbg("SEP Driver: sep printf request in\n");
+ *source_ptr = SEP_DRIVER_SRC_PRINTF;
+ goto end_function;
+ }
+
+ /* check if this is a sep reply or a request */
+ if (retVal2 >> 31) {
+ edbg("SEP Driver: sep request in\n");
+ *source_ptr = SEP_DRIVER_SRC_REQ;
+ /* request */
+ } else {
+ edbg("SEP Driver: sep reply in\n");
+ *source_ptr = SEP_DRIVER_SRC_REPLY;
+ }
+ }
+
+end_function:
+
+ return error;
+}
+
+/*----------------------------------------------------------------------
+ open function of the character driver - initializes the private data
+------------------------------------------------------------------------*/
+static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
+{
+ /* return value */
+ int error;
+
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ error = 0;
+
+ dbg("SEP Driver:--------> open start\n");
+
+ /* init the private data flag */
+ file_ptr->private_data = (void *)SEP_DRIVER_DISOWN_LOCK_FLAG;
+
+ dbg("SEP Driver:<-------- open end\n");
+
+ return error;
+}
+
+
+/*------------------------------------------------------------
+ release function
+-------------------------------------------------------------*/
+static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
+{
+ /*-----------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_release start\n");
+
+ /* free any dma resources that are still held */
+ sep_free_dma_table_data_handler();
+
+ /* unlock on release only if file pointer still holds the lock flag */
+ if ((u32)file_ptr->private_data == SEP_DRIVER_OWN_LOCK_FLAG) {
+
+ /* check if mappings must be freed */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* free any dma resources that are still held */
+ sep_free_dma_table_data_handler();
+
+ /* release lock */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+ }
+
+ dbg("SEP Driver:<-------- sep_release end\n");
+
+ return 0;
+}
+
+
+
+
+/*---------------------------------------------------------------
+ map function - this functions maps the message shared area
+-----------------------------------------------------------------*/
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ /* physical addr */
+ dma_addr_t phys_addr;
+
+ /* error */
+ int error;
+
+ /*-----------------------
+ CODE
+ -------------------------*/
+
+ dbg("SEP Driver:--------> mmap start\n");
+
+ /* lock the access to SeP */
+ wait_event_interruptible(sep_context.event ,
+ test_and_set_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag) == 0);
+ if (signal_pending(current)) {
+ error = -EINTR;
+ goto end_function;
+ }
+
+ /* set the flag for LOCK */
+ filp->private_data = (void *)SEP_DRIVER_OWN_LOCK_FLAG;
+
+ /* zero the pools */
+ sep_context.data_pool_bytes_allocated = 0;
+
+ /* check that the size of the mapped range does not exceed the size of
+ the message shared area */
+ if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+ edbg("SEP Driver mmap requested size is more than allowed\n");
+ error = -EAGAIN;
+
+ goto end_function_with_error;
+ }
+
+ edbg("SEP Driver:g_sep_shared_area_virt_addr is %p\n",
+ sep_context.shared_area_virt_addr);
+
+ /* get physical address */
+ phys_addr = sep_context.shared_area_bus_addr;
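+ /* the shared area was allocated with dma_alloc_coherent(), so it is
+ physically contiguous and its bus address is used as the physical
+ address for remap_pfn_range() below */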
+
+ edbg("SEP Driver: phys_addr is %08x\n", (u32)phys_addr);
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ phys_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ edbg("SEP Driver remap_page_range failed\n");
+ error = -EAGAIN;
+
+ goto end_function_with_error;
+ }
+
+ goto end_function;
+
+end_function_with_error:
+
+ /* clear the bit */
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+
+end_function:
+
+ dbg("SEP Driver:<-------- mmap end\n");
+
+ return 0;
+}
+
+
+/*-----------------------------------------------
+ poll function
+*----------------------------------------------*/
+static u32 sep_poll(struct file *filp, poll_table *wait)
+{
+ u32 count;
+
+ u32 mask = 0;
+
+ /* GPR2 register */
+ u32 retVal2;
+
+ /* GPR3 register */
+ u32 retVal3;
+
+ /*----------------------------------------------
+ CODE
+ -------------------------------------------------*/
+
+ dbg("SEP Driver:--------> poll start\n");
+
+ retVal2 = retVal3 = 0;
+
+ /* check that only the process that locked the driver may call poll */
+ if ((u32)filp->private_data != SEP_DRIVER_OWN_LOCK_FLAG) {
+ mask = POLLERR;
+ goto end_function;
+ }
+
+ /* check if send command or send_reply were activated previously */
+ if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ mask = POLLERR;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_POLLING_MODE
+
+ while ((retVal3 == 0x0) &&
+ (sep_context.host_to_sep_send_counter != (retVal2 & 0x3FFFFFFF))) {
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ }
+
+ sep_context.sep_to_host_reply_counter++;
+#else
+ /* add the event to the polling wait table */
+ poll_wait(filp, &sep_context.event, wait);
+
+#endif
+
+ edbg("sep_host_to_sep_send_counter is %x\n",
+ sep_context.host_to_sep_send_counter);
+ edbg("sep_sep_to_host_reply_counter is %x\n",
+ sep_context.sep_to_host_reply_counter);
+
+ /* check if error occurred during poll */
+ retVal3 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ if (retVal3 != 0x0) {
+ edbg("SEP Driver: error during poll\n");
+ mask |= POLLERR;
+
+ goto end_function;
+ }
+
+ /* check if the data is ready */
+ if (sep_context.host_to_sep_send_counter ==
+ sep_context.sep_to_host_reply_counter) {
+
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Sep Mesg Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ for (count = 0; count < 10 * 4; count += 4)
+ edbg("Debug Data Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr +
+ 0x1800 +
+ count)));
+
+ retVal2 = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+
+ edbg("retVal2 is %x\n", retVal2);
+
+ /* clear the bit - in case it must be set
+ again by send_reply_command */
+ clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* check printf request from sep */
+ if ((retVal2 >> 30) & 0x1) {
+ edbg("SEP Driver: sep printf request in\n");
+ mask |= POLLHUP;
+
+ goto end_function;
+ }
+
+ /* check if this is a sep reply or a request */
+ if (retVal2 >> 31) {
+ edbg("SEP Driver: sep request in\n");
+ /* request */
+ mask |= POLLOUT | POLLWRNORM;
+ } else {
+ edbg("SEP Driver: sep reply in\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- poll exit\n");
+ return mask;
+}
+
+/*
+ This function raises an interrupt to SEP to signal that the HOST has a
+ new reply command for it
+*/
+static int sep_send_reply_command_handler(void)
+{
+ /* count */
+ u32 count;
+
+ /* error */
+ int error;
+
+
+ dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
+
+ error = 0;
+
+ /* check if message was already sent without poll */
+ if (test_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ for (count = 0; count < 12 * 4; count += 4)
+ edbg("Word %x of the message is %x\n",
+ count,
+ *((u32 *)(sep_context.shared_area_virt_addr + count)));
+
+ /* set bit that allows poll */
+ set_bit(SEP_SEND_MSG_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* update both counters */
+ sep_context.host_to_sep_send_counter++;
+
+ sep_context.sep_to_host_reply_counter++;
+
+ /* flush shared area */
+ sep_context.shared_area_dma_addr =
+ dma_map_single(sep_context.dev_ptr,
+ (void *)sep_context.shared_area_virt_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* send the interrupt to SEP */
+ SEP_WRITE_REGISTER(&sep_context,
+ HW_HOST_HOST_SEP_GPR2_REG_ADDR,
+ sep_context.host_to_sep_send_counter);
+
+ /* update host to sep counter */
+ sep_context.host_to_sep_send_counter++;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
+
+ return error;
+}
+
+/*
+ This function handles the allocate data pool memory request.
+ It calculates the bus address of the allocated memory and the offset
+ of this area from the mapped address, so that the FVOs in user space
+ can calculate the exact virtual address of this allocated memory
+*/
+static int sep_allocate_data_pool_memory_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command parameters */
+ struct sep_driver_alloc_t command_args;
+
+ /*-------------------------
+ CODE
+ ----------------------------*/
+
+ dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
+
+ error = 0;
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_alloc_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* allocate memory */
+ if (
+ (SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES -
+ sep_context.data_pool_bytes_allocated) <
+ command_args.num_bytes) {
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ /* set the offset and the bus address */
+ command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep_context.data_pool_bytes_allocated;
+ command_args.bus_address = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+ sep_context.data_pool_bytes_allocated;
+
+ /* write the memory back to the user space */
+ if (copy_to_user((void *)arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_alloc_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* set the allocation */
+ sep_context.data_pool_bytes_allocated += command_args.num_bytes;
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
+
+ return error;
+}
+
+
+
+
+
+/*
+ this function handles the request for creation of the DMA tables
+ for the synchronous symmetric operations (AES, DES, HASH).
+ it returns the physical addresses of the created DMA tables to
+ user space, which inserts them as parameters into the HOST-SEP message.
+ these pointers are NOT dereferenced by the user application in any case.
+*/
+static int sep_create_sync_dma_tables_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_build_sync_table_t command_args;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_build_sync_table_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ edbg("app_in_address is %08lx\n", command_args.app_in_address);
+ edbg("app_out_address is %08lx\n", command_args.app_out_address);
+ edbg("data_size is %x\n", command_args.data_in_size);
+ edbg("block_size is %x\n", command_args.block_size);
+
+ /* validate user parameters */
+ if (!command_args.app_in_address) {
+
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /* check if we need to build only input table or input/output */
+ if (command_args.app_out_address) {
+
+ /* prepare input/output tables. */
+ error = sep_prepare_input_output_dma_table(
+ command_args.app_in_address,
+ command_args.app_out_address,
+ command_args.data_in_size,
+ command_args.block_size,
+ &command_args.in_table_address,
+ &command_args.out_table_address,
+ &command_args.in_table_num_entries,
+ &command_args.out_table_num_entries,
+ &command_args.table_data_size,
+ command_args.isKernelVirtualAddress);
+
+ } else {
+ /* prepare input tables */
+ error = sep_prepare_input_dma_table(
+ command_args.app_in_address,
+ command_args.data_in_size,
+ command_args.block_size,
+ &command_args.in_table_address,
+ &command_args.in_table_num_entries,
+ &command_args.table_data_size,
+ command_args.isKernelVirtualAddress);
+ }
+
+ if (error)
+ goto end_function;
+
+ /* copy to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_build_sync_table_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
+
+ return error;
+}
+
+/*
+ This API handles the end transaction request
+*/
+static int sep_end_transaction_handler(struct file *file_ptr)
+{
+ /*----------------------------
+ CODE
+ -----------------------------*/
+
+ dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
+
+ /* check that poll was called before */
+ if (sep_context.shared_area_dma_addr)
+ dma_unmap_single(sep_context.dev_ptr,
+ sep_context.shared_area_dma_addr,
+ sep_context.shared_area_size,
+ DMA_BIDIRECTIONAL);
+
+ /* zero the shared area mapping */
+ sep_context.shared_area_dma_addr = 0;
+
+ /* free any dma resources that are still held */
+ sep_free_dma_table_data_handler();
+
+ clear_bit(SEP_MMAP_LOCK_BIT, &sep_context.in_use_flag);
+
+ /* raise event for stuck contexts */
+ wake_up(&sep_context.event);
+
+ file_ptr->private_data = (void *)SEP_DRIVER_DISOWN_LOCK_FLAG;
+
+ dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
+
+ return 0;
+}
+
+static int sep_start_handler(void)
+{
+ /* reg val */
+ u32 reg_val;
+
+ /* error */
+ u32 error;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_start_handler start\n");
+
+ error = 0;
+
+ /* wait in polling for message from SEP */
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!reg_val);
+
+ /* check the value */
+ if (reg_val == 0x1) {
+ /* fatal error - read error status from GPR0 */
+ error = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ goto end_function;
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_start_handler end\n");
+
+ return error;
+}
+
+
+/*
+ this function handles the request for SEP initialization
+*/
+static int sep_init_handler(void *arg)
+{
+ /* word from message */
+ u32 message_word;
+
+ /* message ptr */
+ u32 *message_ptr;
+
+ /* command arguments */
+ struct sep_driver_init_t command_args;
+
+ /* counter */
+ u32 counter;
+
+ /* error */
+ int error;
+
+ /* reg val */
+ u32 reg_val;
+
+ /*-------------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_init_handler start\n");
+
+ error = 0;
+
+ /* this API may be run only by root */
+ if (!capable(CAP_SYS_ADMIN)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ if (copy_from_user(&command_args, arg,
+ sizeof(struct sep_driver_init_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* validate user parameters */
+ if (!command_args.message_addr || !command_args.sep_sram_addr) {
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ message_ptr = command_args.message_addr;
+
+ edbg("SEP Driver: before write1\n");
+ /* set the base address of the SRAM */
+ SEP_WRITE_REGISTER(&sep_context,
+ HW_SRAM_ADDR_REG_ADDR,
+ command_args.sep_sram_addr);
+
+ for (counter = 0 ;
+ counter < command_args.message_size_in_words;
+ counter++, message_ptr++) {
+ get_user(message_word, message_ptr);
+
+ edbg("SEP Driver: before write2\n")
+ /* write data to SRAM */
+ SEP_WRITE_REGISTER(&sep_context, HW_SRAM_DATA_REG_ADDR,
+ message_word);
+
+ edbg("SEP Driver:message_word is %x\n", message_word);
+
+ /* wait for write complete */
+ SEP_WAIT_SRAM_WRITE_COMPLETE(&sep_context);
+ }
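+ /* each message word is pushed through the SRAM data register and the
+ driver busy-waits for the write-complete indication before sending
+ the next word */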
+
+ /* signal SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
+
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (!(reg_val & 0xFFFFFFFD));
+
+ /* check the value */
+ if (reg_val == 0x1) {
+ edbg("SEP Driver:init failed\n");
+
+ error = SEP_READ_REGISTER(&sep_context, 0x8060);
+ edbg("SEP Driver:sw monitor is %x\n", error);
+
+ /* fatal error - read error status from GPR0 */
+ error = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+ edbg("SEP Driver:error is %x\n", error);
+ goto end_function;
+ }
+
+ edbg("SEP Driver: signal end CC_INIT, reg_val is %d\n", reg_val);
+
+ /* signal sep to zero the GPR3 */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
+ /* poll gpr3 till it is 0 */
+ do {
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+ } while (reg_val != 0);
+
+ #ifndef __LITTLE_ENDIAN
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_ENDIAN_REG_ADDR, 0x1/*BE*/);
+ #endif
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_init_handler end\n");
+
+ return error;
+
+}
+
+/*
+ this function handles the cache and resident reallocation request
+*/
+static int sep_realloc_cache_resident_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* reg val */
+ u32 reg_val;
+
+ /* physical cache addr */
+ dma_addr_t cache_bus_address;
+
+ /* physical resident addr */
+ dma_addr_t resident_bus_address;
+
+ /* physical d-cache addr */
+ dma_addr_t dcache_bus_address;
+
+ /* command arguments */
+ struct sep_driver_realloc_cache_resident_t command_args;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ /* check if SEP already initialized */
+ reg_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+ /* check that cold boot, warm boot or cold-warm boot has finished */
+ if (reg_val != 0x2) {
+ error = SEP_ALREADY_INITIALIZED_ERR;
+ goto end_function;
+ }
+
+ /* this API may be run only by root */
+ if (!capable(CAP_SYS_ADMIN)) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_realloc_cache_resident_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+
+ /* copy cache and resident to their intended locations */
+ error = sep_copy_cache_resident_to_area(&sep_context,
+ &cache_bus_address,
+ &resident_bus_address,
+ &dcache_bus_address);
+
+ if (error)
+ goto end_function;
+
+#if SEP_DRIVER_LOCK_RAR_MODE
+
+ error = sep_copy_ext_cache_to_area(command_args.extcache_addr,
+ command_args.extcache_size_in_bytes,
+ 0);
+
+ if (error)
+ goto end_function;
+
+
+#endif
+
+ /* lock the area (if needed) */
+ error = sep_lock_cache_resident_area();
+ if (error)
+ goto end_function;
+
+ command_args.new_base_addr = sep_context.shared_area_bus_addr;
+
+ /* find the new base address according to the lowest address between
+ cache, resident and shared area */
+ if (resident_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = resident_bus_address;
+
+ if (cache_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = cache_bus_address;
+
+ if (dcache_bus_address < command_args.new_base_addr)
+ command_args.new_base_addr = dcache_bus_address;
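+ /* new_base_addr now holds the lowest of the shared area, resident,
+ cache and d-cache bus addresses; SEP presumably uses it as the base
+ when computing the offsets of the other regions */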
+
+ /* set the return parameters */
+ command_args.new_cache_addr = cache_bus_address;
+ command_args.new_resident_addr = resident_bus_address;
+ command_args.new_dcache_addr = dcache_bus_address;
+
+
+ /* set the new shared area */
+ command_args.new_shared_area_addr = sep_context.shared_area_bus_addr;
+
+ edbg("SEP Driver:command_args.new_shared_area_addr is %08x\n",
+ (u32)command_args.new_shared_area_addr);
+ edbg("SEP Driver:command_args.new_base_addr is %08x\n",
+ (u32)command_args.new_base_addr);
+ edbg("SEP Driver:command_args.new_resident_addr is %08x\n",
+ (u32)command_args.new_resident_addr);
+ edbg("SEP Driver:command_args.new_cache_addr is %08x\n",
+ (u32)command_args.new_cache_addr);
+
+ /* return to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_realloc_cache_resident_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+
+/*
+ This function retrieves the RAR buffer bus address corresponding to the
+ RAR handle provided by user space and places it in the system memory
+ area for the SEP to pick up.
+*/
+static int sep_rar_prepare_output_msg_handler(void *arg)
+{
+ /* error return code */
+ int error;
+
+ /* command args */
+ struct sep_driver_rar_handle_to_bus command_args;
+
+ /* bus address */
+ dma_addr_t rar_bus;
+
+ /* holds the RAR address in the system memory offset */
+ u32 *rar_addr;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ dbg("SEP Driver:--------> sep_rar_prepare_output_msg_handler start\n");
+
+ error = 0;
+ rar_bus = 0;
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_rar_handle_to_bus))) {
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ /* call to translation function only if user handle is not NULL */
+ if (command_args.rar_handle) {
+ error = sep_rar_handle_to_bus(command_args.rar_handle, &rar_bus);
+ if (error)
+ goto end_function;
+ }
+
+ edbg("SEP Driver: rar_addr_bus = %x\n", (u32)rar_bus);
+
+ /* set value in the SYSTEM MEMORY offset */
+ rar_addr = (u32 *)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
+ /* copy the physical address to the System Area.
+ The SEP will follow this address */
+ rar_addr[0] = SEP_RAR_VAL_TOKEN;
+ rar_addr[1] = rar_bus;
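+ /* like the time slot, the RAR slot in the system area is a token
+ word followed by the bus address of the buffer */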
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_rar_prepare_output_msg_handler end\n");
+
+ return error;
+}
+
+/*
+ this function returns the physical and virtual addresses of the static pool
+*/
+static int sep_get_static_pool_addr_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_static_pool_addr_t command_args;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
+
+ error = 0;
+
+ /*prepare the output parameters in the struct */
+ command_args.static_bus_address = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+ command_args.static_virt_address = sep_context.shared_area_virt_addr +
+ SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+
+ edbg(
+ "SEP Driver:physical_static_address is %08x, virtual_static_address %p\n",
+ (u32)command_args.static_bus_address,
+ command_args.static_virt_address);
+
+ /* send the parameters to user application */
+ if (copy_to_user(arg,
+ &command_args,
+ sizeof(struct sep_driver_static_pool_addr_t)))
+ error = -EFAULT;
+
+
+ dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
+
+ return error;
+}
+
+/*
+ this function returns the offset of the given bus address from the
+ start of the mapped area
+*/
+static int sep_get_physical_mapped_offset_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_get_mapped_offset_t command_args;
+
+ /*-----------------------------
+ CODE
+ ------------------------------*/
+
+ dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
+
+ error = 0;
+
+ if (copy_from_user(&command_args,
+ arg,
+ sizeof(struct sep_driver_get_mapped_offset_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+ if (!command_args.bus_address) {
+ edbg("SEP Driver: params validation error\n");
+
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ if (command_args.bus_address < sep_context.shared_area_bus_addr) {
+ error = -EINVAL;
+ goto end_function;
+ }
+
+ /*prepare the output parameters in the struct */
+ command_args.offset = command_args.bus_address -
+ sep_context.shared_area_bus_addr;
+
+ edbg("SEP Driver:physical_address is %08x, offset is %x\n",
+ (u32)command_args.bus_address,
+ command_args.offset);
+
+ /* send the parameters to user application */
+ if (copy_to_user(arg,
+ &command_args,
+ sizeof(struct sep_driver_get_mapped_offset_t)))
+ error = -EFAULT;
+
+
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
+
+ return error;
+}
+
+/*
+ this function handles the get time request
+*/
+static int sep_get_time_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* command arguments */
+ struct sep_driver_get_time_t command_args;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ error = sep_set_time(&command_args.time_bus_address,
+ &command_args.time_value);
+ if (error)
+ goto end_function;
+
+ /* return to user */
+ if (copy_to_user(arg,
+ (void *)&command_args,
+ sizeof(struct sep_driver_get_time_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+
+#ifdef DX_CC52_SUPPORT
+/*
+ this function handles the request for ext (3rd party) cache reallocation
+*/
+static int sep_realloc_ext_cache_handler(void *arg)
+{
+ /* error */
+ int error;
+
+ /* physical ext cache addr */
+ dma_addr_t ext_cache_bus_address;
+
+ /* command arguments */
+ struct sep_driver_realloc_ext_cache_t command_args;
+
+ /*------------------
+ CODE
+ ---------------------*/
+
+ /* copy the data */
+ if (copy_from_user(&command_args,
+ arg ,
+ sizeof(struct sep_driver_realloc_ext_cache_t))) {
+
+ error = -EFAULT;
+ goto end_function;
+ }
+
+#if SEP_DRIVER_LOCK_RAR_MODE
+
+ /* if we are in RAR mode - then the ext cache was already copied
+ and we just need to receive back its physical address */
+ command_args.ext_cache_size_in_bytes = 0;
+
+#endif
+ /* copy the ext cache to its intended location */
+ error = sep_copy_ext_cache_to_area(command_args.ext_cache_addr,
+ command_args.ext_cache_size_in_bytes,
+ &ext_cache_bus_address);
+ if (error)
+ goto end_function;
+
+
+
+ command_args.new_ext_cache_addr = ext_cache_bus_address;
+
+ edbg("SEP Driver:command_args.new_ext_cache_addr is %x\n",
+ command_args.new_ext_cache_addr);
+
+ /* return to user */
+ if (copy_to_user(arg ,
+ (void *)&command_args ,
+ sizeof(struct sep_driver_realloc_ext_cache_t)))
+ error = -EFAULT;
+
+
+end_function:
+
+ return error;
+}
+#endif /*#ifdef DX_CC52_SUPPORT*/
+
+static long sep_ioctl(
+ struct file *filp,
+ u32 cmd,
+ unsigned long arg)
+{
+
+ /* error */
+ long error;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+ error = 0;
+
+ dbg("SEP Driver:--------> ioctl start\n");
+
+
+ edbg("SEP Driver: cmd is %x\n", cmd);
+
+ /* check the lock flag - do not allow any other process
+ to call ioctl on the driver */
+ if ((u32)filp->private_data != SEP_DRIVER_OWN_LOCK_FLAG) {
+ error = -EACCES;
+ goto end_function;
+ }
+
+ /* lock access to ioctl */
+ mutex_lock(&sep_context.ioctl_mutex);
+
+ /* check that the command is for sep device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -EFAULT;
+ goto end_function_unlock;
+ }
+
+ switch (cmd) {
+ case SEP_IOCSENDSEPCOMMAND:
+
+ /* send command to SEP */
+ error = sep_send_command_handler();
+
+ edbg("SEP Driver: after sep_send_command_handler\n");
+
+ break;
+
+ case SEP_IOCSENDSEPRPLYCOMMAND:
+
+ /* send reply command to SEP */
+ error = sep_send_reply_command_handler();
+
+ break;
+
+ case SEP_IOCALLOCDATAPOLL:
+
+ /* allocate data pool */
+ error = sep_allocate_data_pool_memory_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCCREATESYMDMATABLE:
+
+ /* create dma table for synchronous operation */
+ error = sep_create_sync_dma_tables_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCFREEDMATABLEDATA:
+
+ /* free the pages */
+ error = sep_free_dma_table_data_handler();
+
+ break;
+
+ case SEP_IOCSEPSTART:
+
+ /* start command to sep */
+ error = sep_start_handler();
+ break;
+
+ case SEP_IOCSEPINIT:
+
+ /* init command to sep */
+ error = sep_init_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCGETSTATICPOOLADDR:
+
+ /* get the physical and virtual addresses of the static pool */
+ error = sep_get_static_pool_addr_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCENDTRANSACTION:
+
+ error = sep_end_transaction_handler(filp);
+
+ break;
+
+ case SEP_IOCREALLOCCACHERES:
+
+ error = sep_realloc_cache_resident_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCGETMAPPEDADDROFFSET:
+
+ error = sep_get_physical_mapped_offset_handler((void *)arg);
+
+ break;
+
+#ifdef DX_CC52_SUPPORT
+ case SEP_IOCREALLOCEXTCACHE:
+
+ error = sep_realloc_ext_cache_handler((void *)arg);
+
+ break;
+#endif
+
+ case SEP_IOCGETIME:
+
+ error = sep_get_time_handler((void *)arg);
+
+ break;
+
+ case SEP_IOCRARPREPAREMESSAGE:
+
+ error = sep_rar_prepare_output_msg_handler((void *)arg);
+
+ break;
+
+ default:
+ edbg("SEP Driver: **** NO COMMAND ****\n");
+
+ error = -ENOTTY;
+ break;
+ }
+
+end_function_unlock:
+
+ mutex_unlock(&sep_context.ioctl_mutex);
+
+end_function:
+
+ dbg("SEP Driver:<-------- ioctl end\n");
+
+ return error;
+}
+
+#ifdef DX_CC52_SUPPORT
+
+
+/*
+ singleton ioctl
+*/
+static long sep_singleton_ioctl(struct file *filp,
+ u32 cmd,
+ unsigned long arg)
+{
+
+ /* error */
+ long error;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+ error = 0;
+
+ dbg("SEP Driver:--------> sep_singleton_ioctl start\n");
+
+ edbg("SEP Driver: cmd is %x\n", cmd);
+
+ /* check that the command is for sep device */
+ if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+ error = -ENOTTY;
+ goto end_function;
+ }
+
+ switch (cmd) {
+ case SEP_IOCTLSETCALLERID:
+ error = sep_set_caller_id_handler(arg);
+ break;
+
+ default:
+ error = sep_ioctl(filp, cmd, arg);
+ break;
+ }
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_singleton_ioctl end\n");
+
+ return error;
+}
+
+/* file operation for singleton sep operations */
+static const struct file_operations singleton_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_singleton_ioctl,
+ .poll = sep_poll,
+ .open = sep_singleton_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+};
+
+#endif /*DX_CC52_SUPPORT*/
+
+
+/* file operation for normal sep operations */
+static const struct file_operations sep_file_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sep_ioctl,
+ .poll = sep_poll,
+ .open = sep_open,
+ .release = sep_release,
+ .mmap = sep_mmap,
+
+};
+
+/*
+ this function registers the driver to the file system
+*/
+static int sep_register_driver_to_fs(void)
+{
+ /* return value */
+ int ret_val;
+
+ /* major number */
+ int major;
+
+ /*---------------------
+ CODE
+ -----------------------*/
+
+#ifdef DX_CC52_SUPPORT
+ ret_val = alloc_chrdev_region(&sep_context.device_number,
+ 0,
+ 2,
+ DRIVER_NAME);
+#else
+ ret_val = alloc_chrdev_region(&sep_context.device_number,
+ 0,
+ 1,
+ DRIVER_NAME);
+#endif
+ if (ret_val) {
+ edbg("sep_driver:major number allocation failed,\
+ retval is %d\n",
+ ret_val);
+ goto end_function;
+ }
+
+ /* get the major number */
+ major = MAJOR(sep_context.device_number);
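+ /* in CC52 mode two minor numbers were reserved above: minor 0 for the
+ main sep device and minor 1 for the singleton device set up below */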
+
+ /* init cdev */
+ cdev_init(&sep_context.cdev, &sep_file_operations);
+ sep_context.cdev.owner = THIS_MODULE;
+
+ /* register the driver with the kernel */
+ ret_val = cdev_add(&sep_context.cdev, sep_context.device_number, 1);
+
+ if (ret_val) {
+ edbg("sep_driver:cdev_add failed, retval is %d\n",
+ ret_val);
+ goto end_function_unregister_devnum;
+ }
+
+ goto end_function;
+#ifdef DX_CC52_SUPPORT
+
+ sep_context.singleton_device_number = MKDEV(major, 1);
+
+ /* init singleton cdev */
+ cdev_init(&sep_context.singleton_cdev , &singleton_file_operations);
+ sep_context.singleton_cdev.owner = THIS_MODULE;
+
+ /* register the driver with the kernel fs */
+ ret_val = cdev_add(&sep_context.singleton_cdev ,
+ sep_context.singleton_device_number ,
+ 1);
+ if (ret_val) {
+ edbg("sep_driver:cdev_add 2 failed, retval is %d\n" ,
+ ret_val);
+ goto end_function_unregister_first_dev;
+ }
+
+ goto end_function;
+
+end_function_unregister_first_dev:
+
+ /* delete the first device */
+ cdev_del(&sep_context.cdev);
+
+#endif
+
+end_function_unregister_devnum:
+
+#ifdef DX_CC52_SUPPORT
+ /* unregister dev numbers */
+ unregister_chrdev_region(sep_context.device_number, 2);
+#else
+ unregister_chrdev_region(sep_context.device_number, 1);
+#endif
+
+end_function:
+
+ return ret_val;
+}
+
+/*
+ this function unregisters the driver from the fs
+*/
+static void sep_unregister_driver_from_fs(void)
+{
+ /*-------------------
+ CODE
+ ---------------------*/
+
+ cdev_del(&sep_context.cdev);
+
+#ifdef DX_CC52_SUPPORT
+ cdev_del(&sep_context.singleton_cdev);
+
+ /* unregister dev numbers */
+ unregister_chrdev_region(sep_context.device_number, 2);
+#else
+ unregister_chrdev_region(sep_context.device_number, 1);
+#endif
+
+}
+
+/*
+ reconfig the shared area between HOST and SEP - needed in case
+ the DX_CC_Init function was called before OS loading
+*/
+static int sep_reconfig_shared_area(void)
+{
+ int ret_val;
+
+ /*----------------------
+ CODE
+ --------------------------*/
+
+ ret_val = 0;
+
+#if SEP_DRIVER_RECONFIG_MESSAGE_AREA
+
+ /* send the new SHARED MESSAGE AREA to the SEP */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_HOST_SEP_GPR1_REG_ADDR ,
+ sep_context.phys_shared_area_addr);
+
+ /* poll for SEP response */
+ ret_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+ while (ret_val != 0xffffffff &&
+ ret_val != sep_context.phys_shared_area_addr) {
+ ret_val = SEP_READ_REGISTER(&sep_context,
+ HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+ }
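+ /* SEP acknowledges by echoing the address back through GPR1; if the
+ loop instead exits with 0xffffffff the reconfiguration is treated
+ as failed below */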
+
+ /* check the return value (register) */
+ if (ret_val != sep_context.phys_shared_area_addr)
+ ret_val = -ENOMEM;
+
+
+#endif
+
+ return ret_val;
+}
+
+static void sep_init_context(void)
+{
+ /*-------------
+ CODE
+ ----------------*/
+
+ /* zero fields */
+ sep_context.num_lli_tables_created = 0;
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+ sep_context.in_num_pages = 0;
+ sep_context.out_num_pages = 0;
+ sep_context.in_map_array = 0;
+ sep_context.out_map_array = 0;
+ sep_context.in_map_num_entries = 0;
+ sep_context.out_map_num_entries = 0;
+ sep_context.shared_area_dma_addr = 0;
+
+
+ /* init ioctl mutex */
+ mutex_init(&sep_context.ioctl_mutex);
+
+ /* calculate the shared_area_size */
+ sep_context.shared_area_size =
+ SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+ SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+}
+
+
+/*--------------------------------------------------------------
+ init function
+----------------------------------------------------------------*/
+static int __init sep_init(void)
+{
+ /* return value */
+ int ret_val;
+
+ /*------------------------
+ CODE
+ ------------------------*/
+
+ dbg("SEP Driver:-------->Init start\n");
+
+ ret_val = 0;
+
+ ret_val = sep_register_driver_to_device();
+ if (ret_val) {
+ edbg("sep_driver:sep_register_driver_to_device failed, ret_val is %d\n",
+ ret_val);
+ goto end_function;
+ }
+
+ /* initialize the context fields */
+ sep_init_context();
+
+ /* allocate the shared area */
+ if (sep_map_and_alloc_shared_area(&sep_context)) {
+ ret_val = -ENOMEM;
+ /* allocation failed */
+ goto end_function;
+ }
+
+ /* initialize wait queue */
+ init_waitqueue_head(&sep_context.event);
+
+#ifdef DX_CC52_SUPPORT
+ /* init the caller id */
+ ret_val = sep_init_caller_id();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+#endif
+
+ /* reconfig the shared area - needed in case the OS
+ was initialized after DX_CC_Init */
+ ret_val = sep_reconfig_shared_area();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+ /* register driver to fs */
+ ret_val = sep_register_driver_to_fs();
+ if (ret_val)
+ goto end_function_deallocate_sep_shared_area;
+
+ goto end_function;
+
+end_function_deallocate_sep_shared_area:
+
+ /* de-allocate shared area */
+ sep_unmap_and_free_shared_area(&sep_context);
+
+end_function:
+
+ dbg("SEP Driver:<-------- Init end\n");
+
+ return ret_val;
+}
+
+
+
+
+/*-------------------------------------------------------------
+ exit function
+--------------------------------------------------------------*/
+static void __exit sep_exit(void)
+{
+ /*-----------------------------
+ CODE
+ --------------------------------*/
+
+ dbg("SEP Driver:--------> Exit start\n");
+
+ /* unregister from fs */
+ sep_unregister_driver_from_fs();
+
+ /* free shared area */
+ sep_unmap_and_free_shared_area(&sep_context);
+
+ iounmap(sep_context.rar_virtual_address);
+ iounmap((void *)sep_context.reg_addr);
+
+ sep_context.rar_virtual_address = NULL;
+ sep_context.reg_addr = NULL;
+
+ dbg("SEP Driver:<-------- Exit end\n");
+}
+
+
+/*
+ interrupt handler function
+*/
+irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+ /* int error */
+ irqreturn_t int_error;
+
+ /* reg value */
+ u32 reg_val;
+
+ /*-----------------------------
+ CODE
+ -----------------------------*/
+
+ int_error = IRQ_HANDLED;
+
+ /* read the IRR register to check if this is SEP interrupt */
+ reg_val = SEP_READ_REGISTER(&sep_context, HW_HOST_IRR_REG_ADDR);
+ edbg("SEP Interrupt - reg is %08x\n", reg_val);
+
+
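+ /* bit 13 of the IRR indicates a reply interrupt from the SEP */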
+ /* check if this is reply interrupt from SEP */
+ if (reg_val & (0x1 << 13)) {
+ /* update the counter of reply messages */
+ sep_context.sep_to_host_reply_counter++;
+
+ /* wake up the waiting process */
+ wake_up(&sep_context.event);
+ } else {
+ int_error = IRQ_NONE;
+ goto end_function;
+ }
+
+ /* clear the interrupt */
+ SEP_WRITE_REGISTER(&sep_context, HW_HOST_ICR_REG_ADDR, reg_val);
+
+end_function:
+
+ return int_error;
+}
+
+/*
+ this function calculates the size of data that can be inserted into the lli
+ table from this array; it stops either when the table is full
+ (all entries are used) or when there are no more entries in the lli array
+*/
+u32 sep_calculate_lli_table_max_size(
+ struct sep_lli_entry_t *lli_in_array_ptr,
+ u32 num_array_entries)
+{
+ /* table data size */
+ u32 table_data_size;
+
+ /* counter */
+ u32 counter;
+
+ /*---------------------
+ CODE
+ ----------------------*/
+
+ table_data_size = 0;
+
+ /* accumulate entry sizes until either the table is full
+ or the data has ended */
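+ /* one entry per table is reserved for the info entry, hence the
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1 bound below */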
+ for (counter = 0;
+ (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+ (counter < num_array_entries); counter++)
+ table_data_size += lli_in_array_ptr[counter].block_size;
+
+ return table_data_size;
+}
+
+/*
+ this function builds one lli table from the lli_array according to
+ the given size of data
+*/
+static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr,
+ struct sep_lli_entry_t *lli_table_ptr,
+ u32 *num_processed_entries_ptr,
+ u32 *num_table_entries_ptr,
+ u32 table_data_size)
+{
+ /* current table data size */
+ u32 curr_table_data_size;
+
+ /* counter of lli array entry */
+ u32 array_counter;
+
+ /*-----------------------
+ CODE
+ ---------------------------*/
+
+ dbg("SEP Driver:--------> sep_build_lli_table start\n");
+
+ /* init current table data size and lli array entry counter */
+ curr_table_data_size = 0;
+ array_counter = 0;
+ *num_table_entries_ptr = 1;
+
+ edbg("SEP Driver:table_data_size is %x\n", table_data_size);
+
+ /* fill the table till table size reaches the needed amount */
+ while (curr_table_data_size < table_data_size) {
+ /* update the number of entries in table */
+ (*num_table_entries_ptr)++;
+
+ lli_table_ptr->bus_address =
+ SEP_CHANGE_ENDIANNESS(lli_array_ptr[array_counter].bus_address);
+
+ lli_table_ptr->block_size =
+ SEP_CHANGE_ENDIANNESS(lli_array_ptr[array_counter].block_size);
+
+ curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+ edbg("SEP Driver:lli_table_ptr is %08x\n", (u32)lli_table_ptr);
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* check for overflow of the table data */
+ if (curr_table_data_size > table_data_size) {
+ edbg("SEP Driver:curr_table_data_size > table_data_size\n");
+
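+ /* split the entry: trim the size recorded in the table to fit and
+ leave the remainder in the lli array for the next table */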
+ /* update the size of block in the table */
+ lli_table_ptr->block_size -=
+ SEP_CHANGE_ENDIANNESS((curr_table_data_size - table_data_size));
+
+ /* update the physical address in the lli array */
+ lli_array_ptr[array_counter].bus_address +=
+ SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size);
+
+ /* update the block size left in the lli array */
+ lli_array_ptr[array_counter].block_size =
+ (curr_table_data_size - table_data_size);
+ } else
+ /* advance to the next entry in the lli_array */
+ array_counter++;
+
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+ /* move to the next entry in table */
+ lli_table_ptr++;
+ }
+
+ /* set the info entry to default */
+ lli_table_ptr->bus_address = 0xffffffff;
+ lli_table_ptr->block_size = 0;
+
+ edbg("SEP Driver:lli_table_ptr is %08x\n", (u32)lli_table_ptr);
+ edbg("SEP Driver:lli_table_ptr->bus_address is %08x\n",
+ lli_table_ptr->bus_address);
+ edbg("SEP Driver:lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+
+
+ /* set the output parameter */
+ *num_processed_entries_ptr += array_counter;
+
+ edbg("SEP Driver:*num_processed_entries_ptr is %x\n",
+ *num_processed_entries_ptr);
+
+
+ dbg("SEP Driver:<-------- sep_build_lli_table end\n");
+
+ return;
+}
+
+/*
+ this function goes over the chain of created tables and
+ prints all their data
+*/
+static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr,
+ u32 num_table_entries,
+ u32 table_data_size)
+{
+ u32 table_count;
+
+ u32 entries_count;
+ /*-----------------------------
+ CODE
+ -------------------------------*/
+
+ dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
+
+ table_count = 1;
+ while ((u32)lli_table_ptr != 0xffffffff) {
+ edbg("SEP Driver: lli table %08x, table_data_size is %x\n",
+ table_count,
+ table_data_size);
+ edbg("SEP Driver: num_table_entries is %x\n",
+ num_table_entries);
+
+ /* print the entries of the table (including the info entry) */
+ for (entries_count = 0;
+ entries_count < num_table_entries;
+ entries_count++, lli_table_ptr++) {
+
+ edbg("SEP Driver:lli_table_ptr address is %08x\n",
+ (u32)lli_table_ptr);
+ edbg("SEP Driver:phys address is %08x "
+ "block size is %x\n",
+ lli_table_ptr->bus_address,
+ lli_table_ptr->block_size);
+ }
+
+ /* point to the info entry */
+ lli_table_ptr--;
+
+ edbg("SEP Driver:phys lli_table_ptr->block_size is %x\n",
+ lli_table_ptr->block_size);
+ edbg("SEP Driver:phys lli_table_ptr->physical_address "
+ "is %08x\n",
+ lli_table_ptr->bus_address);
+
+
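+ /* the info entry packs the next table's entry count in bits 31-24
+ of block_size and its data size in bits 23-0; bus_address points
+ to the next table in the chain */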
+ table_data_size =
+ SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size) & 0xffffff;
+
+ num_table_entries =
+ (SEP_CHANGE_ENDIANNESS(lli_table_ptr->block_size) >> 24) & 0xff;
+
+ lli_table_ptr = (struct sep_lli_entry_t *)
+ (SEP_CHANGE_ENDIANNESS(lli_table_ptr->bus_address));
+
+ edbg("SEP Driver:phys table_data_size is %x "
+ "num_table_entries is %x lli_table_ptr is %x\n",
+ table_data_size, num_table_entries, (u32)lli_table_ptr);
+
+ if ((u32)lli_table_ptr != 0xffffffff)
+ lli_table_ptr = (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context,
+ (u32)lli_table_ptr);
+ table_count++;
+ }
+
+ dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
+}
+
+
+static void sep_prepare_empty_lli_table(dma_addr_t *lli_table_addr_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ /* lli table pointer */
+ struct sep_lli_entry_t *lli_table_ptr;
+
+ /*---------------------
+ CODE
+ ------------------------*/
+
+ /* find the area for new table */
+ lli_table_ptr =
+ (struct sep_lli_entry_t *)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
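+ /* an empty table holds two entries: a zeroed data entry followed by
+ the info entry, whose 0xFFFFFFFF bus address marks the end of the
+ chain */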
+ lli_table_ptr->bus_address = 0;
+ lli_table_ptr->block_size = 0;
+
+ lli_table_ptr++;
+ lli_table_ptr->bus_address = 0xFFFFFFFF;
+ lli_table_ptr->block_size = 0;
+
+ /* set the output parameter value */
+ *lli_table_addr_ptr = sep_context.shared_area_bus_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* set the num of entries and table data size for empty table */
+ *num_entries_ptr = 2;
+ *table_data_size_ptr = 0;
+
+ /* update the number of created tables */
+ sep_context.num_lli_tables_created++;
+
+}
+
+/*
+ This function creates the input and output dma tables for
+ symmetric operations (AES/DES) according to the block size from LLI arrays
+*/
+static int sep_construct_dma_tables_from_lli(
+ struct sep_lli_entry_t *lli_in_array,
+ u32 sep_in_lli_entries,
+ struct sep_lli_entry_t *lli_out_array,
+ u32 sep_out_lli_entries,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr)
+{
+ /* points to the area where next lli table can be allocated */
+ u32 lli_table_alloc_addr;
+
+ /* input lli table */
+ struct sep_lli_entry_t *in_lli_table_ptr;
+
+ /* output lli table */
+ struct sep_lli_entry_t *out_lli_table_ptr;
+
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_in_entry_ptr;
+
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_out_entry_ptr;
+
+ /* points to the first entry to be processed in the lli_in_array */
+ u32 current_in_entry;
+
+ /* points to the first entry to be processed in the lli_out_array */
+ u32 current_out_entry;
+
+ /* max size of the input table */
+ u32 in_table_data_size;
+
+ /* max size of the output table */
+ u32 out_table_data_size;
+
+ /* flag that signifies whether this is the first table built from the arrays */
+ u32 first_table_flag;
+
+ /* the data size that should be in table */
+ u32 table_data_size;
+
+ /* number of entries in the input table */
+ u32 num_entries_in_table;
+
+ /* number of entries in the output table */
+ u32 num_entries_out_table;
+
+ /*---------------------
+ CODE
+ ------------------------*/
+
+ dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
+
+ /* initialize to point past the message area, at the next free DMA tables slot */
+ lli_table_alloc_addr = (u32)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ (sep_context.num_lli_tables_created *
+ (sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+
+ current_in_entry = 0;
+ current_out_entry = 0;
+ first_table_flag = 1;
+ info_in_entry_ptr = 0;
+ info_out_entry_ptr = 0;
+
+ /* loop until all the entries in the input array have been processed */
+ while (current_in_entry < sep_in_lli_entries) {
+ /* set the new input and output tables */
+ in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* set the first output tables */
+ out_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ /* update the number of the lli tables created */
+ sep_context.num_lli_tables_created += 2;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* calculate the maximum size of data for input table */
+ in_table_data_size =
+ sep_calculate_lli_table_max_size(
+ &lli_in_array[current_in_entry],
+ (sep_in_lli_entries - current_in_entry));
+
+ /* calculate the maximum size of data for output table */
+ out_table_data_size =
+ sep_calculate_lli_table_max_size(
+ &lli_out_array[current_out_entry],
+ (sep_out_lli_entries - current_out_entry));
+
+ edbg("SEP Driver:in_table_data_size is %x\n", in_table_data_size);
+ edbg("SEP Driver:out_table_data_size is %x\n", out_table_data_size);
+
+ /* take the smaller of the two data sizes */
+ table_data_size = in_table_data_size;
+ if (table_data_size > out_table_data_size)
+ table_data_size = out_table_data_size;
+
+ /* round the table size down to a multiple of the block size */
+ table_data_size = (table_data_size / block_size) * block_size;
+
+ edbg("SEP Driver:table_data_size is %x\n", table_data_size);
+
+ /* construct input lli table */
+ sep_build_lli_table(&lli_in_array[current_in_entry],
+ in_lli_table_ptr,
+ &current_in_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ /* construct output lli table */
+ sep_build_lli_table(&lli_out_array[current_out_entry],
+ out_lli_table_ptr,
+ &current_out_entry,
+ &num_entries_out_table,
+ table_data_size);
+
+ /* if info entry is null - this is the first table built */
+ if (info_in_entry_ptr == 0) {
+ /* set the output parameters to physical addresses */
+ *lli_table_in_ptr =
+ sep_shared_area_virt_to_bus(&sep_context, in_lli_table_ptr);
+
+ *in_num_entries_ptr = num_entries_in_table;
+
+ *lli_table_out_ptr =
+ sep_shared_area_virt_to_bus(&sep_context , out_lli_table_ptr);
+
+ *out_num_entries_ptr = num_entries_out_table;
+ *table_data_size_ptr = table_data_size;
+
+ edbg("SEP Driver:output lli_table_in_ptr is %08x\n",
+ (u32)*lli_table_in_ptr);
+ edbg("SEP Driver:output lli_table_out_ptr is %08x\n",
+ (u32)*lli_table_out_ptr);
+ } else {
+ /* update the info entry of the previous in table */
+ info_in_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ info_in_entry_ptr->block_size =
+ ((num_entries_in_table) << 24) | (table_data_size);
+
+ /* update the info entry of the previous out table */
+ info_out_entry_ptr->bus_address =
+ sep_shared_area_virt_to_bus(&sep_context,
+ out_lli_table_ptr);
+ info_out_entry_ptr->block_size =
+ ((num_entries_out_table) << 24) | (table_data_size);
+ }
+
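+ /* the last entry of each table is the info entry; it is filled on
+ the next iteration to chain to the following table */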
+ /* save the pointer to the info entry of the current tables */
+ info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
+
+ edbg("SEP Driver:output num_entries_out_table is %x\n",
+ (u32)num_entries_out_table);
+ edbg("SEP Driver:output info_in_entry_ptr is %x\n",
+ (u32)info_in_entry_ptr);
+ edbg("SEP Driver:output info_out_entry_ptr is %x\n",
+ (u32)info_out_entry_ptr);
+ }
+
+ /* print input tables */
+ sep_debug_print_lli_tables(
+ (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context , *lli_table_in_ptr),
+ *in_num_entries_ptr,
+ *table_data_size_ptr);
+
+ /* print output tables */
+ sep_debug_print_lli_tables(
+ (struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context , *lli_table_out_ptr),
+ *out_num_entries_ptr,
+ *table_data_size_ptr);
+
+ dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
+
+ return 0;
+}
+
+
+/*
+ This function locks all the physical pages of the kernel virtual buffer
+ and constructs a basic lli array, where each entry holds the physical
+ page address and the size of the data held in that physical page
+*/
+int sep_lock_kernel_pages(u32 kernel_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry_t **lli_array_ptr,
+ int in_out_flag)
+
+{
+ /* error */
+ int error;
+
+ /* array of lli */
+ struct sep_lli_entry_t *lli_array;
+
+ /* map array */
+ struct sep_dma_map *map_array;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
+
+ error = 0;
+
+ edbg("SEP Driver: kernel_virt_addr is %08x\n", kernel_virt_addr);
+ edbg("SEP Driver: data_size is %x\n", data_size);
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * 1, GFP_ATOMIC);
+ if (!lli_array) {
+ edbg("SEP Driver: kmalloc for lli_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ map_array = kmalloc(sizeof(struct sep_dma_map) * 1 , GFP_ATOMIC);
+ if (!map_array) {
+ edbg("SEP Driver: kmalloc for map_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error;
+ }
+
+ map_array[0].dma_addr =
+ dma_map_single(sep_context.dev_ptr ,
+ (void *)kernel_virt_addr,
+ data_size,
+ DMA_BIDIRECTIONAL);
+ map_array[0].size = data_size;
+
+
+ /* set the start address of the first page - app data may not start at
+ the beginning of the page */
+ lli_array[0].bus_address = (u32)map_array[0].dma_addr;
+ lli_array[0].block_size = map_array[0].size;
+
+ /* debug print */
+ edbg("lli_array[0].bus_address is %08x, "
+ "lli_array[0].block_size is %x\n",
+ lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* set the output parameters */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep_context.in_num_pages = 1;
+ sep_context.in_page_array = 0;
+ sep_context.in_map_array = map_array;
+ sep_context.in_map_num_entries = 1;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep_context.out_num_pages = 1;
+ sep_context.out_page_array = 0;
+ sep_context.out_map_array = map_array;
+ sep_context.out_map_num_entries = 1;
+ }
+
+ goto end_function;
+
+end_function_with_error:
+
+ kfree(lli_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
+
+ return error;
+}
+
+/*
+ This function locks all the physical pages of the application virtual buffer
+ and constructs a basic lli array, where each entry holds the physical page
+ address and the size of the application data held in that physical page
+*/
+int sep_lock_user_pages(u32 app_virt_addr,
+ u32 data_size,
+ struct sep_lli_entry_t **lli_array_ptr,
+ int in_out_flag)
+
+{
+ /* error */
+ int error;
+
+ /* the page of the end address of the user space buffer */
+ u32 end_page;
+
+ /* the page of the start address of the user space buffer */
+ u32 start_page;
+
+ /* the range in pages */
+ u32 num_pages;
+
+ /* array of pointers to pages */
+ struct page **page_array;
+
+ /* array of lli */
+ struct sep_lli_entry_t *lli_array;
+
+ /* map array */
+ struct sep_dma_map *map_array;
+
+ /* count */
+ u32 count;
+
+ /* result */
+ int result;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_lock_user_pages start\n");
+
+ error = 0;
+
+ /* set start and end pages and num pages */
+ end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+ start_page = app_virt_addr >> PAGE_SHIFT;
+ num_pages = end_page - start_page + 1;
+
+ edbg("SEP Driver: app_virt_addr is %08x\n", app_virt_addr);
+ edbg("SEP Driver: data_size is %x\n", data_size);
+ edbg("SEP Driver: start_page is %x\n", start_page);
+ edbg("SEP Driver: end_page is %x\n", end_page);
+ edbg("SEP Driver: num_pages is %x\n", num_pages);
+
+ edbg("SEP Driver: starting page_array malloc\n");
+
+ /* allocate array of pages structure pointers */
+ page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+ if (!page_array) {
+ edbg("SEP Driver: kmalloc for page_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function;
+ }
+
+ map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+ if (!map_array) {
+ edbg("SEP Driver: kmalloc for map_array failed\n");
+ error = -ENOMEM;
+ goto end_function_with_error1;
+ }
+
+ lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages,
+ GFP_ATOMIC);
+ if (!lli_array) {
+ edbg("SEP Driver: kmalloc for lli_array failed\n");
+
+ error = -ENOMEM;
+ goto end_function_with_error2;
+ }
+
+ edbg("SEP Driver: starting get_user_pages\n");
+
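+ /* request writable mappings (write flag set) since the pages are
+ later DMA-mapped bidirectionally */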
+ /* convert the application virtual address into a set of physical pages */
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current,
+ current->mm,
+ app_virt_addr,
+ num_pages,
+ 1,
+ 0,
+ page_array,
+ 0);
+ up_read(&current->mm->mmap_sem);
+
+ /* check the number of pages locked - if not all then exit with error */
+ if (result != num_pages) {
+ edbg("SEP Driver: not all pages locked by get_user_pages\n");
+
+ error = -ENOMEM;
+ goto end_function_with_error3;
+ }
+
+ edbg("SEP Driver: get_user_pages succeeded\n");
+
+ /* fill the array using page array data and map the pages - this action
+ will also flush the cache as needed */
+ for (count = 0; count < num_pages; count++) {
+ /* fill the map array */
+ map_array[count].dma_addr =
+ dma_map_page(sep_context.dev_ptr, page_array[count],
+ 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ map_array[count].size = PAGE_SIZE;
+
+ /* fill the lli array entry */
+ lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+ lli_array[count].block_size = PAGE_SIZE;
+
+ edbg("lli_array[%x].bus_address is %08x, "
+ "lli_array[%x].block_size is %x\n",
+ count, lli_array[count].bus_address,
+ count,
+ lli_array[count].block_size);
+ }
+
+ /* adjust the offset of the first page -
+ data may not start at the beginning of the page */
+ lli_array[0].bus_address =
+ lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+ /* check whether all the data fits in the first page */
+ if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+ lli_array[0].block_size = data_size;
+ else
+ lli_array[0].block_size =
+ PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+ edbg("lli_array[0].bus_address is %08x, "
+ "lli_array[0].block_size is %x\n",
+ lli_array[0].bus_address,
+ lli_array[0].block_size);
+
+ /* check the size of the last page */
+ if (num_pages > 1) {
+ lli_array[num_pages - 1].block_size =
+ (app_virt_addr + data_size) & (~PAGE_MASK);
+ edbg("lli_array[%x].bus_address is %08x, "
+ "lli_array[%x].block_size is %x\n",
+ num_pages - 1, lli_array[num_pages - 1].bus_address,
+ num_pages - 1,
+ lli_array[num_pages - 1].block_size);
+ }
+
+ /* set output params according to the in_out flag */
+ if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+ *lli_array_ptr = lli_array;
+ sep_context.in_num_pages = num_pages;
+ sep_context.in_page_array = page_array;
+ sep_context.in_map_array = map_array;
+ sep_context.in_map_num_entries = num_pages;
+ } else {
+ *lli_array_ptr = lli_array;
+ sep_context.out_num_pages = num_pages;
+ sep_context.out_page_array = page_array;
+ sep_context.out_map_array = map_array;
+ sep_context.out_map_num_entries = num_pages;
+ }
+
+ goto end_function;
+
+end_function_with_error3:
+
+ /* free lli array */
+ kfree(lli_array);
+
+end_function_with_error2:
+
+ kfree(map_array);
+
+end_function_with_error1:
+
+ /* free page array */
+ kfree(page_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
+
+ return error;
+}
+
+/*
+ This function prepares only the input DMA table for synchronous symmetric
+ operations (HASH)
+*/
+int sep_prepare_input_dma_table(unsigned long app_virt_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_ptr,
+ u32 *num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool isKernelVirtualAddress)
+
+{
+ /* pointer to the info entry of the table - the last entry */
+ struct sep_lli_entry_t *info_entry_ptr;
+
+ /* array of lli entries */
+ struct sep_lli_entry_t *lli_array_ptr;
+
+ /* points to the first entry to be processed in the lli_in_array */
+ u32 current_entry;
+
+ /* num entries in the virtual buffer */
+ u32 sep_lli_entries;
+
+ /* lli table pointer */
+ struct sep_lli_entry_t *in_lli_table_ptr;
+
+ /* the total data in one table */
+ u32 table_data_size;
+
+ /* number of entries in lli table */
+ u32 num_entries_in_table;
+
+ /* next table address */
+ u32 lli_table_alloc_addr;
+
+ /* result */
+ u32 result;
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
+
+ edbg("SEP Driver:data_size is %x\n", data_size);
+ edbg("SEP Driver:block_size is %x\n", block_size);
+
+ /* initialize the pages pointers */
+ sep_context.in_page_array = 0;
+ sep_context.in_num_pages = 0;
+
+ /* set the kernel address for first table to be allocated */
+ lli_table_alloc_addr = (u32)(sep_context.shared_area_virt_addr +
+ SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
+ sep_context.num_lli_tables_created *
+ sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+ if (data_size == 0) {
+
+ /* special case - create an empty table - 2 entries, zero data */
+ sep_prepare_empty_lli_table(lli_table_ptr,
+ num_entries_ptr,
+ table_data_size_ptr);
+
+ goto end_function;
+ }
+
+ /* check if the pages are in Kernel Virtual Address layout */
+ if (isKernelVirtualAddress == true)
+ /* lock the pages of the kernel buffer and translate them to pages */
+ result = sep_lock_kernel_pages(app_virt_addr,
+ data_size,
+ &lli_array_ptr,
+ SEP_DRIVER_IN_FLAG);
+ else
+ /* lock the pages of the user buffer and translate them to pages */
+ result = sep_lock_user_pages(app_virt_addr,
+ data_size,
+ &lli_array_ptr,
+ SEP_DRIVER_IN_FLAG);
+
+ if (result)
+ return result;
+
+ edbg("SEP Driver:output sep_in_num_pages is %x\n",
+ sep_context.in_num_pages);
+
+ current_entry = 0;
+ info_entry_ptr = 0;
+ sep_lli_entries = sep_context.in_num_pages;
+
+ /* loop until all the entries in the input array have been processed */
+ while (current_entry < sep_lli_entries) {
+ /* set the new input table */
+ in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
+
+ lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+ /* update the number of created tables */
+ sep_context.num_lli_tables_created++;
+
+ /* calculate the maximum size of data for input table */
+ table_data_size = sep_calculate_lli_table_max_size(
+ &lli_array_ptr[current_entry],
+ (sep_lli_entries - current_entry));
+
+ /* round the table size down to a multiple of the block size */
+ table_data_size = (table_data_size / block_size) * block_size;
+
+ edbg("SEP Driver:output table_data_size is %x\n", table_data_size);
+
+ /* construct input lli table */
+ sep_build_lli_table(&lli_array_ptr[current_entry],
+ in_lli_table_ptr,
+ &current_entry,
+ &num_entries_in_table,
+ table_data_size);
+
+ if (info_entry_ptr == 0) {
+ /* set the output parameters to physical addresses */
+ *lli_table_ptr = sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ *num_entries_ptr = num_entries_in_table;
+ *table_data_size_ptr = table_data_size;
+
+ edbg("SEP Driver:output lli_table_in_ptr is %08x\n",
+ (u32)*lli_table_ptr);
+ } else {
+ /* update the info entry of the previous in table */
+ info_entry_ptr->bus_address = sep_shared_area_virt_to_bus(&sep_context,
+ in_lli_table_ptr);
+ info_entry_ptr->block_size = ((num_entries_in_table) << 24) |
+ (table_data_size);
+ }
+
+ /* save the pointer to the info entry of the current tables */
+ info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+ }
+
+ /* print input tables */
+ sep_debug_print_lli_tables((struct sep_lli_entry_t *)
+ sep_shared_area_bus_to_virt(&sep_context, *lli_table_ptr),
+ *num_entries_ptr,
+ *table_data_size_ptr);
+
+ /* free the lli array */
+ kfree(lli_array_ptr);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
+
+ return 0;
+
+}
+
+/*
+ This function builds input and output DMA tables for synchronous
+ symmetric operations (AES, DES, HASH). It also ensures that the data
+ size of each table is a multiple of the block size
+*/
+int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
+ unsigned long app_virt_out_addr,
+ u32 data_size,
+ u32 block_size,
+ dma_addr_t *lli_table_in_ptr,
+ dma_addr_t *lli_table_out_ptr,
+ u32 *in_num_entries_ptr,
+ u32 *out_num_entries_ptr,
+ u32 *table_data_size_ptr,
+ bool isKernelVirtualAddress)
+
+{
+ /* lli array for the input buffer */
+ struct sep_lli_entry_t *lli_in_array;
+
+ /* lli array for the output buffer */
+ struct sep_lli_entry_t *lli_out_array;
+
+ /* result */
+ int result;
+
+
+ /*------------------------
+ CODE
+ --------------------------*/
+
+ dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
+
+ result = 0;
+
+ if (data_size == 0) {
+ /* prepare empty table for input and output */
+ sep_prepare_empty_lli_table(lli_table_in_ptr,
+ in_num_entries_ptr,
+ table_data_size_ptr);
+
+
+ sep_prepare_empty_lli_table(lli_table_out_ptr,
+ out_num_entries_ptr,
+ table_data_size_ptr);
+
+ goto end_function;
+ }
+
+ /* initialize the pages pointers */
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+
+ /* check if the pages are in Kernel Virtual Address layout */
+ if (isKernelVirtualAddress == true) {
+ /* lock the pages of the kernel buffer and
+ translate them to pages */
+ result = sep_lock_kernel_pages(app_virt_in_addr,
+ data_size,
+ &lli_in_array,
+ SEP_DRIVER_IN_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_kernel_pages for input "
+ "virtual buffer failed\n");
+ goto end_function;
+ }
+ } else {
+ /* lock the pages of the user buffer and
+ translate them to pages */
+ result = sep_lock_user_pages(app_virt_in_addr,
+ data_size,
+ &lli_in_array,
+ SEP_DRIVER_IN_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_user_pages for input virtual "
+ "buffer failed\n");
+ goto end_function;
+ }
+ }
+
+ if (isKernelVirtualAddress == true) {
+ result = sep_lock_kernel_pages(app_virt_out_addr,
+ data_size,
+ &lli_out_array,
+ SEP_DRIVER_OUT_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_kernel_pages "
+ "for output virtual buffer failed\n");
+ goto end_function_with_error1;
+ }
+ } else {
+ result = sep_lock_user_pages(app_virt_out_addr,
+ data_size,
+ &lli_out_array,
+ SEP_DRIVER_OUT_FLAG);
+ if (result) {
+ edbg("SEP Driver: sep_lock_user_pages "
+ "for output virtual buffer failed\n");
+ goto end_function_with_error1;
+ }
+ }
+
+
+ edbg("sep_in_num_pages is %x\n", sep_context.in_num_pages);
+ edbg("sep_out_num_pages is %x\n", sep_context.out_num_pages);
+ edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
+ SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+
+ /* call the function that creates the tables from the lli arrays */
+ result = sep_construct_dma_tables_from_lli(lli_in_array,
+ sep_context.in_num_pages,
+ lli_out_array,
+ sep_context.out_num_pages,
+ block_size,
+ lli_table_in_ptr,
+ lli_table_out_ptr,
+ in_num_entries_ptr,
+ out_num_entries_ptr,
+ table_data_size_ptr);
+ if (result) {
+ edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
+ goto end_function_with_error2;
+ }
+
+ /* fall through - free the lli entry arrays */
+
+ edbg("in_num_entries_ptr is %08x\n", *in_num_entries_ptr);
+ edbg("out_num_entries_ptr is %08x\n", *out_num_entries_ptr);
+ edbg("table_data_size_ptr is %08x\n", *table_data_size_ptr);
+
+
+end_function_with_error2:
+
+ kfree(lli_out_array);
+
+end_function_with_error1:
+
+ kfree(lli_in_array);
+
+end_function:
+
+ dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table "
+ "end result = %d\n", (int)result);
+
+ return result;
+
+}
+
+
+/* this function frees all preallocated dma resources */
+int sep_free_dma_table_data_handler(void)
+{
+ int count;
+
+ /*-------------------------
+ CODE
+ -----------------------------*/
+
+ dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
+
+ /* free input map array */
+ if (sep_context.in_map_array) {
+ for (count = 0; count < sep_context.in_num_pages; count++) {
+ dma_unmap_page(sep_context.dev_ptr ,
+ sep_context.in_map_array[count].dma_addr,
+ sep_context.in_map_array[count].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ kfree(sep_context.in_map_array);
+ }
+
+ /* free output map array */
+ if (sep_context.out_map_array) {
+ for (count = 0; count < sep_context.out_num_pages; count++) {
+ dma_unmap_page(sep_context.dev_ptr ,
+ sep_context.out_map_array[count].dma_addr,
+ sep_context.out_map_array[count].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ kfree(sep_context.out_map_array);
+ }
+
+ /* release the input pages */
+ if (sep_context.in_page_array) {
+ for (count = 0; count < sep_context.in_num_pages; count++)
+ page_cache_release(sep_context.in_page_array[count]);
+
+ kfree(sep_context.in_page_array);
+
+ }
+
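+ /* the output pages were written to by the SEP via DMA, so mark
+ them dirty before releasing them */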
+ if (sep_context.out_page_array) {
+ for (count = 0; count < sep_context.out_num_pages; count++) {
+ if (!PageReserved(sep_context.out_page_array[count]))
+ SetPageDirty(sep_context.out_page_array[count]);
+ page_cache_release(sep_context.out_page_array[count]);
+ }
+
+ kfree(sep_context.out_page_array);
+ }
+
+ /* reset all the values */
+ sep_context.in_page_array = 0;
+ sep_context.out_page_array = 0;
+ sep_context.in_num_pages = 0;
+ sep_context.out_num_pages = 0;
+ sep_context.num_lli_tables_created = 0;
+ sep_context.in_map_array = 0;
+ sep_context.out_map_array = 0;
+ sep_context.in_map_num_entries = 0;
+ sep_context.out_map_num_entries = 0;
+
+
+ dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
+
+ return 0;
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
--
1.6.0.4