From: Joerg Roedel <joerg.roedel@amd.com>
Subject: [PATCH 04/16] dma-debug: add allocator code
Date: Mon, 19 Jan 2009
Impact: add allocator code for struct dma_debug_entry

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
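For context, dma_entry_alloc()/dma_entry_free() below are meant to be
called from the dma-debug core hooks added later in this series. A
minimal sketch of the intended call pattern (the hook name and the
entry fields used here are assumptions drawn from the rest of the
series, not part of this patch):

static void debug_dma_map_sketch(struct device *dev, u64 dev_addr, u64 size)
{
	struct dma_debug_entry *entry;

	if (global_disable)
		return;

	entry = dma_entry_alloc();
	if (!entry)	/* pool exhausted - checking was just disabled */
		return;

	/* field names assumed from the dma_debug_entry definition
	 * introduced earlier in this series */
	entry->dev      = dev;
	entry->dev_addr = dev_addr;
	entry->size     = size;

	add_dma_entry(entry);	/* hash the entry for later lookup */
}
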
lib/dma-debug.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 57 insertions(+), 0 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index a90f9c4..c9e44e8 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -50,6 +50,16 @@ struct hash_bucket {

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
+/* List of pre-allocated dma_debug_entry's */
+static LIST_HEAD(free_entries);
+/* Lock for the list above */
+static DEFINE_SPINLOCK(free_entries_lock);
+
+/* Global disable flag - will be set in case of an error */
+static bool global_disable __read_mostly;
+
+static u32 num_free_entries;
+static u32 min_free_entries;

/*
* Hash related functions
@@ -139,3 +149,50 @@ static void add_dma_entry(struct dma_debug_entry *entry)
put_hash_bucket(bucket, &flags);
}

+/* struct dma_debug_entry allocator
+ *
+ * The next two functions implement the allocator for
+ * struct dma_debug_entry objects.
+ */
+static struct dma_debug_entry *dma_entry_alloc(void)
+{
+ struct dma_debug_entry *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_entries_lock, flags);
+
+ if (list_empty(&free_entries)) {
+ printk(KERN_ERR "DMA-API: debugging out of memory "
+ "- disabling\n");
+ global_disable = true;
+ goto out;
+ }
+
+ entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+ list_del(&entry->list);
+ memset(entry, 0, sizeof(*entry));
+
+ num_free_entries -= 1;
+ if (num_free_entries < min_free_entries)
+ min_free_entries = num_free_entries;
+
+out:
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
+ return entry;
+}
+
+static void dma_entry_free(struct dma_debug_entry *entry)
+{
+ unsigned long flags;
+
+ /*
+ * add to beginning of the list - this way the entries are
+ * more likely cache hot when they are reallocated.
+ */
+ spin_lock_irqsave(&free_entries_lock, flags);
+ list_add(&entry->list, &free_entries);
+ num_free_entries += 1;
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+}
+
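
Note that this patch only adds the allocator itself; free_entries
starts out empty, so the very first dma_entry_alloc() call would hit
the out-of-memory path and disable debugging. The pre-allocation that
fills the list is expected later in the series; a minimal sketch of
what it could look like (the function name and the entry count are
assumptions, not part of this patch):

static int prealloc_dma_debug_entries(u32 num_entries)
{
	struct dma_debug_entry *entry;
	u32 i;

	for (i = 0; i < num_entries; ++i) {
		/* init-time allocation, so a sleeping GFP_KERNEL is fine */
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	return 0;
}

Starting min_free_entries at num_entries makes the low-water-mark
tracking in dma_entry_alloc() correct from the first allocation on.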
--
1.5.6.4


