diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 329a25248d75c13aa7a6d5ca348675dd6c08bddc..140f05baafd822b97946e183e9f221cf602a8d41 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+struct ehea_bmap *ehea_bmap = NULL;
 
-struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct workqueue_struct *ehea_driver_wq;
-extern struct work_struct ehea_rereg_mr_task;
 
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -66,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
        }
 
        queue->queue_length = nr_of_pages * pagesize;
-       queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+       queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
                ehea_error("no mem for queue_pages");
                return -ENOMEM;
@@ -79,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
         */
        i = 0;
        while (i < nr_of_pages) {
-               u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+               u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out_nomem;
                for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
-                       (queue->queue_pages)[i] = (struct ehea_page*)kpage;
+                       (queue->queue_pages)[i] = (struct ehea_page *)kpage;
                        kpage += pagesize;
                        i++;
                }
@@ -236,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
                return 0;
 
        hcp_epas_dtor(&cq->epas);
-
-       if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+       hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+       if (hret == H_R_STATE) {
                ehea_error_data(cq->adapter, cq->fw_handle);
                hret = ehea_destroy_cq_res(cq, FORCE_FREE);
        }
@@ -302,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                if (i == (eq->attr.nr_pages - 1)) {
                        /* last page */
                        vpage = hw_qpageit_get_inc(&eq->hw_queue);
-                       if ((hret != H_SUCCESS) || (vpage)) {
+                       if ((hret != H_SUCCESS) || (vpage))
                                goto out_kill_hwq;
-                       }
+
                } else {
-                       if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+                       if ((hret != H_PAGE_REGISTERED) || (!vpage))
                                goto out_kill_hwq;
-                       }
+
                }
        }
 
@@ -332,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
        unsigned long flags;
 
        spin_lock_irqsave(&eq->spinlock, flags);
-       eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+       eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
        spin_unlock_irqrestore(&eq->spinlock, flags);
 
        return eqe;
@@ -365,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
        hcp_epas_dtor(&eq->epas);
 
-       if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+       hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+       if (hret == H_R_STATE) {
                ehea_error_data(eq->adapter, eq->fw_handle);
                hret = ehea_destroy_eq_res(eq, FORCE_FREE);
        }
@@ -547,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
        hcp_epas_dtor(&qp->epas);
 
-       if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+       hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+       if (hret == H_R_STATE) {
                ehea_error_data(qp->adapter, qp->fw_handle);
                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
        }
@@ -560,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp)
        return 0;
 }
 
-int ehea_create_busmap( void )
+static inline int ehea_calc_index(unsigned long i, unsigned long s)
 {
-       u64 vaddr = EHEA_BUSMAP_START;
-       unsigned long high_section_index = 0;
-       int i;
+       return (i >> s) & EHEA_INDEX_MASK;
+}
 
-       /*
-        * Sections are not in ascending order -> Loop over all sections and
-        * find the highest PFN to compute the required map size.
-       */
-       ehea_bmap.valid_sections = 0;
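+/* Allocate the leaf (ent) table for one dir slot on first use */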
+static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
+                                    int dir)
+{
+       if (!ehea_top_bmap->dir[dir]) {
+               ehea_top_bmap->dir[dir] =
+                       kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
+               if (!ehea_top_bmap->dir[dir])
+                       return -ENOMEM;
+       }
+       return 0;
+}
 
-       for (i = 0; i < NR_MEM_SECTIONS; i++)
-               if (valid_section_nr(i))
-                       high_section_index = i;
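+/* Make sure the intermediate and leaf tables for this top/dir pair exist */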
+static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
+{
+       if (!ehea_bmap->top[top]) {
+               ehea_bmap->top[top] =
+                       kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
+               if (!ehea_bmap->top[top])
+                       return -ENOMEM;
+       }
+       return ehea_init_top_bmap(ehea_bmap->top[top], dir);
+}
 
-       ehea_bmap.entries = high_section_index + 1;
-       ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
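+/*
+ * walk_memory_resource callback: give each present memory section a bus
+ * address starting at EHEA_BUSMAP_START; *arg accumulates the total MR length.
+ */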
+static int ehea_create_busmap_callback(unsigned long pfn,
+                                      unsigned long nr_pages, void *arg)
+{
+       unsigned long i, mr_len, start_section, end_section;
+       start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+       end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+       mr_len = *(unsigned long *)arg;
 
-       if (!ehea_bmap.vaddr)
+       ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+       if (!ehea_bmap)
                return -ENOMEM;
 
-       for (i = 0 ; i < ehea_bmap.entries; i++) {
-               unsigned long pfn = section_nr_to_pfn(i);
+       for (i = start_section; i < end_section; i++) {
+               int ret;
+               int top, dir, idx;
+               u64 vaddr;
+
+               top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+               dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+
+               ret = ehea_init_bmap(ehea_bmap, top, dir);
+               if (ret)
+                       return ret;
+
+               idx = i & EHEA_INDEX_MASK;
+               vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
 
-               if (pfn_valid(pfn)) {
-                       ehea_bmap.vaddr[i] = vaddr;
-                       vaddr += EHEA_SECTSIZE;
-                       ehea_bmap.valid_sections++;
-               } else
-                       ehea_bmap.vaddr[i] = 0;
+               ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
        }
 
+       mr_len += nr_pages * PAGE_SIZE;
+       *(unsigned long *)arg = mr_len;
+
        return 0;
 }
 
-void ehea_destroy_busmap( void )
+static unsigned long ehea_mr_len;
+
+static DEFINE_MUTEX(ehea_busmap_mutex);
+
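+/* Build the busmap by walking all of memory, serialised by ehea_busmap_mutex */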
+int ehea_create_busmap(void)
+{
+       int ret;
+       mutex_lock(&ehea_busmap_mutex);
+       ehea_mr_len = 0;
+       ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+                                  ehea_create_busmap_callback);
+       mutex_unlock(&ehea_busmap_mutex);
+       return ret;
+}
+
+void ehea_destroy_busmap(void)
 {
-       vfree(ehea_bmap.vaddr);
+       int top, dir;
+       mutex_lock(&ehea_busmap_mutex);
+       if (!ehea_bmap)
+               goto out_destroy;
+
+       for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+               if (!ehea_bmap->top[top])
+                       continue;
+
+               for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+                       if (!ehea_bmap->top[top]->dir[dir])
+                               continue;
+
+                       kfree(ehea_bmap->top[top]->dir[dir]);
+               }
+
+               kfree(ehea_bmap->top[top]);
+       }
+
+       kfree(ehea_bmap);
+       ehea_bmap = NULL;
+out_destroy:
+       mutex_unlock(&ehea_busmap_mutex);
 }
 
 u64 ehea_map_vaddr(void *caddr)
 {
-       u64 mapped_addr;
-       unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
-
-       if (likely(index < ehea_bmap.entries)) {
-               mapped_addr = ehea_bmap.vaddr[index];
-               if (likely(mapped_addr))
-                       mapped_addr |= (((unsigned long)caddr)
-                                       & (EHEA_SECTSIZE - 1));
-               else
-                       mapped_addr = -1;
-       } else
-               mapped_addr = -1;
-
-       if (unlikely(mapped_addr == -1))
-               if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-                       queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
-
-       return mapped_addr;
+       int top, dir, idx;
+       unsigned long index, offset;
+
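+       /* Walk the three map levels; a missing level or zero entry is unmapped */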
+       if (!ehea_bmap)
+               return EHEA_INVAL_ADDR;
+
+       index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+       top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+       if (!ehea_bmap->top[top])
+               return EHEA_INVAL_ADDR;
+
+       dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+       if (!ehea_bmap->top[top]->dir[dir])
+               return EHEA_INVAL_ADDR;
+
+       idx = index & EHEA_INDEX_MASK;
+       if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+               return EHEA_INVAL_ADDR;
+
+       offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+       return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
+
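+/* Recombine top/dir/idx into a section number and return its kernel address */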
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+       unsigned long ret = idx;
+       ret |= dir << EHEA_DIR_INDEX_SHIFT;
+       ret |= top << EHEA_TOP_INDEX_SHIFT;
+       return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
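+/* Register one memory section with the firmware, EHEA_MAX_RPAGE pages per hcall */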
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+                              struct ehea_adapter *adapter,
+                              struct ehea_mr *mr)
+{
+       void *pg;
+       u64 j, m, hret;
+       unsigned long k = 0;
+       u64 pt_abs = virt_to_abs(pt);
+
+       void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+       for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+               for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+                       pg = sectbase + ((k++) * EHEA_PAGESIZE);
+                       pt[m] = virt_to_abs(pg);
+               }
+               hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+                                               0, pt_abs, EHEA_MAX_RPAGE);
+
+               if ((hret != H_SUCCESS)
+                   && (hret != H_PAGE_REGISTERED)) {
+                       ehea_h_free_resource(adapter->handle, mr->handle,
+                                            FORCE_FREE);
+                       ehea_error("register_rpage_mr failed");
+                       return hret;
+               }
+       }
+       return hret;
+}
+
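+/* Register every populated section below one dir entry */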
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+                               struct ehea_adapter *adapter,
+                               struct ehea_mr *mr)
+{
+       u64 hret = H_SUCCESS;
+       int idx;
+
+       for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+               if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+                       continue;
+
+               hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+                       return hret;
+       }
+       return hret;
+}
+
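+/* Register every populated dir entry below one top entry */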
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+                                   struct ehea_adapter *adapter,
+                                   struct ehea_mr *mr)
+{
+       u64 hret = H_SUCCESS;
+       int dir;
+
+       for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+               if (!ehea_bmap->top[top]->dir[dir])
+                       continue;
+
+               hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+                       return hret;
+       }
+       return hret;
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
        int ret;
        u64 *pt;
-       void *pg;
-       u64 hret, pt_abs, i, j, m, mr_len;
+       u64 hret;
        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-       mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
+       unsigned long top;
 
-       pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+       pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!pt) {
                ehea_error("no mem");
                ret = -ENOMEM;
                goto out;
        }
-       pt_abs = virt_to_abs(pt);
 
-       hret = ehea_h_alloc_resource_mr(adapter->handle,
-                                       EHEA_BUSMAP_START, mr_len,
-                                       acc_ctrl, adapter->pd,
+       hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
+                                       ehea_mr_len, acc_ctrl, adapter->pd,
                                        &mr->handle, &mr->lkey);
+
        if (hret != H_SUCCESS) {
                ehea_error("alloc_resource_mr failed");
                ret = -EIO;
                goto out;
        }
 
-       for (i = 0 ; i < ehea_bmap.entries; i++)
-               if (ehea_bmap.vaddr[i]) {
-                       void *sectbase = __va(i << SECTION_SIZE_BITS);
-                       unsigned long k = 0;
-
-                       for (j = 0; j < (EHEA_PAGES_PER_SECTION /
-                                        EHEA_MAX_RPAGE); j++) {
-
-                               for (m = 0; m < EHEA_MAX_RPAGE; m++) {
-                                       pg = sectbase + ((k++) * EHEA_PAGESIZE);
-                                       pt[m] = virt_to_abs(pg);
-                               }
-
-                               hret = ehea_h_register_rpage_mr(adapter->handle,
-                                                               mr->handle,
-                                                               0, 0, pt_abs,
-                                                               EHEA_MAX_RPAGE);
-                               if ((hret != H_SUCCESS)
-                                   && (hret != H_PAGE_REGISTERED)) {
-                                       ehea_h_free_resource(adapter->handle,
-                                                            mr->handle,
-                                                            FORCE_FREE);
-                                       ehea_error("register_rpage_mr failed");
-                                       ret = -EIO;
-                                       goto out;
-                               }
-                       }
-               }
+       if (!ehea_bmap) {
+               ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+               ehea_error("no busmap available");
+               ret = -EIO;
+               goto out;
+       }
+
+       for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+               if (!ehea_bmap->top[top])
+                       continue;
+
+               hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+               if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+                       break;
+       }
 
        if (hret != H_SUCCESS) {
                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);