struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
	}
	queue->queue_length = nr_of_pages * pagesize;
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		ehea_error("no mem for queue_pages");
		return -ENOMEM;
	 */
	i = 0;
	while (i < nr_of_pages) {
-		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
-			(queue->queue_pages)[i] = (struct ehea_page*)kpage;
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	return 0;
	hcp_epas_dtor(&cq->epas);
-
-	if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}
		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
-			if ((hret != H_SUCCESS) || (vpage)) {
+			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
-			}
+
		} else {
-			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage))
				goto out_kill_hwq;
-			}
+
		}
	}
	unsigned long flags;
	spin_lock_irqsave(&eq->spinlock, flags);
-	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);
	return eqe;
	hcp_epas_dtor(&eq->epas);
-	if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}
	hcp_epas_dtor(&qp->epas);
-	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}
	return 0;
}
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	unsigned long high_section_index = 0;
	return 0;
}
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
{
	vfree(ehea_bmap.vaddr);
}
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
	u64 entry;
};
-#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
-	u32 qe = *(u8*)retvalue;
+	u32 qe = *(u8 *)retvalue;
	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
int ehea_destroy_cq(struct ehea_cq *cq);
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);
int ehea_destroy_qp(struct ehea_qp *qp);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
#endif /* __EHEA_QMR_H__ */