/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"

static struct kmem_cache *cq_cache;
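
/*
 * Attach a QP to its completion queue: hash the real QP number into the
 * CQ's QP hash table so completion and event handling can find the QP
 * belonging to a CQE. The CQ spinlock serializes updates to the table.
 */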
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}
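
/*
 * Detach a QP from its completion queue: look up the real QP number in the
 * CQ's hash table and unlink the entry. Returns 0 on success or -EINVAL if
 * the QP is not found on this CQ.
 */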
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq .cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}
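
/*
 * Look up the ehca_qp attached to this CQ by its real QP number. Returns
 * NULL if no matching QP is hashed on the CQ. No locking is done here; the
 * caller must guard against concurrent unassignment.
 */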
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}
	return ret;
}
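
/*
 * Create a completion queue: allocate the ehca_cq structure, reserve an idr
 * token used by the event handlers to find the CQ, allocate the CQ resource
 * via hcall, register each queue page with the firmware and, for userspace
 * CQs, copy the queue parameters back through udata. Errors unwind through
 * the create_cq_exit* labels in reverse order of setup.
 */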
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int ipz_rc, ret, i;
	unsigned long flags;

	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		return ERR_PTR(-ENOMEM);
	}

	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	atomic_set(&my_cq->nr_events, 0);
	init_waitqueue_head(&my_cq->wait_completion);

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

	do {
		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
			cq = ERR_PTR(-ENOMEM);
			ehca_err(device, "Can't reserve idr nr. device=%p",
				 device);
			goto create_cq_exit1;
		}

		write_lock_irqsave(&ehca_cq_idr_lock, flags);
		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}

	if (my_cq->token > 0x1FFFFFF) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Invalid number of cq. device=%p", device);
		goto create_cq_exit2;
	}

	/*
	 * A CQ's maximum depth is 4GB-64, but we need an additional 20
	 * entries as a buffer for receiving error CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%li device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

	ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
	if (!ipz_rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
			 ipz_rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}
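
	/*
	 * Hand each page of the freshly constructed queue to the firmware.
	 * Every registration but the last must return H_PAGE_REGISTERED;
	 * the final one must return H_SUCCESS with no queue pages left over,
	 * otherwise the CQ is torn down again.
	 */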
	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%li", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%li "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
		 my_cq, my_cq->cq_number, cqx_fec);

	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		resp.fw_handle_ofs = (u32)
			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit4;
		}
	}

	return cq;

create_cq_exit4:
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	return cq;
}
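
/*
 * Destroy a completion queue: refuse if userspace still has the queue or
 * firmware handle mapped, remove the CQ from the idr so no new events can
 * reference it, wait for events already in flight to drain, then release
 * the firmware resource and the queue memory.
 */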
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
	}

	/*
	 * remove the CQ from the idr first to make sure
	 * no more interrupt tasklets will touch this CQ
	 */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* now wait until all pending events have completed */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));

	/* nobody's using our CQ any longer -- we can destroy it */
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%lx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	return 0;
}
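
/* Resizing a CQ is not implemented; the verb always fails. */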
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");
	return -EFAULT;
}
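
/* Set up and tear down the slab cache used for struct ehca_cq allocations. */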
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	return cq_cache ? 0 : -ENOMEM;
}

void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}