/*
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2008
 */

#include "zfcp_ext.h"

/* FIXME(tune): free space should be one max. SBAL chain plus what? */
#define ZFCP_QDIO_PCI_INTERVAL	(QDIO_MAX_BUFFERS_PER_Q \
				- (ZFCP_MAX_SBALS_PER_REQ + 4))
#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
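/*
 * Used by zfcp_qdio_send(): roughly every ZFCP_QDIO_PCI_INTERVAL outbound
 * SBALs, one SBAL is flagged SBAL_FLAGS0_PCI so the adapter raises a
 * program-controlled interrupt and completed buffers are reclaimed while
 * at least one maximum-sized SBAL chain of free space remains.
 */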
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
	int pos;

	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
		if (!sbal[pos])
			return -ENOMEM;
	}
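	/*
	 * Let the remaining entries point into the pages just allocated.
	 * Assuming 4 KiB pages and a 256-byte struct qdio_buffer (16
	 * elements of 16 bytes each), QBUFF_PER_PAGE is 16, so the 128
	 * SBALs of a queue share 8 pages; only every 16th sbal[] entry
	 * owns a page.
	 */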
	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbal[pos] = sbal[pos - 1] + 1;
	return 0;
}
static volatile struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
	return &q->sbal[sbal_idx]->element[sbale_idx];
}
/**
 * zfcp_qdio_free - free memory used by request and response queues
 * @adapter: pointer to the zfcp_adapter structure
 */
void zfcp_qdio_free(struct zfcp_adapter *adapter)
{
	struct qdio_buffer **sbal_req, **sbal_resp;
	int p;

	if (adapter->ccw_device)
		qdio_free(adapter->ccw_device);

	sbal_req = adapter->req_q.sbal;
	sbal_resp = adapter->resp_q.sbal;

	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) sbal_req[p]);
		free_page((unsigned long) sbal_resp[p]);
	}
}
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
{
	dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
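	/*
	 * Trigger adapter recovery; id tags the failure point for
	 * debugging (140 for the request queue handler, 147 for the
	 * response queue handler below).
	 */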
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
			      unsigned int qdio_err, unsigned int siga_err,
			      unsigned int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->req_q;
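	/*
	 * Outbound (request queue) completion handler: QDIO has finished
	 * transmitting 'count' SBALs starting at index 'first'; they are
	 * program-owned again and can be recycled.
	 */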
	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
		zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
					first, count);
		zfcp_qdio_handler_error(adapter, 140);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	atomic_add(count, &queue->count);
	wake_up(&adapter->request_wq);
}
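/*
 * Each pending request is identified by a token: its request ID is stored
 * in the addr field of the response queue SBALE (see zfcp_qdio_int_resp()).
 * zfcp_qdio_reqid_check() maps that token back to the zfcp_fsf_req via the
 * adapter's request list and completes the request.
 */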
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
				  unsigned long req_id, int sbal_idx)
{
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req = zfcp_reqlist_find(adapter, req_id);

	if (!fsf_req)
		/*
		 * An unknown request means potential memory corruption;
		 * we must stop the machine immediately.
		 */
		panic("error: unknown request id (%lx) on adapter %s.\n",
		      req_id, zfcp_get_busid_by_adapter(adapter));

	zfcp_reqlist_remove(adapter, fsf_req);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	fsf_req->sbal_response = sbal_idx;
	zfcp_fsf_req_complete(fsf_req);
}
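/*
 * Hand a range of processed response queue SBALs back to the adapter via
 * do_QDIO(). On failure the buffers stay accounted in queue->count, so the
 * put-back is retried on the next response queue interrupt.
 */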
static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
{
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct ccw_device *cdev = adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			 0, start, count, NULL);

	if (unlikely(retval)) {
		atomic_set(&queue->count, count);
		/* FIXME: Recover this with an adapter reopen? */
	} else {
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
			       unsigned int qdio_err, unsigned int siga_err,
			       unsigned int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	volatile struct qdio_buffer_element *sbale;
	int sbal_idx, sbale_idx, sbal_no;

	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
		zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
					first, count);
		zfcp_qdio_handler_error(adapter, 147);
		return;
	}
	/*
	 * go through all SBALs of the input queue currently
	 * returned by the QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

		/* go through all SBALEs of this SBAL */
		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     sbale_idx++) {
			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
			zfcp_qdio_reqid_check(adapter,
					      (unsigned long) sbale->addr,
					      sbal_idx);
			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
			dev_warn(&adapter->ccw_device->dev,
				 "Protocol violation by adapter. "
				 "Continuing operations.\n");
	}
	/*
	 * put the range of SBALs back to the response queue
	 * (including SBALs which have already been freed before)
	 */
	zfcp_qdio_resp_put_back(adapter, count);
}
/**
 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
 * @req: pointer to struct zfcp_fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
volatile struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}
/**
 * zfcp_qdio_sbale_curr - return current SBALE on req_q for a struct zfcp_fsf_req
 * @req: pointer to struct zfcp_fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
volatile struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
			       req->sbale_curr);
}
static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
{
	int count = atomic_read(&fsf_req->adapter->req_q.count);
	count = min(count, max_sbals);
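	/*
	 * The limit is the index of the last SBAL this request may use.
	 * Example: with 128 queue buffers, sbal_first == 120 and
	 * count == 16, the limit is (120 + 16 - 1) % 128 == 7, i.e. the
	 * request may wrap around the end of the ring.
	 */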
	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
			      % QDIO_MAX_BUFFERS_PER_Q;
}
static volatile struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	volatile struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (fsf_req->sbal_last == fsf_req->sbal_limit)
		return NULL;
	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	fsf_req->sbal_last++;
	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up to date */
	fsf_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	fsf_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= sbtype;

	return sbale;
}
static volatile struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
	fsf_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(fsf_req);
}
static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
{
	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
	int first = fsf_req->sbal_first;
	int last = fsf_req->sbal_last;
	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
		    QDIO_MAX_BUFFERS_PER_Q + 1;
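	/*
	 * The "+ QDIO_MAX_BUFFERS_PER_Q" keeps the ring distance positive
	 * when the request wrapped around: e.g. first == 126, last == 1
	 * gives (1 - 126 + 128) % 128 + 1 == 4 SBALs to clear.
	 */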
	zfcp_qdio_zero_sbals(sbal, first, count);
}
static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
				unsigned int sbtype, void *start_addr,
				unsigned int total_length)
{
	volatile struct qdio_buffer_element *sbale;
	unsigned long remaining, length;
	void *addr;

	/* split the segment up */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
		if (!sbale) {
			zfcp_qdio_undo_sbals(fsf_req);
			return -EINVAL;
		}

		/* a new piece must not exceed the next page boundary */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long)addr &
					   (PAGE_SIZE - 1))));
		sbale->addr = addr;
		sbale->length = length;
	}
	return 0;
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or a negative error value
 */
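/*
 * Hypothetical usage sketch (caller names assumed, not from this file):
 * map a SCSI command's scatterlist for a read, then check the result:
 *
 *	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_READ,
 *					scsi_sglist(scsi_cmnd),
 *					ZFCP_MAX_SBALS_PER_REQ);
 *	if (bytes < 0)
 *		return bytes;
 */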
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    struct scatterlist *sg, int max_sbals)
{
	volatile struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out the last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= sbtype;
	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
					      sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @fsf_req: pointer to struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval, pci, pci_batch;
	volatile struct qdio_buffer_element *sbale;

	/* acknowledgements for transferred buffers */
	pci_batch = req_q->pci_batch + count;
	if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
		pci = first + count - (pci_batch + 1);
		pci %= QDIO_MAX_BUFFERS_PER_Q;
		sbale = zfcp_qdio_sbale(req_q, pci, 0);
		sbale->flags |= SBAL_FLAGS0_PCI;
	}
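	/*
	 * Worked example (assuming ZFCP_MAX_SBALS_PER_REQ == 36, so the
	 * interval is 128 - 40 == 88): with pci_batch == 86 and count == 5
	 * the sum is 91, the new pci_batch is 91 % 88 == 3, and the PCI
	 * flag lands on SBAL first + 5 - 4 == first + 1, i.e. exactly the
	 * SBAL with which the 88th buffer since the last PCI was queued.
	 */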
	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
			 count, NULL);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	req_q->pci_batch = pci_batch;
	return 0;
}
/**
 * zfcp_qdio_zero_sbals - zero all SBALs of the specified area of a queue
 * @sbal: pointer to the array of SBALs
 * @first: index of the first SBAL to process
 * @count: number of SBALs to process
 */
void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int count)
{
	int i, sbal_idx;

	for (i = first; i < first + count; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @adapter: pointer to struct zfcp_adapter
 * Returns: -ENOMEM on memory allocation error or the return value from
 *	    qdio_allocate
 */
int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
{
	struct qdio_initialize *init_data;

	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
		return -ENOMEM;
	init_data = &adapter->qdio_init_data;

	init_data->cdev = adapter->ccw_device;
	init_data->q_format = QDIO_ZFCP_QFMT;
	memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
	ASCEBC(init_data->adapter_name, 8);
	init_data->qib_param_field_format = 0;
	init_data->qib_param_field = NULL;
	init_data->input_slib_elements = NULL;
	init_data->output_slib_elements = NULL;
	init_data->min_input_threshold = 1;
	init_data->max_input_threshold = 5000;
	init_data->min_output_threshold = 1;
	init_data->max_output_threshold = 1000;
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_int_resp;
	init_data->output_handler = zfcp_qdio_int_req;
	init_data->int_parm = (unsigned long) adapter;
	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
			   QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
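	/*
	 * The 0COPY flags let the QDIO layer work directly on the SBAL
	 * arrays handed in below rather than on copies;
	 * QDIO_USE_OUTBOUND_PCIS enables the per-SBAL PCI flag that
	 * zfcp_qdio_send() sets to request acknowledgement interrupts.
	 */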
	init_data->input_sbal_addr_array =
		(void **) (adapter->resp_q.sbal);
	init_data->output_sbal_addr_array =
		(void **) (adapter->req_q.sbal);

	return qdio_allocate(init_data);
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_qdio_close(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &adapter->req_q;
	write_lock_irq(&req_q->lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	write_unlock_irq(&req_q->lock);
	while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
	       == -EINPROGRESS)
		ssleep(1);
	/* cleanup used outbound sbals */
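	/*
	 * req_q.count holds the number of free SBALs, so the
	 * QDIO_MAX_BUFFERS_PER_Q - count SBALs starting at
	 * (first + count) % QDIO_MAX_BUFFERS_PER_Q are still in use
	 * and get zeroed here.
	 */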
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	req_q->pci_batch = 0;
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
	volatile struct qdio_buffer_element *sbale;
	int cc;

	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
		return -EIO;
	if (qdio_establish(&adapter->qdio_init_data)) {
		dev_err(&adapter->ccw_device->dev,
			"Establish of QDIO queues failed.\n");
		return -EIO;
	}

	if (qdio_activate(adapter->ccw_device, 0)) {
		dev_err(&adapter->ccw_device->dev,
			"Activate of QDIO queues failed.\n");
		goto failed_qdio;
	}
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}
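	/*
	 * Hand all response buffers to the adapter: the do_QDIO call below
	 * transfers ownership of all QDIO_MAX_BUFFERS_PER_Q input SBALs so
	 * the adapter can post responses into them.
	 */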
	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q, NULL)) {
		dev_err(&adapter->ccw_device->dev,
			"Init of QDIO response queue failed.\n");
		goto failed_qdio;
	}
	/* set index of first available SBAL / number of available SBALs */
	adapter->req_q.first = 0;
	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
	adapter->req_q.pci_batch = 0;

	return 0;

failed_qdio:
	while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
	       == -EINPROGRESS)
		ssleep(1);

	return -EIO;
}