/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Slow API Set - UCC Slow specific routines implementations.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>

#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>

#include <asm/ucc.h>
#include <asm/ucc_slow.h>
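
/*
 * Map a slow UCC number (0..7) to the corresponding QE command register
 * subblock code; returns QE_CR_SUBBLOCK_INVALID for out-of-range numbers.
 */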
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
	switch (uccs_num) {
	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}
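
/*
 * Force the transmitter of this UCC to process its Tx BDs now by writing
 * UCC_SLOW_TOD to the UTODR (transmit on demand) register.
 */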
void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
{
	out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
}
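
/* Stop transmission on this UCC by issuing the QE_GRACEFUL_STOP_TX command. */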
void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
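
/* Stop transmission on this UCC immediately via the QE_STOP_TX command. */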
void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
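
/* Restart transmission on this UCC via the QE_RESTART_TX command. */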
void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
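
/*
 * Enable the receiver and/or transmitter of this UCC by setting the
 * ENR/ENT bits in GUMR_L, depending on the requested direction.
 */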
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 1;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
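
/*
 * Disable the receiver and/or transmitter of this UCC by clearing the
 * ENR/ENT bits in GUMR_L, depending on the requested direction.
 */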
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 0;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}
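
/*
 * Bring up one slow UCC: map its registers, allocate parameter RAM and the
 * Rx/Tx BD rings in MURAM, program GUMR and the clock/mux routing, and issue
 * the QE init command.  On success the private structure is returned through
 * uccs_ret.
 */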
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
{
	struct ucc_slow_private *uccs;
	u32 i;
	struct ucc_slow *us_regs;
	u32 gumr;
	struct qe_bd *bd;
	u32 id;
	u32 command;
	int ret = 0;

	/* check if the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
		printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__);
		return -EINVAL;
	}
	/*
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
	 * case when QE accepts 32 bits at a time.
	 */
	if ((!us_info->rfw) &&
	    (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
		printk(KERN_ERR "max_rx_buf_length not aligned.");
		return -EINVAL;
	}

	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
	if (!uccs) {
		printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__);
		return -ENOMEM;
	}

	/* Fill slow UCC structure */
	uccs->us_info = us_info;
	/* Map the UCC Slow register block */
	uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
	if (uccs->us_regs == NULL) {
		printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__);
		kfree(uccs);
		return -ENOMEM;
	}

	uccs->saved_uccm = 0;
	uccs->p_rx_frame = 0;
	us_regs = uccs->us_regs;
	uccs->p_ucce = (u16 *) & (us_regs->ucce);
	uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
	uccs->rx_frames = 0;
	uccs->tx_frames = 0;
	uccs->rx_discarded = 0;
#endif				/* STATISTICS */

	/* Get PRAM base */
	uccs->us_pram_offset =
		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (IS_MURAM_ERR(uccs->us_pram_offset)) {
		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __FUNCTION__);
		ucc_slow_free(uccs);
		return -ENOMEM;
	}
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
		     uccs->us_pram_offset);

	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);

	/* Init Guemr register */
	if ((ret = ucc_init_guemr((struct ucc_common *) us_regs))) {
		printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__);
		ucc_slow_free(uccs);
		return ret;
	}

	/* Set UCC to slow type */
	if ((ret = ucc_set_type(us_info->ucc_num,
				(struct ucc_common *) us_regs,
				UCC_SPEED_TYPE_SLOW))) {
		printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__);
		ucc_slow_free(uccs);
		return ret;
	}

	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);

	INIT_LIST_HEAD(&uccs->confQ);

	/* Allocate BDs. */
	uccs->rx_base_offset =
		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->rx_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__);
		uccs->rx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	uccs->tx_base_offset =
		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->tx_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate TX BDs", __FUNCTION__);
		uccs->tx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	/* Init Tx bds */
	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
		/* clear bd buffer */
		out_be32(&bd->buf, 0);
		/* set bd status and length */
		out_be32((u32 *) bd, 0);
		bd++;
	}
	/* for last BD set Wrap bit */
	out_be32(&bd->buf, 0);
	out_be32((u32 *) bd, cpu_to_be32(T_W));

	/* Init Rx bds */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
		/* set bd status and length */
		out_be32((u32 *) bd, 0);
		/* clear bd buffer */
		out_be32(&bd->buf, 0);
		bd++;
	}
	/* for last BD set Wrap bit */
	out_be32((u32 *) bd, cpu_to_be32(R_W));
	out_be32(&bd->buf, 0);

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	out_be32(&us_regs->gumr_h, gumr);

	/* gumr_l */
	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
		us_info->diag | us_info->mode;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	out_be32(&us_regs->gumr_l, gumr);

	/* Function code registers */

	/* if the data is in cachable memory, the 'global' bit in
	 * the function code should be set. */
	uccs->us_pram->tfcr = uccs->us_pram->rfcr =
		us_info->data_mem_part | QE_BMR_BYTE_ORDER_BO_MOT;

	/* rbase, tbase are offsets from MURAM base */
	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);

	/* Mux clocking route. */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set Tsa or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not Tsa), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			printk(KERN_ERR "%s: illegal value for RX clock",
			       __FUNCTION__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			printk(KERN_ERR "%s: illegal value for TX clock",
			       __FUNCTION__);
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/* Set interrupt mask register at UCC level. */
	out_be16(&us_regs->uccm, us_info->uccm_mask);

	/* First, clear anything pending at UCC level,
	 * otherwise, old garbage may come through
	 * as soon as the dam is opened. */

	/* Writing '1' clears */
	out_be16(&us_regs->ucce, 0xffff);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);

	*uccs_ret = uccs;
	return ret;
}
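
/*
 * Undo ucc_slow_init(): release the BD rings and parameter RAM back to
 * MURAM and free the private structure.
 */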
void ucc_slow_free(struct ucc_slow_private * uccs)
{
	if (!uccs)
		return;

	if (uccs->rx_base_offset)
		qe_muram_free(uccs->rx_base_offset);

	if (uccs->tx_base_offset)
		qe_muram_free(uccs->tx_base_offset);

	if (uccs->us_pram) {
		qe_muram_free(uccs->us_pram_offset);
		uccs->us_pram = NULL;
	}

	kfree(uccs);
}