/*
 * SN Platform GRU Driver
 *
 * GRU HANDLE DEFINITION
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
23 #ifndef __GRUHANDLES_H__
24 #define __GRUHANDLES_H__
25 #include "gru_instructions.h"
28 * Manifest constants for GRU Memory Map
/* Chiplet memory map: user gsegs first, then MCS (handle) space */
#define GRU_GSEG0_BASE		0
#define GRU_MCS_BASE		(64 * 1024 * 1024)
#define GRU_SIZE		(128UL * 1024 * 1024)

/* Handle & resource counts */
#define GRU_NUM_CB		128
#define GRU_NUM_DSR_BYTES	(32 * 1024)
#define GRU_NUM_TFM		16
#define GRU_NUM_TGH		24
#define GRU_NUM_CBE		128
#define GRU_NUM_TFH		128
#define GRU_NUM_CCH		16

/* Maximum resource counts that can be reserved by user programs */
#define GRU_NUM_USER_CBR	GRU_NUM_CBE
#define GRU_NUM_USER_DSR_BYTES	GRU_NUM_DSR_BYTES
/*
 * Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
 * are the same size.
 */
#define GRU_HANDLE_BYTES	64
#define GRU_HANDLE_STRIDE	256
/* Base addresses of handles within MCS space */
#define GRU_TFM_BASE		(GRU_MCS_BASE + 0x00000)
#define GRU_TGH_BASE		(GRU_MCS_BASE + 0x08000)
#define GRU_CBE_BASE		(GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE		(GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE		(GRU_MCS_BASE + 0x20000)
#define GRU_GSH_BASE		(GRU_MCS_BASE + 0x30000)

/* User gseg constants */
#define GRU_GSEG_STRIDE		(4 * 1024 * 1024)
#define GSEG_BASE(a)		((a) & ~(GRU_GSEG_PAGESIZE - 1))

/* Data segment constants (AU = allocation unit) */
#define GRU_DSR_AU_BYTES	1024
#define GRU_DSR_CL		(GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU_CL		(GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU		(GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)

/* Control block constants */
#define GRU_CBR_AU_SIZE		2
#define GRU_CBR_AU		(GRU_NUM_CBE / GRU_CBR_AU_SIZE)

/* Convert resource counts to the number of AU */
#define GRU_DS_BYTES_TO_AU(n)	DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
#define GRU_CB_COUNT_TO_AU(n)	DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)

/* Chiplets per hub/blade */
#define GRU_CHIPLETS_PER_HUB	2
#define GRU_HUBS_PER_BLADE	1
#define GRU_CHIPLETS_PER_BLADE	(GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)
/* User GRU Gseg offsets */
/* NOTE(review): the GRU_CB_BASE define was lost in extraction but is
   referenced by GRU_CB_LIMIT below; CBs start at gseg offset 0 -- confirm. */
#define GRU_CB_BASE		0
#define GRU_CB_LIMIT		(GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
#define GRU_DS_BASE		0x20000
#define GRU_DS_LIMIT		(GRU_DS_BASE + GRU_NUM_DSR_BYTES)

/* Convert a GRU physical address to the chiplet offset */
#define GSEGPOFF(h)		((h) & (GRU_SIZE - 1))
/* Convert an arbitrary handle address to the beginning of the GRU segment */
/* NOTE(review): the two GRUBASE definitions were originally selected by a
   simulator conditional that was lost in extraction; without it the second
   #define illegally redefines the first -- confirm the guard name. */
#ifndef __PLUGGED_IN_SIMULATOR__
#define GRUBASE(h)		((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
#else
extern void *gmu_grubase(void *h);
#define GRUBASE(h)		gmu_grubase(h)
#endif
101 /* General addressing macros. */
102 static inline void *get_gseg_base_address(void *base, int ctxnum)
104 return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
107 static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
109 return (void *)(get_gseg_base_address(base, ctxnum) +
110 GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
113 static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
115 return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
116 GRU_CACHE_LINE_BYTES * line);
119 static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
121 return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
122 ctxnum * GRU_HANDLE_STRIDE);
125 static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
127 return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
128 ctxnum * GRU_HANDLE_STRIDE);
131 static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
133 return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
134 ctxnum * GRU_HANDLE_STRIDE);
137 static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
139 return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
140 ctxnum * GRU_HANDLE_STRIDE);
143 static inline struct gru_context_configuration_handle *get_cch(void *base,
146 return (struct gru_context_configuration_handle *)(base +
147 GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
150 static inline unsigned long get_cb_number(void *cb)
152 return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
156 /* byte offset to a specific GRU chiplet. (p=pnode, c=chiplet (0 or 1)*/
157 static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
160 return paddr + GRU_SIZE * (2 * pnode + chiplet);
163 static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
165 return vaddr + GRU_SIZE * (2 * pnode + chiplet);
171 * Global TLB Fault Map
172 * Bitmap of outstanding TLB misses needing interrupt/polling service.
175 struct gru_tlb_fault_map {
176 unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
177 unsigned long fill0[2];
178 unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
179 unsigned long fill1[2];
/*
 * TGH - TLB Global Handle
 * Used for TLB flushing.
 * NOTE(review): the "opc" and "n" bit-fields were lost in extraction and are
 * restored here (they are written by tgh_invalidate() below and the DW bit
 * counts require them) -- confirm against hardware spec.
 */
struct gru_tlb_global_handle {
	unsigned int cmd:1;		/* DW 0 */
	unsigned int delresp:1;
	unsigned int opc:1;
	unsigned int fill1:5;

	unsigned int fill2:8;

	unsigned int status:2;
	unsigned long fill3:2;
	unsigned int state:3;
	unsigned long fill4:1;

	unsigned int cause:3;
	unsigned long fill5:37;

	unsigned long vaddr:64;		/* DW 1 */

	unsigned int asid:24;		/* DW 2 */
	unsigned int fill6:8;

	unsigned int pagesize:5;
	unsigned int fill7:11;

	unsigned int global:1;
	unsigned int fill8:15;

	unsigned long vaddrmask:39;	/* DW 3 */
	unsigned int fill9:9;

	unsigned int n:10;
	unsigned int fill10:6;

	unsigned int ctxbitmap:16;	/* DW4 */
	unsigned long fill11[3];
};
/* TGH opcodes
 * NOTE(review): enum bodies partially lost in extraction; members
 * reconstructed (TGHOP_TLBINV is required by tgh_invalidate()) -- confirm. */
enum gru_tgh_opc {
	TGHOP_TLBNOP,
	TGHOP_TLBINV,
};

/* TGH status values */
enum gru_tgh_status {
	TGHSTATUS_IDLE,
	TGHSTATUS_EXCEPTION,
	TGHSTATUS_ACTIVE,
};

/* TGH state values */
enum gru_tgh_state {
	TGHSTATE_IDLE,
	TGHSTATE_PE_INVAL,
	TGHSTATE_INTERRUPT_INVAL,
	TGHSTATE_WAITDONE,
	TGHSTATE_RESTART_CTX,
};
/*
 * TFH - TLB Fault Handle
 * Used for TLB dropins into the GRU TLB.
 * NOTE(review): the "opc" and "gaa" bit-fields were lost in extraction and
 * are restored here (tfh_write_restart() writes tfh->gaa and every tfh_*
 * wrapper writes tfh->opc; the DW bit counts require them) -- confirm.
 */
struct gru_tlb_fault_handle {
	unsigned int cmd:1;		/* DW 0 - low 32*/
	unsigned int delresp:1;
	unsigned int fill0:2;
	unsigned int opc:3;
	unsigned int fill1:9;

	unsigned int status:2;
	unsigned int fill2:1;
	unsigned int color:1;
	unsigned int state:3;
	unsigned int fill3:1;

	unsigned int cause:7;		/* DW 0 - high 32 */
	unsigned int fill4:1;

	unsigned int indexway:12;
	unsigned int fill5:4;

	unsigned int ctxnum:4;
	unsigned int fill6:12;

	unsigned long missvaddr:64;	/* DW 1 */

	unsigned int missasid:24;	/* DW 2 */
	unsigned int fill7:8;
	unsigned int fillasid:24;
	unsigned int dirty:1;
	unsigned int gaa:2;
	unsigned long fill8:5;

	unsigned long pfn:41;		/* DW 3 */
	unsigned int fill9:7;
	unsigned int pagesize:5;
	unsigned int fill10:11;

	unsigned long fillvaddr:64;	/* DW 4 */

	unsigned long fill11[3];
};
298 TFHOP_USER_POLLING_MODE = 7,
314 TFHSTATE_RESTART_CBR,
/* TFH cause values
 * NOTE(review): enum opening and first members lost in extraction;
 * reconstructed -- confirm member list and ordering. */
enum tfh_cause {
	TFHCAUSE_NONE,
	TFHCAUSE_TLB_MISS,
	TFHCAUSE_TLB_MOD,
	TFHCAUSE_HW_ERROR_RR,
	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
	TFHCAUSE_HW_ERROR_VALID,
	TFHCAUSE_HW_ERROR_PAGESIZE,
	TFHCAUSE_INSTRUCTION_EXCEPTION,
	TFHCAUSE_UNCORRECTIBLE_ERROR,
};
/* GAA values (address-space attributes for TLB dropins)
 * NOTE(review): GAA_RAM/GAA_MMIO defines were lost in extraction; restored
 * to fill the 0x0-0x3 encoding around the surviving NCRAM/REGISTER values
 * -- confirm. */
#define GAA_RAM				0x0
#define GAA_NCRAM			0x2
#define GAA_MMIO			0x1
#define GAA_REGISTER			0x3

/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
#define GRU_PADDR_SHIFT			12
/*
 * Context Configuration handle
 * Used to allocate resources to a GSEG context.
 * NOTE(review): the "opc" bit-field was lost in extraction and is restored
 * (every cch_* wrapper below writes cch->opc; the DW0 bit count requires it)
 * -- confirm.
 */
struct gru_context_configuration_handle {
	unsigned int cmd:1;			/* DW0 */
	unsigned int delresp:1;
	unsigned int opc:3;
	unsigned int unmap_enable:1;
	unsigned int req_slice_set_enable:1;
	unsigned int req_slice:2;
	unsigned int cb_int_enable:1;
	unsigned int tlb_int_enable:1;
	unsigned int tfm_fault_bit_enable:1;
	unsigned int tlb_int_select:4;

	unsigned int status:2;
	unsigned int state:2;
	unsigned int reserved2:4;

	unsigned int cause:4;
	unsigned int tfm_done_bit_enable:1;
	unsigned int unused:3;

	unsigned int dsr_allocation_map;

	unsigned long cbr_allocation_map;	/* DW1 */

	unsigned int asid[8];			/* DW 2 - 5 */
	unsigned short sizeavail[8];		/* DW 6 - 7 */
} __attribute__ ((packed));
377 CCHOP_INTERRUPT_SYNC,
/* CCH status values
 * NOTE(review): members and the separate state enum lost in extraction;
 * reconstructed (CCHSTATUS_ACTIVE is required by
 * wait_instruction_complete()) -- confirm. */
enum gru_cch_status {
	CCHSTATUS_IDLE,
	CCHSTATUS_EXCEPTION,
	CCHSTATUS_ACTIVE,
};

/* CCH state values */
enum gru_cch_state {
	CCHSTATE_INACTIVE,
	CCHSTATE_MAPPED,
	CCHSTATE_ACTIVE,
	CCHSTATE_INTERRUPTED,
};
/* CCH Exception cause */
/* (enum opening/closing lost in extraction; all 12 members with their
   explicit values survived) */
enum gru_cch_cause {
	CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
	CCHCAUSE_ILLEGAL_OPCODE = 2,
	CCHCAUSE_INVALID_START_REQUEST = 3,
	CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
	CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
	CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
	CCHCAUSE_CCH_BUSY = 7,
	CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
	CCHCAUSE_BAD_TFM_CONFIG = 9,
	CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
	CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
	CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
};
/*
 * CBE - Control Block Extended
 * Maintains internal GRU state for active CBs.
 */
struct gru_control_block_extended {
	unsigned int reserved0:1;	/* DW 0  - low */
	unsigned int imacpy:3;
	unsigned int reserved1:4;
	unsigned int xtypecpy:3;
	unsigned int iaa0cpy:2;
	unsigned int iaa1cpy:2;
	unsigned int reserved2:1;
	unsigned int opccpy:8;
	unsigned int exopccpy:8;

	unsigned int idef2cpy:22;	/* DW 0  - high */
	unsigned int reserved3:10;

	unsigned int idef4cpy:22;	/* DW 1 */
	unsigned int reserved4:10;
	unsigned int idef4upd:22;
	unsigned int reserved5:10;

	unsigned long idef1upd:64;	/* DW 2 */

	unsigned long idef5cpy:64;	/* DW 3 */

	unsigned long idef6cpy:64;	/* DW 4 */

	unsigned long idef3upd:64;	/* DW 5 */

	unsigned long idef5upd:64;	/* DW 6 */

	unsigned int idef2upd:22;	/* DW 7 */
	unsigned int reserved6:10;

	unsigned int ecause:20;
	unsigned int cbrstate:4;
	unsigned int cbrexecstatus:8;
};
/* CBE cbrstate values
 * NOTE(review): enum opening and first four members lost in extraction;
 * reconstructed -- confirm ordering. */
enum gru_cbr_state {
	CBRSTATE_INACTIVE,
	CBRSTATE_IDLE,
	CBRSTATE_PE_CHECK,
	CBRSTATE_QUEUED,
	CBRSTATE_WAIT_RESPONSE,
	CBRSTATE_INTERRUPTED,
	CBRSTATE_INTERRUPTED_MISS_FMM,
	CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
	CBRSTATE_INTERRUPTED_MISS_UPM,
	CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
	CBRSTATE_REQUEST_ISSUE,
	CBRSTATE_BUSY_INTERRUPT,
};
/* CBE cbrexecstatus bits */
#define CBR_EXS_ABORT_OCC_BIT			0
#define CBR_EXS_INT_OCC_BIT			1
#define CBR_EXS_PENDING_BIT			2
#define CBR_EXS_QUEUED_BIT			3
#define CBR_EXS_TLBHW_BIT			4
#define CBR_EXS_EXCEPTION_BIT			5

/* Mask forms of the bits above */
#define CBR_EXS_ABORT_OCC			(1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC				(1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING				(1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED				(1 << CBR_EXS_QUEUED_BIT)
#define CBR_EXS_TLBHW				(1 << CBR_EXS_TLBHW_BIT)
#define CBR_EXS_EXCEPTION			(1 << CBR_EXS_EXCEPTION_BIT)
/* CBE ecause bits - defined in gru_instructions.h */

/*
 * Convert a processor pagesize into the strange encoded pagesize used by the
 * GRU. Processor pagesize is encoded as log of bytes per page. (or PAGE_SHIFT)
 *	pagesize	log pagesize	grupagesize
 *	  4k		12		0
 *	 16k		14		1
 *	 64k		16		2
 *	256k		18		3
 *	  1m		20		4
 *	  2m		21		5
 *	  4m		22		6
 *	 16m		24		7
 *	 64m		26		8
 *	...
 */
#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
#define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))
/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL		1024UL

/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)
/*
 * Launch the instruction in handle "h" by setting its CMD bit (bit 0 -- the
 * cmd:1 field is the first bit-field of every handle struct above).
 * NOTE(review): the store that sets the bit was lost in extraction and is
 * restored here -- confirm.
 */
static inline void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD bit must be last */
	*w0 = *w0 | 1;
}
516 static inline int wait_instruction_complete(void *h)
523 status = GET_MSEG_HANDLE_STATUS(h);
524 } while (status == CCHSTATUS_ACTIVE);
/*
 * Fill in the per-region asid[] and sizeavail[] arrays of a CCH before
 * allocation. Arch-specific: IA64 walks the address regions; x86_64 fills
 * all 8 slots with the base pagesize (plus 2MB on x86_64).
 */
#if defined CONFIG_IA64
static inline void cch_allocate_set_asids(
		  struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i <= RGN_HPAGE; i++) {  /*  assume HPAGE is last region */
		cch->asid[i] = (asidval++);
#if 0
		/* ZZZ hugepages not supported yet */
		if (i == RGN_HPAGE)
			cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift);
		else
#endif
			cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT);
	}
}

#elif defined CONFIG_X86_64
static inline void cch_allocate_set_asids(
		  struct gru_context_configuration_handle *cch, int asidval)
{
	int i;

	for (i = 0; i < 8; i++) {
		cch->asid[i] = asidval++;
		cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) |
			GRU_SIZEAVAIL(21);
	}
}
#endif
559 static inline int cch_allocate(struct gru_context_configuration_handle *cch,
560 int asidval, unsigned long cbrmap,
561 unsigned long dsrmap)
563 cch_allocate_set_asids(cch, asidval);
564 cch->dsr_allocation_map = dsrmap;
565 cch->cbr_allocation_map = cbrmap;
566 cch->opc = CCHOP_ALLOCATE;
567 start_instruction(cch);
568 return wait_instruction_complete(cch);
571 static inline int cch_start(struct gru_context_configuration_handle *cch)
573 cch->opc = CCHOP_START;
574 start_instruction(cch);
575 return wait_instruction_complete(cch);
578 static inline int cch_interrupt(struct gru_context_configuration_handle *cch)
580 cch->opc = CCHOP_INTERRUPT;
581 start_instruction(cch);
582 return wait_instruction_complete(cch);
585 static inline int cch_deallocate(struct gru_context_configuration_handle *cch)
587 cch->opc = CCHOP_DEALLOCATE;
588 start_instruction(cch);
589 return wait_instruction_complete(cch);
592 static inline int cch_interrupt_sync(struct gru_context_configuration_handle
595 cch->opc = CCHOP_INTERRUPT_SYNC;
596 start_instruction(cch);
597 return wait_instruction_complete(cch);
600 static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh,
601 unsigned long vaddr, unsigned long vaddrmask,
602 int asid, int pagesize, int global, int n,
603 unsigned short ctxbitmap)
607 tgh->pagesize = pagesize;
609 tgh->global = global;
610 tgh->vaddrmask = vaddrmask;
611 tgh->ctxbitmap = ctxbitmap;
612 tgh->opc = TGHOP_TLBINV;
613 start_instruction(tgh);
614 return wait_instruction_complete(tgh);
617 static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh,
618 unsigned long pfn, unsigned long vaddr,
619 int asid, int dirty, int pagesize)
621 tfh->fillasid = asid;
622 tfh->fillvaddr = vaddr;
625 tfh->pagesize = pagesize;
626 tfh->opc = TFHOP_WRITE_ONLY;
627 start_instruction(tfh);
630 static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
631 unsigned long paddr, int gaa,
632 unsigned long vaddr, int asid, int dirty,
635 tfh->fillasid = asid;
636 tfh->fillvaddr = vaddr;
637 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
640 tfh->pagesize = pagesize;
641 tfh->opc = TFHOP_WRITE_RESTART;
642 start_instruction(tfh);
645 static inline void tfh_restart(struct gru_tlb_fault_handle *tfh)
647 tfh->opc = TFHOP_RESTART;
648 start_instruction(tfh);
651 static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
653 tfh->opc = TFHOP_USER_POLLING_MODE;
654 start_instruction(tfh);
657 static inline void tfh_exception(struct gru_tlb_fault_handle *tfh)
659 tfh->opc = TFHOP_EXCEPTION;
660 start_instruction(tfh);
663 #endif /* __GRUHANDLES_H__ */