/[dynamips]/upstream/dynamips-0.2.5/dev_pa_a1.c
This is repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.5/dev_pa_a1.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1 - (show annotations)
Sat Oct 6 16:01:44 2007 UTC (12 years, 2 months ago) by dpavlin
File MIME type: text/plain
File size: 54239 byte(s)
import 0.2.5 from upstream

1 /*
2 * Cisco C7200 (Predator) Simulation Platform.
3 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4 *
5 * PA-A1 ATM interface based on TI1570 and PLX 9060-ES.
6 *
7 * EEPROM types:
8 * - 0x17: PA-A1-OC3MM
9 * - 0x2C: PA-A1-OC3SM
10 * - 0x2D: PA-A1-OC3UTP
11 *
12 * IOS command: "sh controller atm2/0"
13 *
14 * Manuals:
15 *
16 * Texas Instruments TNETA1570 ATM segmentation and reassembly device
17 * with integrated 64-bit PCI-host interface
18 * http://focus.ti.com/docs/prod/folders/print/tneta1570.html
19 *
20 * PLX 9060-ES
21 * http://www.plxtech.com/products/io_accelerators/PCI9060/default.htm
22 *
23 * TODO:
24 * - RX error handling and RX AAL5-related stuff
25 * - HEC and AAL5 CRC fields.
26 *
27 * Cell trains for faster NETIO communications ?
28 */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <unistd.h>
34 #include <errno.h>
35
36 #include "crc.h"
37 #include "atm.h"
38 #include "mips64.h"
39 #include "dynamips.h"
40 #include "memory.h"
41 #include "device.h"
42 #include "ptask.h"
43 #include "dev_c7200.h"
44
45 /* Debugging flags */
46 #define DEBUG_ACCESS 0
47 #define DEBUG_UNKNOWN 0
48 #define DEBUG_TRANSMIT 0
49 #define DEBUG_RECEIVE 0
50 #define DEBUG_TX_DMA 0
51
52 /* PCI vendor/product codes */
53 #define TI1570_PCI_VENDOR_ID 0x104c
54 #define TI1570_PCI_PRODUCT_ID 0xa001
55
56 #define PLX_9060ES_PCI_VENDOR_ID 0x10b5
57 #define PLX_9060ES_PCI_PRODUCT_ID 0x906e
58
59 /* Number of buffers transmitted at each TX DMA ring scan pass */
60 #define TI1570_TXDMA_PASS_COUNT 16
61
62 /* TI1570 Internal Registers (p.58 of doc) */
63 #define TI1570_REG_CONFIG 0x0000 /* Configuration registers */
64 #define TI1570_REG_STATUS 0x0001 /* Status register */
65 #define TI1570_REG_IMASK 0x0002 /* Interrupt-mask register */
66 #define TI1570_REG_RGT_RAT 0x0003 /* RGT + RAT cycle-counter */
67 #define TI1570_REG_RX_UNKNOWN 0x0004 /* RX Unknown Register */
68 #define TI1570_REG_TX_CRING_SIZE 0x0005 /* TX Completion ring sizes */
69 #define TI1570_REG_RX_CRING_SIZE 0x0006 /* RX Completion ring sizes */
70 #define TI1570_REG_TX_PSR_SIZE 0x0007 /* TX Pkt-seg ring size + FIFO */
71 #define TI1570_REG_HEC_AAL5_DISC 0x0008 /* HEC err + AAL5 CPCS discard */
72 #define TI1570_REG_UNK_PROTO_CNT 0x0009 /* Unknown-protocols counter */
#define TI1570_REG_RX_ATM_COUNT  0x000A  /* ATM-cells-received counter */
#define TI1570_REG_TX_ATM_COUNT  0x000B  /* ATM-cells-transmitted counter */
75 #define TI1570_REG_TX_RX_FIFO 0x000C /* TX/RX FIFO occupancy, VCI mask */
76 #define TI1570_REG_SCHED_SIZE 0x000D /* Scheduler Table size */
77 #define TI1570_REG_SOFT_RESET 0x000E /* Software Reset */
78 #define TI1570_REG_TCR_WOI_ADDR 0x0080 /* TX Compl. Ring w/o IRQ addr. */
79 #define TI1570_REG_TCR_WI_ADDR 0x0081 /* TX Compl. Ring w/ IRQ addr. */
80 #define TI1570_REG_RCR_WOI_ADDR 0x0082 /* RX Compl. Ring w/o IRQ addr. */
81 #define TI1570_REG_RCR_WI_ADDR 0x0083 /* RX Compl. Ring w/ IRQ addr. */
82
83 /* TI1570 configuration register (p.59) */
84 #define TI1570_CFG_EN_RAT 0x00000001 /* Reassembly Aging */
85 #define TI1570_CFG_BP_SEL 0x00000002 /* IRQ on packet or buffer */
86 #define TI1570_CFG_EN_RX 0x00000010 /* RX enable */
87 #define TI1570_CFG_EN_TX 0x00000020 /* TX enable */
88 #define TI1570_CFG_SMALL_MAP 0x00000040 /* Small map */
89
90 /* TI1570 status register (p.61) */
91 #define TI1570_STAT_CP_TX 0x00000001 /* Transmit completion ring */
92 #define TI1570_STAT_RX_IRR 0x00000040 /* Receive unknown reg set */
93 #define TI1570_STAT_CP_RX 0x00000080 /* Receive completion ring */
94 #define TI1570_STAT_TX_FRZ 0x00000100 /* TX Freeze */
95 #define TI1570_STAT_RX_FRZ 0x00000200 /* RX Freeze */
96
97 /* Mask for RX/TX completion-ring sizes */
98 #define TI1570_TCR_SIZE_MASK 0x00001FFF /* TX compl. ring size mask */
99 #define TI1570_RCR_SIZE_MASK 0x000003FF /* RX compl. ring size mask */
100
/* TI1570 TX packet segmentation ring register */
102 #define TI1570_PSR_SIZE_MASK 0x000000FF /* pkt-seg ring size */
103
104 /* Total size of the TI1570 Control Memory */
105 #define TI1570_CTRL_MEM_SIZE 0x100000
106
107 /* Offsets of the TI1570 structures (p.66) */
108 #define TI1570_TX_SCHED_OFFSET 0x0000 /* TX scheduler table */
109 #define TI1570_INTERNAL_REGS_OFFSET 0x3200 /* Internal Registers */
110 #define TI1570_FREE_BUFFERS_OFFSET 0x3800 /* Free-Buffer Pointers */
111 #define TI1570_RX_DMA_PTR_TABLE_OFFSET 0x4000 /* RX VPI/VCI pointer table */
112 #define TI1570_TX_DMA_TABLE_OFFSET 0x8000 /* TX DMA state table */
113 #define TI1570_RX_DMA_TABLE_OFFSET 0x10000 /* RX DMA state table */
114
/* TX scheduler table */
#define TI1570_TX_SCHED_ENTRY_COUNT  6200
#define TI1570_TX_SCHED_ENTRY_MASK   0x3FF   /* Entry mask */
#define TI1570_TX_SCHED_E0_SHIFT     0       /* Shift for entry 0 */
#define TI1570_TX_SCHED_E1_SHIFT     16      /* Shift for entry 1 */
120
121 /* TX DMA state table */
122 #define TI1570_TX_DMA_ACT 0x80000000 /* ACTive (word 0) */
123 #define TI1570_TX_DMA_SOP 0x40000000 /* Start of Packet (SOP) */
124 #define TI1570_TX_DMA_EOP 0x20000000 /* End of Packet (EOP) */
125 #define TI1570_TX_DMA_ABORT 0x10000000 /* Abort */
126 #define TI1570_TX_DMA_TCR_SELECT 0x02000000 /* TX comp. ring selection */
127 #define TI1570_TX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
128
129 #define TI1570_TX_DMA_AAL_TRWPTI 0x00000000 /* Transp. AAL w/ PTI set */
130 #define TI1570_TX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
131 #define TI1570_TX_DMA_AAL_TRWOPTI 0x08000000 /* Transp. AAL w/o PTI set */
132
133 #define TI1570_TX_DMA_OFFSET_MASK 0x00FF0000
134 #define TI1570_TX_DMA_OFFSET_SHIFT 16
135 #define TI1570_TX_DMA_DCOUNT_MASK 0x0000FFFF
136
137 #define TI1570_TX_DMA_ON 0x80000000 /* DMA state (word 3) */
138 #define TI1570_TX_DMA_RING_OFFSET_MASK 0x3FFFFF00
139 #define TI1570_TX_DMA_RING_OFFSET_SHIFT 8
140 #define TI1570_TX_DMA_RING_INDEX_MASK 0x000000FF
141
142 #define TI1570_TX_DMA_RING_AAL5_LEN_MASK 0x0000FFFF
143
/* TX DMA state table entry: per-channel segmentation state, kept in the
   TI1570 control memory and indexed by the TX scheduler table entries */
typedef struct ti1570_tx_dma_entry ti1570_tx_dma_entry_t;
struct ti1570_tx_dma_entry {
   m_uint32_t ctrl_buf;    /* Ctrl, Buffer Offset, Buffer data-byte count */
   m_uint32_t cb_addr;     /* Current Buffer Address */
   m_uint32_t atm_hdr;     /* 4-byte ATM header */
   m_uint32_t dma_state;   /* DMA state + Packet segmentation ring address */
   m_uint32_t nb_addr;     /* Next Buffer address */
   m_uint32_t sb_addr;     /* Start of Buffer address */
   m_uint32_t aal5_crc;    /* Partial AAL5-transmit CRC */
   m_uint32_t aal5_ctrl;   /* AAL5-control field and length field */
};
155
156 /* TX Packet-Segmentation Rings */
157 #define TI1570_TX_RING_OWN 0x80000000 /* If set, packet is ready */
158 #define TI1570_TX_RING_PTR_MASK 0x3FFFFFFF /* Buffer pointer */
159
160 /* TX Data Buffers */
161 #define TI1570_TX_BUFFER_RDY 0x80000000 /* If set, buffer is ready */
162 #define TI1570_TX_BUFFER_SOP 0x40000000 /* First buffer of packet */
163 #define TI1570_TX_BUFFER_EOP 0x20000000 /* Last buffer of packet */
164 #define TI1570_TX_BUFFER_ABORT 0x10000000 /* Abort */
165
166 #define TI1570_TX_BUFFER_OFFSET_MASK 0x00FF0000
167 #define TI1570_TX_BUFFER_OFFSET_SHIFT 16
168 #define TI1570_TX_BUFFER_DCOUNT_MASK 0x0000FFFF
169
/* TX data buffer descriptor as it appears in host (guest VM) memory;
   the payload immediately follows this header, at "buffer offset" bytes */
typedef struct ti1570_tx_buffer ti1570_tx_buffer_t;
struct ti1570_tx_buffer {
   m_uint32_t ctrl_buf;    /* Ctrl, Buffer offset, Buffer data-byte count */
   m_uint32_t nb_addr;     /* Start-of-next buffer pointer */
   m_uint32_t atm_hdr;     /* 4-byte ATM header */
   m_uint32_t aal5_ctrl;   /* PCS-UU/CPI field (AAL5 control field) */
};
177
178 /* TX completion-ring */
179 #define TI1570_TCR_OWN 0x80000000 /* OWNner bit */
180 #define TI1570_TCR_ABORT 0x40000000 /* Abort */
181
182 /* RX VPI/VCI DMA pointer table */
183 #define TI1570_RX_VPI_ENABLE 0x80000000 /* VPI enabled ? */
184 #define TI1570_RX_BASE_PTR_MASK 0x7FFF0000 /* Base pointer mask */
185 #define TI1570_RX_BASE_PTR_SHIFT 16 /* Base pointer shift */
186 #define TI1570_RX_VCI_RANGE_MASK 0x0000FFFF /* Valid VCI range */
187
188 /* RX DMA state table (p.36) */
189 #define TI1570_RX_DMA_ACT 0x80000000 /* ACTive (word 0) */
190 #define TI1570_RX_DMA_RCR_SELECT 0x20000000 /* RX comp. ring selection */
191 #define TI1570_RX_DMA_WAIT_EOP 0x10000000 /* Wait for EOP */
192 #define TI1570_RX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
193
194 #define TI1570_RX_DMA_AAL_PTI 0x00000000 /* PTI based tr. AAL pkt */
195 #define TI1570_RX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
196 #define TI1570_RX_DMA_AAL_CNT 0x08000000 /* Cnt based tr. AAL pkt */
197
198 #define TI1570_RX_DMA_FIFO 0x02000000 /* FIFO used for free bufs */
199
200 #define TI1570_RX_DMA_TR_CNT_MASK 0xFFFF0000 /* Cnt-based Tr-AAL */
201 #define TI1570_RX_DMA_TR_CNT_SHIFT 16
202 #define TI1570_RX_DMA_CB_LEN_MASK 0x0000FFFF /* Current buffer length */
203
204 #define TI1570_RX_DMA_ON 0x80000000 /* DMA state (word 6) */
205 #define TI1570_RX_DMA_FILTER 0x40000000 /* Filter */
206
207 #define TI1570_RX_DMA_FB_PTR_MASK 0x3FFFFFFF /* Free-buffer ptr mask */
208 #define TI1570_RX_DMA_FB_INDEX_MASK 0x000000FF /* Index with Free-buf ring */
209
/* RX DMA state table entry (reassembly state per VC, see p.36) */
typedef struct ti1570_rx_dma_entry ti1570_rx_dma_entry_t;
struct ti1570_rx_dma_entry {
   m_uint32_t ctrl;        /* Control field, EFCN cell cnt, pkt length */
   m_uint32_t cb_addr;     /* Current Buffer Address */
   m_uint32_t sb_addr;     /* Start of Buffer address */
   m_uint32_t cb_len;      /* Transp-AAL pkt counter, current buf length */
   m_uint32_t sp_ptr;      /* Start-of-packet pointer */
   m_uint32_t aal5_crc;    /* Partial AAL5-receive CRC */
   m_uint32_t fbr_entry;   /* Free-buffer ring-pointer table entry */
   m_uint32_t timeout;     /* Timeout value, current timeout count */
};
221
222 /* RX free-buffer ring pointer table entry (p.39) */
223 #define TI1570_RX_FBR_PTR_MASK 0xFFFFFFFC
224 #define TI1570_RX_FBR_BS_MASK 0xFFFF0000 /* Buffer size mask */
225 #define TI1570_RX_FBR_BS_SHIFT 16
226 #define TI1570_RX_FBR_RS_MASK 0x0000FC00 /* Ring size mask */
227 #define TI1570_RX_FBR_RS_SHIFT 10
228 #define TI1570_RX_FBR_IDX_MASK 0x000003FF /* Current index mask */
229
/* RX free-buffer ring pointer table entry (p.39) */
typedef struct ti1570_rx_fbr_entry ti1570_rx_fbr_entry_t;
struct ti1570_rx_fbr_entry {
   m_uint32_t fbr_ptr;     /* RX free-buffer ring pointer */
   m_uint32_t ring_size;   /* Ring size and buffer size */
};
235
236 /* RX buffer pointer (p.41) */
237 #define TI1570_RX_BUFPTR_OWN 0x80000000 /* If set, buffer is ready */
238 #define TI1570_RX_BUFPTR_MASK 0x3FFFFFFF /* Buffer address mask */
239
240 /* RX data buffer (p.42) */
241 #define TI1570_RX_BUFFER_SOP 0x80000000 /* Start-of-Packet buffer */
242 #define TI1570_RX_BUFFER_EOP 0x40000000 /* End-of-Packet buffer */
243
/* RX data buffer descriptor in host memory (p.42) */
typedef struct ti1570_rx_buffer ti1570_rx_buffer_t;
struct ti1570_rx_buffer {
   m_uint32_t reserved;   /* Reserved, not used by the TI1570 */
   m_uint32_t ctrl;       /* Control field, Start of next buffer pointer */
   m_uint32_t atm_hdr;    /* ATM header */
   m_uint32_t user;       /* User-defined value */
};
251
/* Internal structure to hold free buffer info. Emulator-side bookkeeping
   only; this is not a TI1570 hardware structure. */
typedef struct ti1570_rx_buf_holder ti1570_rx_buf_holder_t;
struct ti1570_rx_buf_holder {
   m_uint32_t buf_addr;         /* address of the free data buffer */
   m_uint32_t buf_size;         /* size of the free data buffer */
   ti1570_rx_buffer_t rx_buf;   /* copy of the RX buffer descriptor */
};
259
260 /* RX completion ring entry */
261 #define TI1570_RCR_PKT_OVFLW 0x80000000 /* Packet overflow (word 0) */
262 #define TI1570_RCR_CRC_ERROR 0x40000000 /* CRC error */
263 #define TI1570_RCR_BUF_STARV 0x20000000 /* Buffer starvation */
264 #define TI1570_RCR_TIMEOUT 0x10000000 /* Reassembly timeout */
265 #define TI1570_RCR_ABORT 0x08000000 /* Abort condition */
266 #define TI1570_RCR_AAL5 0x04000000 /* AAL5 indicator */
267
268 #define TI1570_RCR_VALID 0x80000000 /* Start-ptr valid (word 2) */
269
270 #define TI1570_RCR_OWN 0x80000000 /* Buffer ready (word 4) */
271 #define TI1570_RCR_ERROR 0x40000000 /* Error entry */
272
/* RX completion ring entry, written back to host memory when a packet
   (or buffer) has been reassembled */
typedef struct ti1570_rcr_entry ti1570_rcr_entry_t;
struct ti1570_rcr_entry {
   m_uint32_t atm_hdr;        /* ATM header */
   m_uint32_t error;          /* Error Indicator + Congestion cell count */
   m_uint32_t sp_addr;        /* Start of packet */
   m_uint32_t aal5_trailer;   /* AAL5 trailer */
   m_uint32_t fbr_entry;      /* Free-buffer ring-pointer table entry */
   m_uint32_t res[3];         /* Reserved, not used by the TI1570 */
};
282
/* TI1570 Data: private state for one PA-A1 port adapter instance
   (TI1570 SAR chip behind a PLX 9060-ES PCI bridge) */
struct pa_a1_data {
   char *name;

   /* Control Memory pointer (TI1570_CTRL_MEM_SIZE bytes, word-addressed) */
   m_uint32_t *ctrl_mem_ptr;

   /* TI1570 internal registers (mapped inside control memory) */
   m_uint32_t *iregs;

   /* TX FIFO cell: one ATM cell being assembled before netio_send() */
   m_uint8_t txfifo_cell[ATM_CELL_SIZE];
   m_uint32_t txfifo_avail,txfifo_pos;   /* free bytes / write position */

   /* TX Scheduler table */
   m_uint32_t *tx_sched_table;

   /* TX DMA state table */
   ti1570_tx_dma_entry_t *tx_dma_table;

   /* TX/RX completion ring current position (wi = with IRQ, woi = without) */
   m_uint32_t tcr_wi_pos,tcr_woi_pos;
   m_uint32_t rcr_wi_pos,rcr_woi_pos;

   /* RX VPI/VCI DMA pointer table */
   m_uint32_t *rx_vpi_vci_dma_table;

   /* RX DMA state table */
   ti1570_rx_dma_entry_t *rx_dma_table;

   /* RX Free-buffer ring pointer table */
   ti1570_rx_fbr_entry_t *rx_fbr_table;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information (TI1570 SAR and PLX bridge) */
   struct pci_device *pci_dev_ti,*pci_dev_plx;

   /* Virtual machine */
   vm_instance_t *vm;

   /* NetIO descriptor */
   netio_desc_t *nio;

   /* TX ring scanner task id */
   ptask_id_t tx_tid;
};
331
332 /* EEPROM definition */
333 static const m_uint16_t eeprom_pa_a1_data[64] = {
334 0x0117, 0x010F, 0xffff, 0xffff, 0x4906, 0x2E07, 0x0000, 0x0000,
335 0x5000, 0x0000, 0x0010, 0x2400, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
336 };
337
338 static const struct c7200_eeprom eeprom_pa_a1 = {
339 "PA-A1-OC3MM", (m_uint16_t *)eeprom_pa_a1_data, sizeof(eeprom_pa_a1_data)/2,
340 };
341
342 /* Log a TI1570 message */
343 #define TI1570_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
344
345 /* Reset the TI1570 (forward declaration) */
346 static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem);
347
/*
 * dev_pa_a1_access()
 *
 * Memory-mapped access handler for the PA-A1 device window. Dispatches
 * a few special offsets, then falls back to raw control-memory access.
 * Always returns NULL (read results are passed back through *data).
 */
void *dev_pa_a1_access(cpu_mips_t *cpu,struct vdevice *dev,m_uint32_t offset,
                       u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct pa_a1_data *d = dev->priv_data;

   /* reads default to 0 so unhandled offsets return a defined value */
   if (op_type == MTS_READ)
      *data = 0;

#if DEBUG_ACCESS
   if (op_type == MTS_READ) {
      cpu_log(cpu,"TI1570","read access to offset = 0x%x, pc = 0x%llx\n",
              offset,cpu->pc);
   } else {
      cpu_log(cpu,"TI1570","write access to vaddr = 0x%x, pc = 0x%llx, "
              "val = 0x%llx\n",offset,cpu->pc,*data);
   }
#endif

   /* Specific cases */
   switch(offset) {
      /* software reset: triggered on any access (read or write); note the
         access then also falls through to the control-memory window below */
      case 0x3238:
         TI1570_LOG(d,"reset issued.\n");
         ti1570_reset(d,FALSE);
         break;

      /* NOTE(review): magic value 0xa6 returned at 0x18000c — presumably
         an id/status probed by IOS; origin not documented here */
      case 0x18000c:
         if (op_type == MTS_READ) {
            *data = 0xa6;
            return NULL;
         }
         break;
   }

   /* Control Memory access (word-granular; low 2 offset bits ignored) */
   if (offset < TI1570_CTRL_MEM_SIZE) {
      if (op_type == MTS_READ)
         *data = d->ctrl_mem_ptr[offset >> 2];
      else
         d->ctrl_mem_ptr[offset >> 2] = *data;
      return NULL;
   }

   /* Unknown offset */
#if DEBUG_UNKNOWN
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
              offset,cpu->pc,op_size);
   } else {
      cpu_log(cpu,d->name,"write to unknown addr 0x%x, value=0x%llx, "
              "pc=0x%llx (size=%u)\n",offset,*data,cpu->pc,op_size);
   }
#endif
   return NULL;
}
405
406 /* Fetch a TX data buffer from host memory */
407 static void ti1570_read_tx_buffer(struct pa_a1_data *d,m_uint32_t addr,
408 ti1570_tx_buffer_t *tx_buf)
409 {
410 physmem_copy_from_vm(d->vm,tx_buf,addr,sizeof(ti1570_tx_buffer_t));
411
412 /* byte-swapping */
413 tx_buf->ctrl_buf = vmtoh32(tx_buf->ctrl_buf);
414 tx_buf->nb_addr = vmtoh32(tx_buf->nb_addr);
415 tx_buf->atm_hdr = vmtoh32(tx_buf->atm_hdr);
416 tx_buf->aal5_ctrl = vmtoh32(tx_buf->aal5_ctrl);
417 }
418
/*
 * Acquire a TX buffer: load the descriptor at "buf_addr" into the TX DMA
 * state entry "tde". Returns TRUE on success, FALSE when the buffer's
 * RDY bit is not set (i.e. the host has not released it yet).
 */
static int ti1570_acquire_tx_buffer(struct pa_a1_data *d,
                                    ti1570_tx_dma_entry_t *tde,
                                    m_uint32_t buf_addr)
{
   ti1570_tx_buffer_t tx_buf;
   m_uint32_t buf_offset;

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_acquire_tx_buffer: acquiring buffer at address 0x%x\n",
              buf_addr);
#endif

   /* Read the TX buffer from host memory */
   ti1570_read_tx_buffer(d,buf_addr,&tx_buf);

   /* The buffer must be ready to be acquired */
   if (!(tx_buf.ctrl_buf & TI1570_TX_BUFFER_RDY))
      return(FALSE);

   /* Put the TX buffer data into the TX DMA state entry */
   tde->ctrl_buf = tx_buf.ctrl_buf;
   tde->nb_addr = tx_buf.nb_addr << 2;   /* stored as a word pointer */

   /* Read the ATM header only from the first buffer (SOP): the header
      and AAL5 control stay constant for the whole packet */
   if (tx_buf.ctrl_buf & TI1570_TX_BUFFER_SOP) {
      tde->atm_hdr = tx_buf.atm_hdr;
      tde->aal5_ctrl = tx_buf.aal5_ctrl;
      tde->aal5_crc = 0xFFFFFFFF;   /* CRC-32 initial seed */
   }

   /* Compute the current-buffer-data address: payload follows the
      descriptor, shifted by the "buffer offset" field */
   buf_offset = tx_buf.ctrl_buf & TI1570_TX_BUFFER_OFFSET_MASK;
   buf_offset >>= TI1570_TX_BUFFER_OFFSET_SHIFT;
   tde->cb_addr = buf_addr + sizeof(tx_buf) + buf_offset;

   /* Remember the start address of the buffer */
   tde->sb_addr = buf_addr;
   return(TRUE);
}
459
460 /* Returns TRUE if the TX DMA entry is for an AAL5 packet */
461 static inline int ti1570_is_tde_aal5(ti1570_tx_dma_entry_t *tde)
462 {
463 m_uint32_t pkt_type;
464
465 pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
466 return(pkt_type == TI1570_TX_DMA_AAL_AAL5);
467 }
468
469 /* Update the AAL5 partial CRC */
470 static void ti1570_update_aal5_crc(struct pa_a1_data *d,
471 ti1570_tx_dma_entry_t *tde)
472 {
473 tde->aal5_crc = crc32_compute(tde->aal5_crc,
474 &d->txfifo_cell[ATM_HDR_SIZE],
475 ATM_PAYLOAD_SIZE);
476 }
477
/*
 * Update the TX DMA entry buffer offset and count when "data_len" bytes
 * have been transmitted.
 *
 * "buf_size" is the byte count remaining in the buffer before this
 * transfer; the new remaining count becomes (buf_size - data_len).
 * For AAL5, the running packet length (used later for the trailer
 * built from aal5_ctrl) is also increased by data_len.
 */
static void ti1570_update_tx_dma_bufinfo(ti1570_tx_dma_entry_t *tde,
                                         m_uint32_t buf_size,
                                         m_uint32_t data_len)
{
   m_uint32_t tmp,tot_len;

   /* update the current buffer address */
   tde->cb_addr += data_len;

   /* set the remaining byte count */
   tmp = tde->ctrl_buf & ~TI1570_TX_BUFFER_DCOUNT_MASK;
   tde->ctrl_buf = tmp + (buf_size - data_len);

   /* update the AAL5 count */
   if (ti1570_is_tde_aal5(tde)) {
      tot_len = tde->aal5_ctrl & TI1570_TX_DMA_RING_AAL5_LEN_MASK;
      tot_len += data_len;

      tmp = (tde->aal5_ctrl & ~TI1570_TX_DMA_RING_AAL5_LEN_MASK) + tot_len;
      tde->aal5_ctrl = tmp;
   }
}
504
505 /* Clear the TX fifo */
506 static void ti1570_clear_tx_fifo(struct pa_a1_data *d)
507 {
508 d->txfifo_avail = ATM_PAYLOAD_SIZE;
509 d->txfifo_pos = ATM_HDR_SIZE;
510 memset(d->txfifo_cell,0,ATM_CELL_SIZE);
511 }
512
/*
 * Transmit the TX FIFO cell through the NETIO infrastructure if
 * it is full (txfifo_avail == 0); otherwise do nothing.
 *
 * When "update_aal5_crc" is set, the cell payload is folded into the
 * DMA entry's partial AAL5 CRC before sending. The FIFO is cleared
 * after a successful send.
 */
static void ti1570_send_tx_fifo(struct pa_a1_data *d,
                                ti1570_tx_dma_entry_t *tde,
                                int update_aal5_crc)
{
   if (d->txfifo_avail == 0) {
#if DEBUG_TRANSMIT
      TI1570_LOG(d,"ti1570_transmit_cell: transmitting to NETIO device\n");
      mem_dump(log_file,d->txfifo_cell,ATM_CELL_SIZE);
#endif
      if (update_aal5_crc)
         ti1570_update_aal5_crc(d,tde);

      netio_send(d->nio,d->txfifo_cell,ATM_CELL_SIZE);
      ti1570_clear_tx_fifo(d);
   }
}
533
534 /* Add padding to the FIFO */
535 static void ti1570_add_tx_padding(struct pa_a1_data *d,m_uint32_t len)
536 {
537 if (len > d->txfifo_avail) {
538 TI1570_LOG(d,"ti1570_add_tx_padding: trying to add too large "
539 "padding (avail: 0x%x, pad: 0x%x)\n",d->txfifo_avail,len);
540 len = d->txfifo_avail;
541 }
542
543 memset(&d->txfifo_cell[d->txfifo_pos],0,len);
544 d->txfifo_pos += len;
545 d->txfifo_avail -= len;
546 }
547
/*
 * Initialize an ATM cell for transmitting: write the (possibly
 * PTI-adjusted) header + HEC into the TX FIFO and copy as much buffer
 * payload as fits. Returns the number of payload bytes consumed.
 */
static m_uint32_t ti1570_init_tx_atm_cell(struct pa_a1_data *d,
                                          ti1570_tx_dma_entry_t *tde,
                                          int set_pti)
{
   m_uint32_t buf_size,len,atm_hdr;

   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
   len = m_min(buf_size,d->txfifo_avail);

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_init_tx_atm_cell: data ptr=0x%x, "
              "buf_size=%u (0x%x), len=%u (0x%x), atm_hdr=0x%x\n",
              tde->cb_addr,buf_size,buf_size,len,len,tde->atm_hdr);
#endif

   /* copy the ATM header */
   atm_hdr = tde->atm_hdr;

   /* for the last cell of a packet, mark end-of-packet in the PTI field */
   if (set_pti) {
      atm_hdr &= ~ATM_PTI_NETWORK;
      atm_hdr |= ATM_PTI_EOP;
   }

   *(m_uint32_t *)d->txfifo_cell = htonl(atm_hdr);

   /* compute HEC field */
   atm_insert_hec(d->txfifo_cell);

   /* copy as much payload as fits into the FIFO */
   if (len > 0) {
      physmem_copy_from_vm(d->vm,&d->txfifo_cell[d->txfifo_pos],
                           tde->cb_addr,len);
      d->txfifo_pos += len;
      d->txfifo_avail -= len;
   }

   /* advance the DMA entry past the consumed bytes */
   ti1570_update_tx_dma_bufinfo(tde,buf_size,len);
   return(len);
}
588
/*
 * Transmit a Transparent-AAL ATM cell through the NETIO infrastructure.
 *
 * Sets *buf_end when the current buffer has been fully consumed.
 * Returns TRUE only for the last cell of the packet.
 */
static int ti1570_transmit_transp_cell(struct pa_a1_data *d,
                                       ti1570_tx_dma_entry_t *tde,
                                       int atm_set_eop,int *buf_end)
{
   m_uint32_t buf_size,len;
   int pkt_end,last_cell;

   pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
   last_cell = FALSE;

   /* not the last buffer of the packet: send the cell as-is */
   if (!pkt_end) {
      len = ti1570_init_tx_atm_cell(d,tde,FALSE);
      ti1570_send_tx_fifo(d,tde,FALSE);

      if ((buf_size - len) == 0)
         *buf_end = TRUE;

      return(FALSE);
   }

   /* this is the end of packet and the last buffer */
   if (buf_size <= d->txfifo_avail)
      last_cell = TRUE;

   /* note: bitwise '&' on two 0/1 flags — PTI EOP is only set when this
      is both the last cell and the caller requested PTI marking */
   len = ti1570_init_tx_atm_cell(d,tde,last_cell & atm_set_eop);
   if (last_cell) ti1570_add_tx_padding(d,d->txfifo_avail);
   ti1570_send_tx_fifo(d,tde,FALSE);
   return(last_cell);
}
622
/*
 * Add the AAL5 trailer to the TX FIFO: control/length word followed by
 * the final CRC-32 (one's complement of the running CRC). The FIFO is
 * then marked full so the next ti1570_send_tx_fifo() emits the cell.
 */
static void ti1570_add_aal5_trailer(struct pa_a1_data *d,
                                    ti1570_tx_dma_entry_t *tde)
{
   m_uint8_t *trailer;

   trailer = &d->txfifo_cell[ATM_AAL5_TRAILER_POS];

   /* Control field + Length */
   *(m_uint32_t *)trailer = htonl(tde->aal5_ctrl);

   /* Final CRC-32 computation: cover the payload up to (but excluding)
      the CRC field itself */
   tde->aal5_crc = crc32_compute(tde->aal5_crc,
                                 &d->txfifo_cell[ATM_HDR_SIZE],
                                 ATM_PAYLOAD_SIZE - 4);

   *(m_uint32_t *)(trailer+4) = htonl(~tde->aal5_crc);

   /* Consider the FIFO as full */
   d->txfifo_avail = 0;
}
644
/*
 * Transmit an AAL5 cell through the NETIO infrastructure.
 *
 * Sets *buf_end when the current buffer has been fully consumed.
 * Returns TRUE if this is the real end of packet (trailer emitted).
 */
static int ti1570_transmit_aal5_cell(struct pa_a1_data *d,
                                     ti1570_tx_dma_entry_t *tde,
                                     int *buf_end)
{
   m_uint32_t buf_size,len;
   int pkt_end;

   pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_transmit_aal5_cell: data ptr=0x%x, "
              "buf_size=0x%x (%u)\n",tde->cb_addr,buf_size,buf_size);
#endif

   /* If this is not the end of packet, transmit the cell normally */
   if (!pkt_end) {
      len = ti1570_init_tx_atm_cell(d,tde,FALSE);
      ti1570_send_tx_fifo(d,tde,TRUE);

      if ((buf_size - len) == 0)
         *buf_end = TRUE;

      return(FALSE);
   }

   /*
    * This is the end of packet, check if we need to emit a special cell
    * for the AAL5 trailer: the remaining data plus the trailer must fit
    * in the current cell.
    */
   if ((buf_size + ATM_AAL5_TRAILER_SIZE) <= d->txfifo_avail) {
      len = ti1570_init_tx_atm_cell(d,tde,TRUE);

      /* add the padding */
      ti1570_add_tx_padding(d,d->txfifo_avail - ATM_AAL5_TRAILER_SIZE);

      /* add the AAL5 trailer at offset 40 */
      ti1570_add_aal5_trailer(d,tde);

      /* we can transmit the cell (CRC already finalized by the trailer) */
      ti1570_send_tx_fifo(d,tde,FALSE);

      *buf_end = TRUE;
      return(TRUE);
   }

   /* Trailer doesn't fit: transmit the cell normally, the trailer will
      go into a following cell */
   len = ti1570_init_tx_atm_cell(d,tde,FALSE);
   ti1570_add_tx_padding(d,d->txfifo_avail);
   ti1570_send_tx_fifo(d,tde,TRUE);
   return(FALSE);
}
702
/*
 * Update the TX completion ring: post the finished buffer's start
 * address into the completion ring selected by the DMA entry (with or
 * without interrupt), then advance the corresponding ring position.
 * If the ring slot is still owned by the host, raise TX freeze instead.
 */
static void ti1570_update_tx_cring(struct pa_a1_data *d,
                                   ti1570_tx_dma_entry_t *tde)
{
   m_uint32_t tcr_addr,tcr_end,val;

   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
      /* TX completion ring with interrupt */
      tcr_addr = d->iregs[TI1570_REG_TCR_WI_ADDR] + (d->tcr_wi_pos * 4);
   } else {
      /* TX completion ring without interrupt */
      tcr_addr = d->iregs[TI1570_REG_TCR_WOI_ADDR] + (d->tcr_woi_pos * 4);
   }

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_update_tx_cring: posting 0x%x at address 0x%x\n",
              tde->sb_addr,tcr_addr);

   physmem_dump_vm(d->vm,tde->sb_addr,sizeof(ti1570_tx_buffer_t) >> 2);
#endif

   /* we have a TX freeze if the buffer belongs to the host */
   val = physmem_copy_u32_from_vm(d->vm,tcr_addr);
   if (!(val & TI1570_TCR_OWN)) {
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_TX_FRZ;
      return;
   }

   /* put the buffer address in the ring (word pointer, OWN bit cleared) */
   val = tde->sb_addr >> 2;

   if (tde->ctrl_buf & TI1570_TX_DMA_ABORT)
      val |= TI1570_TCR_ABORT;

   physmem_copy_u32_to_vm(d->vm,tcr_addr,val);

   /* update the internal position pointer (wrap at the configured size) */
   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
      tcr_end = d->iregs[TI1570_REG_TX_CRING_SIZE] & TI1570_TCR_SIZE_MASK;

      if ((d->tcr_wi_pos++) == tcr_end)
         d->tcr_wi_pos = 0;
   } else {
      /* the without-IRQ ring size lives in the upper half of the register */
      tcr_end = (d->iregs[TI1570_REG_TX_CRING_SIZE] >> 16);
      tcr_end &= TI1570_TCR_SIZE_MASK;

      if ((d->tcr_woi_pos++) == tcr_end)
         d->tcr_woi_pos = 0;
   }
}
753
/*
 * Analyze a TX DMA state table entry and transmit (at most) one cell.
 *
 * Acquires a packet from the packet-segmentation ring if no packet is
 * in progress, transmits one cell of the appropriate AAL type, posts
 * completions/interrupts, and returns the PSR entry to the host on EOP.
 * Returns TRUE if more work may remain on this entry, FALSE to stop
 * (DMA off, ring empty, buffer not ready, or invalid AAL type).
 */
static int ti1570_scan_tx_dma_entry_single(struct pa_a1_data *d,
                                           m_uint32_t index)
{
   ti1570_tx_dma_entry_t *tde;
   m_uint32_t psr_base,psr_addr,psr_entry,psr_end;
   m_uint32_t buf_addr,buf_size,pkt_type,tmp;
   m_uint32_t psr_index;
   int atm_set_eop = 0;
   int pkt_end,buf_end = 0;

   tde = &d->tx_dma_table[index];

   /* The DMA channel state flag must be ON */
   if (!(tde->dma_state & TI1570_TX_DMA_ON))
      return(FALSE);

#if DEBUG_TX_DMA
   /* We have a running DMA channel */
   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is ON "
              "(ctrl_buf = 0x%x)\n",index,tde->ctrl_buf);
#endif

   /* Is this the start of a new packet ? */
   if (!(tde->ctrl_buf & TI1570_TX_DMA_ACT))
   {
#if DEBUG_TX_DMA
      TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is not ACT\n",
                 index);
#endif

      /* No packet yet, fetch it from the packet-segmentation ring */
      psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
      psr_index = tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK;

      /* Compute address of the current packet segmentation ring entry
         (ring entries are word pointers, hence the << 2) */
      psr_addr = (psr_base + psr_index) << 2;
      psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);

#if DEBUG_TX_DMA
      TI1570_LOG(d,"ti1570_scan_tx_dma_entry: psr_addr = 0x%x, "
                 "psr_entry = 0x%x\n",psr_addr,psr_entry);
#endif

      /* The packet-segmentation-ring entry is owned by host, quit now */
      if (!(psr_entry & TI1570_TX_RING_OWN))
         return(FALSE);

      /* Acquire the first buffer (it MUST be in the ready state) */
      buf_addr = (psr_entry & TI1570_TX_RING_PTR_MASK) << 2;

      if (!ti1570_acquire_tx_buffer(d,tde,buf_addr)) {
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR entry with OWN bit set "
                    "but buffer without RDY bit set.\n");
         return(FALSE);
      }

      /* Set ACT bit for the DMA channel */
      tde->ctrl_buf |= TI1570_TX_DMA_ACT;
   }

   /* Compute the remaining size and determine the packet type */
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
   pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
   pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: ctrl_buf=0x%8.8x, "
              "cb_addr=0x%8.8x, atm_hdr=0x%8.8x, dma_state=0x%8.8x\n",
              tde->ctrl_buf, tde->cb_addr, tde->atm_hdr, tde->dma_state);

   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: nb_addr=0x%8.8x, "
              "sb_addr=0x%8.8x, aal5_crc=0x%8.8x, aal5_ctrl=0x%8.8x\n",
              tde->nb_addr, tde->sb_addr, tde->aal5_crc, tde->aal5_ctrl);
#endif

   /*
    * If the current buffer is now empty and if this is not the last
    * buffer in the current packet, try to fetch a new buffer.
    * If the next buffer is not yet ready, we have finished.
    */
   if (!buf_size && !pkt_end && !ti1570_acquire_tx_buffer(d,tde,tde->nb_addr))
      return(FALSE);

   switch(pkt_type) {
      case TI1570_TX_DMA_AAL_TRWPTI:
         atm_set_eop = 1;
         /* fallthrough: TRWPTI is TRWOPTI plus PTI end-of-packet marking */

      case TI1570_TX_DMA_AAL_TRWOPTI:
         /* Transmit the ATM cell transparently */
         pkt_end = ti1570_transmit_transp_cell(d,tde,atm_set_eop,&buf_end);
         break;

      case TI1570_TX_DMA_AAL_AAL5:
         pkt_end = ti1570_transmit_aal5_cell(d,tde,&buf_end);
         break;

      default:
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: invalid AAL-type\n");
         return(FALSE);
   }

   /* Re-read the remaining buffer size */
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;

   /* Put the buffer address in the transmit completion ring */
   if (buf_end) ti1570_update_tx_cring(d,tde);

   /*
    * If we have reached end of packet (EOP): clear the ACT bit,
    * give back the packet-segmentation ring entry to the host,
    * and increment the PSR index.
    */
   if (pkt_end) {
      tde->ctrl_buf &= ~TI1570_TX_DMA_ACT;

      /* Clear the OWN bit of the packet-segmentation ring entry */
      psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
      psr_index = (tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK);
      psr_addr = (psr_base + psr_index) << 2;

      psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);
      psr_entry &= ~TI1570_TX_RING_OWN;
      physmem_copy_u32_to_vm(d->vm,psr_addr,psr_entry);

      /* Increment the packet-segmentation ring index */
      psr_index++;
      psr_end = d->iregs[TI1570_REG_TX_PSR_SIZE] >> 16;
      psr_end &= TI1570_PSR_SIZE_MASK;

      if (psr_index > psr_end) {
         psr_index = 0;
#if DEBUG_TX_DMA
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR ring rotation "
                    "(psr_end = %u)\n",psr_end);
#endif
      }

      tmp = (tde->dma_state & ~TI1570_TX_DMA_RING_INDEX_MASK);
      tmp |= (psr_index & TI1570_TX_DMA_RING_INDEX_MASK);
      tde->dma_state = tmp;
   }

   /* Generate an interrupt if required */
   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT)
   {
      /* NOTE(review): TI1570_CFG_BP_SEL is a configuration-register bit
         but is tested here against the STATUS register — looks like it
         should read TI1570_REG_CONFIG; verify against the TI1570 manual
         before changing */
      if (((d->iregs[TI1570_REG_STATUS] & TI1570_CFG_BP_SEL) && buf_end) ||
          pkt_end)
      {
         d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_TX;
         pci_dev_trigger_irq(d->vm,d->pci_dev_ti);
      }
   }

   return(TRUE);
}
910
911 /* Analyze a TX DMA state table entry */
912 static void ti1570_scan_tx_dma_entry(struct pa_a1_data *d,m_uint32_t index)
913 {
914 int i;
915
916 for(i=0;i<TI1570_TXDMA_PASS_COUNT;i++)
917 if (!ti1570_scan_tx_dma_entry_single(d,index))
918 break;
919 }
920
921 /* Analyze the TX schedule table */
922 static void ti1570_scan_tx_sched_table(struct pa_a1_data *d)
923 {
924 m_uint32_t cw,index0,index1;
925 u_int i;
926
927 for(i=0;i<TI1570_TX_SCHED_ENTRY_COUNT>>1;i++) {
928 cw = d->tx_sched_table[i];
929
930 /* We have 2 index in TX DMA state table per word */
931 index0 = (cw >> TI1570_TX_SCHED_E0_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
932 index1 = (cw >> TI1570_TX_SCHED_E1_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
933
934 /* Scan the two entries (null entry => nothing to do) */
935 if (index0) ti1570_scan_tx_dma_entry(d,index0);
936 if (index1) ti1570_scan_tx_dma_entry(d,index1);
937 }
938 }
939
940 /*
941 * Read a RX buffer from the host memory.
942 */
943 static void ti1570_read_rx_buffer(struct pa_a1_data *d,m_uint32_t addr,
944 ti1570_rx_buffer_t *rx_buf)
945 {
946 physmem_copy_from_vm(d->vm,rx_buf,addr,sizeof(ti1570_rx_buffer_t));
947
948 /* byte-swapping */
949 rx_buf->reserved = vmtoh32(rx_buf->reserved);
950 rx_buf->ctrl = vmtoh32(rx_buf->ctrl);
951 rx_buf->atm_hdr = vmtoh32(rx_buf->atm_hdr);
952 rx_buf->user = vmtoh32(rx_buf->user);
953 }
954
/*
 * Update the RX completion ring.
 *
 * Posts a completion entry for a reassembled packet into one of the two
 * completion rings in host memory: the "with interrupt" (WI) ring or the
 * "without interrupt" (WOI) ring, selected by the RCR_SELECT bit of the
 * RX DMA entry. Also clears the channel's ACT bit and advances the
 * internal ring position; an IRQ is raised only for the WI ring.
 *
 * rde          - RX DMA state table entry for this channel
 * atm_hdr      - ATM header of the last cell received
 * aal5_trailer - AAL5 trailer word (meaningful for AAL5 channels)
 * err_ind      - error indication bits to store in the entry
 * fbuf_valid   - non-zero if the start-of-packet pointer is valid
 */
static void ti1570_update_rx_cring(struct pa_a1_data *d,
                                   ti1570_rx_dma_entry_t *rde,
                                   m_uint32_t atm_hdr,
                                   m_uint32_t aal5_trailer,
                                   m_uint32_t err_ind,
                                   m_uint32_t fbuf_valid)
{
   m_uint32_t rcr_addr,rcr_end,aal_type,ptr,val;
   ti1570_rcr_entry_t rcre;

   /* Compute the host address of the next completion ring slot */
   if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
      /* RX completion ring with interrupt */
      rcr_addr = d->iregs[TI1570_REG_RCR_WI_ADDR];
      rcr_addr += (d->rcr_wi_pos * sizeof(rcre));
   } else {
      /* RX completion ring without interrupt */
      rcr_addr = d->iregs[TI1570_REG_RCR_WOI_ADDR];
      rcr_addr += (d->rcr_woi_pos * sizeof(rcre));
   }

#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_update_rx_cring: posting 0x%x at address 0x%x\n",
              (rde->sp_ptr << 2),rcr_addr);

   physmem_dump_vm(d->vm,rde->sp_ptr<<2,sizeof(ti1570_rx_buffer_t) >> 2);
#endif

   /* we have a RX freeze if the buffer belongs to the host
      (OWN bit of the slot's fbr_entry word is clear) */
   ptr = rcr_addr + OFFSET(ti1570_rcr_entry_t,fbr_entry);
   val = physmem_copy_u32_from_vm(d->vm,ptr);

   if (!(val & TI1570_RCR_OWN)) {
      TI1570_LOG(d,"ti1570_update_rx_cring: RX freeze...\n");
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_RX_FRZ;
      return;
   }

   /* fill the RX completion ring entry and write it back to the host */
   memset(&rcre,0,sizeof(rcre));

   /* word 0: atm header from last cell received */
   rcre.atm_hdr = atm_hdr;

   /* word 1: error indicator (AAL5 flag + caller-supplied bits) */
   aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
   if (aal_type == TI1570_RX_DMA_AAL_AAL5)
      rcre.error |= TI1570_RCR_AAL5;

   rcre.error |= err_ind;

   /* word 2: Start of packet */
   if (fbuf_valid)
      rcre.sp_addr = TI1570_RCR_VALID | rde->sp_ptr;

   /* word 3: AAL5 trailer */
   rcre.aal5_trailer = aal5_trailer;

   /* word 4: OWN + error entry + free-buffer ring pointer
      (OWN left clear: the entry now belongs to the host) */
   rcre.fbr_entry = rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK;
   if (err_ind) rcre.fbr_entry |= TI1570_RCR_ERROR;

   /* byte-swap and write this back to the host memory */
   rcre.atm_hdr = htonl(rcre.atm_hdr);
   rcre.error = htonl(rcre.error);
   rcre.sp_addr = htonl(rcre.sp_addr);
   rcre.aal5_trailer = htonl(rcre.aal5_trailer);
   rcre.fbr_entry = htonl(rcre.fbr_entry);
   physmem_copy_to_vm(d->vm,&rcre,rcr_addr,sizeof(rcre));

   /* clear the active bit of the RX DMA entry */
   rde->ctrl &= ~TI1570_RX_DMA_ACT;

   /* update the internal position pointer (wrap when the previous
      position reached the ring end) */
   if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
      rcr_end = d->iregs[TI1570_REG_RX_CRING_SIZE] & TI1570_RCR_SIZE_MASK;

      if ((d->rcr_wi_pos++) == rcr_end)
         d->rcr_wi_pos = 0;

      /* generate the appropriate IRQ */
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_RX;
      pci_dev_trigger_irq(d->vm,d->pci_dev_ti);
   } else {
      /* WOI ring size lives in the upper half of the register */
      rcr_end = (d->iregs[TI1570_REG_RX_CRING_SIZE] >> 16);
      rcr_end &= TI1570_RCR_SIZE_MASK;

      if ((d->rcr_woi_pos++) == rcr_end)
         d->rcr_woi_pos = 0;
   }
}
1046
1047 /*
1048 * Acquire a free RX buffer.
1049 *
1050 * Returns FALSE if no buffer is available (buffer starvation).
1051 */
1052 static int ti1570_acquire_rx_buffer(struct pa_a1_data *d,
1053 ti1570_rx_dma_entry_t *rde,
1054 ti1570_rx_buf_holder_t *rbh,
1055 m_uint32_t atm_hdr)
1056 {
1057 ti1570_rx_fbr_entry_t *fbr_entry = NULL;
1058 m_uint32_t bp_addr,buf_addr,buf_size,buf_idx;
1059 m_uint32_t ring_index,ring_size;
1060 m_uint32_t buf_ptr,val;
1061 int fifo = FALSE;
1062
1063 /* To keep this fucking compiler quiet */
1064 ring_size = 0;
1065 buf_idx = 0;
1066
1067 if (rde->ctrl & TI1570_RX_DMA_FIFO) {
1068 bp_addr = (rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK) << 2;
1069 buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
1070 buf_size = d->iregs[TI1570_REG_TX_PSR_SIZE] & 0xFFFF;
1071 fifo = TRUE;
1072
1073 #if DEBUG_RECEIVE
1074 TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring FIFO buffer\n");
1075 #endif
1076 }
1077 else
1078 {
1079 ring_index = rde->fbr_entry & TI1570_RX_DMA_FB_INDEX_MASK;
1080 fbr_entry = &d->rx_fbr_table[ring_index];
1081
1082 #if DEBUG_RECEIVE
1083 TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring non-FIFO buffer, "
1084 "ring index=%u (0x%x)\n",ring_index,ring_index);
1085 #endif
1086
1087 /* Compute the number of entries in ring */
1088 ring_size = fbr_entry->ring_size & TI1570_RX_FBR_RS_MASK;
1089 ring_size >>= TI1570_RX_FBR_RS_SHIFT;
1090 ring_size = (ring_size << 4) + 15 + 1;
1091
1092 /* Compute the buffer size */
1093 buf_size = fbr_entry->ring_size & TI1570_RX_FBR_BS_MASK;
1094 buf_size >>= TI1570_RX_FBR_BS_SHIFT;
1095
1096 /* Compute the buffer address */
1097 buf_idx = fbr_entry->ring_size & TI1570_RX_FBR_IDX_MASK;
1098 bp_addr = fbr_entry->fbr_ptr + (buf_idx << 2);
1099
1100 #if DEBUG_RECEIVE
1101 TI1570_LOG(d,"ti1570_acquire_rx_buffer: ring size=%u (0x%x), "
1102 "buf size=%u ATM cells\n",ring_size,ring_size,buf_size);
1103
1104 TI1570_LOG(d,"ti1570_acquire_rx_buffer: buffer index=%u (0x%x), "
1105 "buffer ptr address = 0x%x\n",buf_idx,buf_idx,bp_addr);
1106 #endif
1107
1108 buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
1109 }
1110
1111 #if DEBUG_RECEIVE
1112 TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_ptr = 0x%x\n",buf_ptr);
1113 #endif
1114
1115 /* The TI1570 must own the buffer */
1116 if (!(buf_ptr & TI1570_RX_BUFPTR_OWN)) {
1117 TI1570_LOG(d,"ti1570_acquire_rx_buffer: no free buffer available.\n");
1118 return(FALSE);
1119 }
1120
1121 /*
1122 * If we are using a ring, we have to clear the OWN bit and increment
1123 * the index field.
1124 */
1125 if (!fifo) {
1126 buf_ptr &= ~TI1570_RX_BUFPTR_OWN;
1127 physmem_copy_u32_to_vm(d->vm,bp_addr,buf_ptr);
1128
1129 if (++buf_idx == ring_size) {
1130 #if DEBUG_RECEIVE
1131 TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_idx=0x%x, "
1132 "ring_size=0x%x -> resetting buf_idx\n",
1133 buf_idx-1,ring_size);
1134 #endif
1135 buf_idx = 0;
1136 }
1137
1138 val = fbr_entry->ring_size & ~TI1570_RX_FBR_IDX_MASK;
1139 val |= buf_idx;
1140 fbr_entry->ring_size = val;
1141 }
1142
1143 /* Get the buffer address */
1144 buf_addr = (buf_ptr & TI1570_RX_BUFPTR_MASK) << 2;
1145
1146 #if DEBUG_RECEIVE
1147 TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_addr = 0x%x\n",buf_addr);
1148 #endif
1149
1150 /* Read the buffer descriptor itself and store info for caller */
1151 rbh->buf_addr = buf_addr;
1152 rbh->buf_size = buf_size;
1153 ti1570_read_rx_buffer(d,buf_addr,&rbh->rx_buf);
1154
1155 /* Clear the control field */
1156 physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,ctrl),0);
1157
1158 /* Store the ATM header in data buffer */
1159 physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,atm_hdr),
1160 atm_hdr);
1161 return(TRUE);
1162 }
1163
1164 /* Insert a new free buffer in a RX DMA entry */
1165 static void ti1570_insert_rx_free_buf(struct pa_a1_data *d,
1166 ti1570_rx_dma_entry_t *rde,
1167 ti1570_rx_buf_holder_t *rbh)
1168 {
1169 m_uint32_t val,aal_type;
1170
1171 aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
1172
1173 /* Set current and start of buffer addresses */
1174 rde->cb_addr = rbh->buf_addr + sizeof(ti1570_rx_buffer_t);
1175 rde->sb_addr = rbh->buf_addr >> 2;
1176
1177 /* Set the buffer length */
1178 val = rbh->buf_size;
1179
1180 if (aal_type == TI1570_RX_DMA_AAL_CNT)
1181 val |= (rde->aal5_crc & 0xFFFF) << 16;
1182
1183 rde->cb_len = val;
1184 }
1185
/* Store a RX cell.
 *
 * Copies the 48-byte payload of one ATM cell into the current RX
 * buffer of the DMA channel, chaining in a fresh free buffer when the
 * current one is full. When the end of packet (EOP) is detected --
 * either via the PTI bits or via the transparent-AAL cell counter --
 * the buffer is marked EOP and a completion ring entry is posted.
 *
 * Returns TRUE on success, FALSE on starvation or unhandled EOP wait.
 */
static int ti1570_store_rx_cell(struct pa_a1_data *d,
                                ti1570_rx_dma_entry_t *rde,
                                m_uint8_t *atm_cell)
{
   m_uint32_t aal_type,atm_hdr,aal5_trailer,pti,real_eop,pti_eop;
   m_uint32_t prev_buf_addr,buf_len,val,ptr,cnt;
   ti1570_rx_buf_holder_t rbh;

   real_eop = pti_eop = FALSE;
   aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;

   /* Extract PTI from the ATM header */
   atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
   pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;

   /* PTI == 0x1 => EOP */
   if ((pti == 0x01) || (pti == 0x03))
      pti_eop = TRUE;

   /* Channel is discarding cells until the next EOP: not implemented */
   if (rde->ctrl & TI1570_RX_DMA_WAIT_EOP) {
      TI1570_LOG(d,"ti1570_store_rx_cell: EOP processing, not handled yet.\n");
      return(FALSE);
   }

   /* AAL5 special processing */
   if (aal_type == TI1570_RX_DMA_AAL_AAL5)
   {
      /* Check that we don't exceed 1366 cells for AAL5 */
      /* XXX TODO */
   }
   else
   {
      /* EOP processing for non counter-based transparent-AAL packets */
      /* NOTE(review): this branch is unreachable -- WAIT_EOP already
         caused an early return above, so the condition below can never
         be true here. Kept as-is pending the TODO. */
      if ((rde->ctrl & TI1570_RX_DMA_WAIT_EOP) && pti_eop)
      {
         /* XXX TODO */
      }
   }

   /* do we have enough room in buffer ? */
   buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;

   if (!buf_len) {
      /* Current buffer is full: remember it so we can chain to it */
      prev_buf_addr = rde->sb_addr << 2;

      /* acquire a new free buffer */
      if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
         rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
         return(FALSE);
      }

      /* insert the free buffer in the RX DMA structure */
      ti1570_insert_rx_free_buf(d,rde,&rbh);

      /* chain the buffers (keep SOP/EOP bits intact) */
      ptr = prev_buf_addr + OFFSET(ti1570_rx_buffer_t,ctrl);

      val = physmem_copy_u32_from_vm(d->vm,ptr);
      val |= rde->sb_addr;
      physmem_copy_u32_to_vm(d->vm,ptr,val);

      /* read the new buffer length */
      buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;
   }

   /* copy the ATM payload */
#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_store_rx_cell: storing cell payload at 0x%x "
              "(buf_addr=0x%x)\n",rde->cb_addr,rde->sb_addr << 2);
#endif

   physmem_copy_to_vm(d->vm,&atm_cell[ATM_HDR_SIZE],
                      rde->cb_addr,ATM_PAYLOAD_SIZE);
   rde->cb_addr += ATM_PAYLOAD_SIZE;

   /* update the current buffer length */
   val = rde->cb_len & ~TI1570_RX_DMA_CB_LEN_MASK;
   rde->cb_len = val | (--buf_len);

#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_store_rx_cell: new rde->cb_len = 0x%x, "
              "buf_len=0x%x\n",rde->cb_len,buf_len);
#endif

   /* determine if this is the end of the packet (EOP) */
   if (aal_type == TI1570_RX_DMA_AAL_CNT)
   {
      /* counter-based tranparent-AAL packets */
      cnt = rde->cb_len & TI1570_RX_DMA_TR_CNT_MASK;
      cnt >>= TI1570_RX_DMA_TR_CNT_SHIFT;

      /* if the counter reaches 0, this is the EOP */
      if (--cnt == 0)
         real_eop = TRUE;

      /* NOTE(review): val is computed here but never written back to
         rde->cb_len, so the decremented counter appears to be lost
         across cells -- verify against the TNETA1570 manual before
         changing. */
      val = rde->cb_len & ~TI1570_RX_DMA_TR_CNT_MASK;
      val |= cnt << TI1570_RX_DMA_TR_CNT_SHIFT;
   }
   else {
      /* PTI-based transparent AAL packets or AAL5 */
      if (pti_eop)
         real_eop = TRUE;
   }

   if (real_eop) {
      /* mark the buffer as EOP */
      ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
      val = physmem_copy_u32_from_vm(d->vm,ptr);
      val |= TI1570_RX_BUFFER_EOP;
      physmem_copy_u32_to_vm(d->vm,ptr,val);

      /* get the aal5 trailer */
      aal5_trailer = ntohl(*(m_uint32_t *)&atm_cell[ATM_AAL5_TRAILER_POS]);

      /* post the entry into the appropriate RX completion ring */
      ti1570_update_rx_cring(d,rde,atm_hdr,aal5_trailer,0,TRUE);
   }

   return(TRUE);
}
1307
1308 /* Handle a received ATM cell */
1309 static int ti1570_handle_rx_cell(netio_desc_t *nio,
1310 u_char *atm_cell,ssize_t cell_len,
1311 struct pa_a1_data *d)
1312 {
1313 m_uint32_t atm_hdr,vpi,vci,vci_idx,vci_mask;
1314 m_uint32_t vci_max,rvd_entry,bptr,pti,ptr;
1315 ti1570_rx_dma_entry_t *rde = NULL;
1316 ti1570_rx_buf_holder_t rbh;
1317
1318 if (cell_len != ATM_CELL_SIZE) {
1319 TI1570_LOG(d,"invalid RX cell size (%ld)\n",(long)cell_len);
1320 return(FALSE);
1321 }
1322
1323 /* Extract the VPI/VCI used as index in the RX VPI/VCI DMA pointer table */
1324 atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
1325 vpi = (atm_hdr & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1326 vci = (atm_hdr & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1327 pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1328
1329 #if DEBUG_RECEIVE
1330 TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with VPI/VCI=%u/%u\n",
1331 vpi,vci);
1332 #endif
1333
1334 /* Get the entry corresponding to this VPI in RX VPI/VCI dma ptr table */
1335 rvd_entry = d->rx_vpi_vci_dma_table[vpi];
1336
1337 if (!(rvd_entry & TI1570_RX_VPI_ENABLE)) {
1338 TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with "
1339 "unknown VPI %u (VCI=%u)\n",vpi,vci);
1340 return(FALSE);
1341 }
1342
1343 /*
1344 * Special routing for OAM F4 cells:
1345 * - VCI 3 : OAM F4 segment cell
1346 * - VCI 4 : OAM F4 end-to-end cell
1347 */
1348 if ((vci == 3) || (vci == 4))
1349 rde = &d->rx_dma_table[2];
1350 else {
1351 if ((atm_hdr & ATM_PTI_NETWORK) != 0) {
1352 switch(pti) {
1353 case 0x04: /* OAM F5-segment cell */
1354 case 0x05: /* OAM F5 end-to-end cell */
1355 rde = &d->rx_dma_table[0];
1356 break;
1357
1358 case 0x06:
1359 case 0x07:
1360 rde = &d->rx_dma_table[1];
1361 break;
1362 }
1363 } else {
1364 /*
1365 * Standard VPI/VCI.
1366 * Apply the VCI mask if we don't have an OAM cell.
1367 */
1368 if (!(atm_hdr & ATM_PTI_NETWORK)) {
1369 vci_mask = d->iregs[TI1570_REG_TX_RX_FIFO] >> 16;
1370 vci_idx = vci & (~vci_mask);
1371
1372 vci_max = rvd_entry & TI1570_RX_VCI_RANGE_MASK;
1373
1374 if (vci_idx > vci_max) {
1375 TI1570_LOG(d,"ti1570_handle_rx_cell: out-of-range VCI %u "
1376 "(VPI=%u,vci_mask=%u,vci_max=%u)\n",
1377 vci,vpi,vci_mask,vci_max);
1378 return(FALSE);
1379 }
1380
1381 #if DEBUG_RECEIVE
1382 TI1570_LOG(d,"ti1570_handle_rx_cell: VPI/VCI=%u/%u, "
1383 "vci_mask=0x%x, vci_idx=%u (0x%x), vci_max=%u (0x%x)\n",
1384 vpi,vci,vci_mask,vci_idx,vci_idx,vci_max,vci_max);
1385 #endif
1386 bptr = (rvd_entry & TI1570_RX_BASE_PTR_MASK);
1387 bptr >>= TI1570_RX_BASE_PTR_SHIFT;
1388 bptr = (bptr + vci) * sizeof(ti1570_rx_dma_entry_t);
1389
1390 if (bptr < TI1570_RX_DMA_TABLE_OFFSET) {
1391 TI1570_LOG(d,"ti1570_handle_rx_cell: inconsistency in "
1392 "RX VPI/VCI table, VPI/VCI=%u/u, bptr=0x%x\n",
1393 vpi,vci,bptr);
1394 return(FALSE);
1395 }
1396
1397 bptr -= TI1570_RX_DMA_TABLE_OFFSET;
1398 rde = &d->rx_dma_table[bptr / sizeof(ti1570_rx_dma_entry_t)];
1399 }
1400 }
1401 }
1402
1403 if (!rde) {
1404 TI1570_LOG(d,"ti1570_handle_rx_cell: no RX DMA table entry found!\n");
1405 return(FALSE);
1406 }
1407
1408 /* The entry must be active */
1409 if (!(rde->fbr_entry & TI1570_RX_DMA_ON))
1410 return(FALSE);
1411
1412 /* Is this the start of a new packet ? */
1413 if (!(rde->ctrl & TI1570_RX_DMA_ACT))
1414 {
1415 /* Try to acquire a free buffer */
1416 if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
1417 rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
1418 return(FALSE);
1419 }
1420
1421 /* Insert the free buffer in the RX DMA structure */
1422 ti1570_insert_rx_free_buf(d,rde,&rbh);
1423 rde->sp_ptr = rde->sb_addr;
1424
1425 /* Mark the RX buffer as the start of packet (SOP) */
1426 ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
1427 physmem_copy_u32_to_vm(d->vm,ptr,TI1570_RX_BUFFER_SOP);
1428
1429 /* Set ACT bit for the DMA channel */
1430 rde->ctrl |= TI1570_RX_DMA_ACT;
1431 }
1432
1433 /* Store the received cell */
1434 ti1570_store_rx_cell(d,rde,atm_cell);
1435 return(TRUE);
1436 }
1437
1438 /*
1439 * pci_ti1570_read()
1440 */
1441 static m_uint32_t pci_ti1570_read(cpu_mips_t *cpu,struct pci_device *dev,
1442 int reg)
1443 {
1444 struct pa_a1_data *d = dev->priv_data;
1445
1446 #if DEBUG_ACCESS
1447 TI1570_LOG(d,"pci_ti1570_read: read reg 0x%x\n",reg);
1448 #endif
1449
1450 switch(reg) {
1451 case PCI_REG_BAR0:
1452 return(d->dev->phys_addr);
1453 default:
1454 return(0);
1455 }
1456 }
1457
1458 /*
1459 * pci_ti1570_write()
1460 */
1461 static void pci_ti1570_write(cpu_mips_t *cpu,struct pci_device *dev,
1462 int reg,m_uint32_t value)
1463 {
1464 struct pa_a1_data *d = dev->priv_data;
1465
1466 #if DEBUG_ACCESS
1467 TI1570_LOG(d,"pci_ti1570_write: write reg 0x%x, value 0x%x\n",reg,value);
1468 #endif
1469
1470 switch(reg) {
1471 case PCI_REG_BAR0:
1472 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
1473 TI1570_LOG(d,"registers are mapped at 0x%x\n",value);
1474 break;
1475 }
1476 }
1477
1478 /*
1479 * pci_plx9060es_read()
1480 */
1481 static m_uint32_t pci_plx9060es_read(cpu_mips_t *cpu,struct pci_device *dev,
1482 int reg)
1483 {
1484 #if DEBUG_ACCESS
1485 TI1570_LOG(d,"PLX9060ES","read reg 0x%x\n",reg);
1486 #endif
1487 switch(reg) {
1488 default:
1489 return(0);
1490 }
1491 }
1492
1493 /*
1494 * pci_plx9060es_write()
1495 */
1496 static void pci_plx9060es_write(cpu_mips_t *cpu,struct pci_device *dev,
1497 int reg,m_uint32_t value)
1498 {
1499 #if DEBUG_ACCESS
1500 TI1570_LOG(d,"PLX9060ES","write reg 0x%x, value 0x%x\n",reg,value);
1501 #endif
1502
1503 switch(reg) {
1504 }
1505 }
1506
1507 /* Reset the TI1570 */
1508 static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem)
1509 {
1510 ti1570_clear_tx_fifo(d);
1511
1512 d->tcr_wi_pos = d->tcr_woi_pos = 0;
1513 d->rcr_wi_pos = d->rcr_woi_pos = 0;
1514
1515 if (clear_ctrl_mem)
1516 memset(d->ctrl_mem_ptr,0,TI1570_CTRL_MEM_SIZE);
1517 }
1518
1519 /*
1520 * dev_c7200_pa_a1_init()
1521 *
1522 * Add a PA-A1 port adapter into specified slot.
1523 */
1524 int dev_c7200_pa_a1_init(c7200_t *router,char *name,u_int pa_bay)
1525 {
1526 struct pci_device *pci_dev_ti,*pci_dev_plx;
1527 struct pa_a1_data *d;
1528 struct vdevice *dev;
1529 m_uint8_t *p;
1530
1531 /* Allocate the private data structure for TI1570 chip */
1532 if (!(d = malloc(sizeof(*d)))) {
1533 fprintf(stderr,"%s (TI1570): out of memory\n",name);
1534 return(-1);
1535 }
1536
1537 memset(d,0,sizeof(*d));
1538
1539 /* Set the EEPROM */
1540 c7200_pa_set_eeprom(router,pa_bay,&eeprom_pa_a1);
1541
1542 /* Add PCI device TI1570 */
1543 pci_dev_ti = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1544 TI1570_PCI_VENDOR_ID,TI1570_PCI_PRODUCT_ID,
1545 0,0,C7200_NETIO_IRQ,d,
1546 NULL,pci_ti1570_read,pci_ti1570_write);
1547
1548 if (!pci_dev_ti) {
1549 fprintf(stderr,"%s (TI1570): unable to create PCI device TI1570.\n",
1550 name);
1551 return(-1);
1552 }
1553
1554 /* Add PCI device PLX9060ES */
1555 pci_dev_plx = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1556 PLX_9060ES_PCI_VENDOR_ID,
1557 PLX_9060ES_PCI_PRODUCT_ID,
1558 1,0,C7200_NETIO_IRQ,d,
1559 NULL,pci_plx9060es_read,pci_plx9060es_write);
1560
1561 if (!pci_dev_plx) {
1562 fprintf(stderr,"%s (PLX_9060ES): unable to create PCI device "
1563 "PLX 9060ES.\n",name);
1564 return(-1);
1565 }
1566
1567 /* Create the TI1570 structure */
1568 d->name = name;
1569 d->vm = router->vm;
1570 d->pci_dev_ti = pci_dev_ti;
1571 d->pci_dev_plx = pci_dev_plx;
1572
1573 /* Allocate the control memory */
1574 if (!(d->ctrl_mem_ptr = malloc(TI1570_CTRL_MEM_SIZE))) {
1575 fprintf(stderr,"%s (PA-A1): unable to create control memory.\n",name);
1576 return(-1);
1577 }
1578
1579 /* Standard tables for the TI1570 */
1580 p = (m_uint8_t *)d->ctrl_mem_ptr;
1581
1582 d->iregs = (m_uint32_t *)(p + TI1570_INTERNAL_REGS_OFFSET);
1583 d->tx_sched_table = (m_uint32_t *)(p + TI1570_TX_SCHED_OFFSET);
1584 d->tx_dma_table = (ti1570_tx_dma_entry_t *)(p + TI1570_TX_DMA_TABLE_OFFSET);
1585 d->rx_vpi_vci_dma_table = (m_uint32_t *)(p+TI1570_RX_DMA_PTR_TABLE_OFFSET);
1586 d->rx_dma_table = (ti1570_rx_dma_entry_t *)(p + TI1570_RX_DMA_TABLE_OFFSET);
1587 d->rx_fbr_table = (ti1570_rx_fbr_entry_t *)(p + TI1570_FREE_BUFFERS_OFFSET);
1588
1589 ti1570_reset(d,TRUE);
1590
1591 /* Create the device itself */
1592 if (!(dev = dev_create(name))) {
1593 fprintf(stderr,"%s (PA-A1): unable to create device.\n",name);
1594 return(-1);
1595 }
1596
1597 dev->phys_addr = 0;
1598 dev->phys_len = 0x200000;
1599 dev->handler = dev_pa_a1_access;
1600
1601 /* Store device info */
1602 dev->priv_data = d;
1603 d->dev = dev;
1604
1605 /* Store device info into the router structure */
1606 return(c7200_pa_set_drvinfo(router,pa_bay,d));
1607 }
1608
1609 /* Remove a PA-A1 from the specified slot */
1610 int dev_c7200_pa_a1_shutdown(c7200_t *router,u_int pa_bay)
1611 {
1612 struct c7200_pa_bay *bay;
1613 struct pa_a1_data *d;
1614
1615 if (!(bay = c7200_pa_get_info(router,pa_bay)))
1616 return(-1);
1617
1618 d = bay->drv_info;
1619
1620 /* Remove the PA EEPROM */
1621 c7200_pa_unset_eeprom(router,pa_bay);
1622
1623 /* Remove the PCI devices */
1624 pci_dev_remove(d->pci_dev_ti);
1625 pci_dev_remove(d->pci_dev_plx);
1626
1627 /* Remove the device from the VM address space */
1628 vm_unbind_device(router->vm,d->dev);
1629 cpu_group_rebuild_mts(router->vm->cpu_group);
1630
1631 /* Free the control memory */
1632 free(d->ctrl_mem_ptr);
1633
1634 /* Free the device structure itself */
1635 free(d->dev);
1636 free(d);
1637 return(0);
1638 }
1639
1640 /* Bind a Network IO descriptor to a specific port */
1641 int dev_c7200_pa_a1_set_nio(c7200_t *router,u_int pa_bay,u_int port_id,
1642 netio_desc_t *nio)
1643 {
1644 struct pa_a1_data *d;
1645
1646 if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1647 return(-1);
1648
1649 if (d->nio != NULL)
1650 return(-1);
1651
1652 d->nio = nio;
1653 d->tx_tid = ptask_add((ptask_callback)ti1570_scan_tx_sched_table,d,NULL);
1654 netio_rxl_add(nio,(netio_rx_handler_t)ti1570_handle_rx_cell,d,NULL);
1655 return(0);
1656 }
1657
1658 /* Unbind a Network IO descriptor to a specific port */
1659 int dev_c7200_pa_a1_unset_nio(c7200_t *router,u_int pa_bay,u_int port_id)
1660 {
1661 struct pa_a1_data *d;
1662
1663 if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1664 return(-1);
1665
1666 if (d->nio) {
1667 ptask_remove(d->tx_tid);
1668 netio_rxl_remove(d->nio);
1669 d->nio = NULL;
1670 }
1671 return(0);
1672 }
1673
/* PA-A1 driver descriptor registered with the C7200 PA framework.
   Field order follows struct c7200_pa_driver (dev_c7200.h, not visible
   here) -- presumably: device type string, a flag/port count of 1, then
   the init/shutdown/set_nio/unset_nio callbacks; verify against the
   header. */
struct c7200_pa_driver dev_c7200_pa_a1_driver = {
   "PA-A1", 1,
   dev_c7200_pa_a1_init,
   dev_c7200_pa_a1_shutdown,
   dev_c7200_pa_a1_set_nio,
   dev_c7200_pa_a1_unset_nio,
};

  ViewVC Help
Powered by ViewVC 1.1.26