/[dynamips]/upstream/dynamips-0.2.7-RC3/dev_pa_a1.c
This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.7-RC3/dev_pa_a1.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 9 - (show annotations)
Sat Oct 6 16:26:06 2007 UTC (16 years, 5 months ago) by dpavlin
File MIME type: text/plain
File size: 54760 byte(s)
dynamips-0.2.7-RC3

1 /*
2 * Cisco router simulation platform.
3 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4 *
5 * PA-A1 ATM interface based on TI1570 and PLX 9060-ES.
6 *
7 * EEPROM types:
8 * - 0x17: PA-A1-OC3MM
9 * - 0x2C: PA-A1-OC3SM
10 * - 0x2D: PA-A1-OC3UTP
11 *
12 * IOS command: "sh controller atm2/0"
13 *
14 * Manuals:
15 *
16 * Texas Instruments TNETA1570 ATM segmentation and reassembly device
17 * with integrated 64-bit PCI-host interface
18 * http://focus.ti.com/docs/prod/folders/print/tneta1570.html
19 *
20 * PLX 9060-ES
21 * http://www.plxtech.com/products/io_accelerators/PCI9060/default.htm
22 *
23 * TODO:
24 * - RX error handling and RX AAL5-related stuff
25 * - HEC and AAL5 CRC fields.
26 *
27 * Cell trains for faster NETIO communications ?
28 */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <unistd.h>
34 #include <errno.h>
35
36 #include "crc.h"
37 #include "atm.h"
38 #include "cpu.h"
39 #include "vm.h"
40 #include "dynamips.h"
41 #include "memory.h"
42 #include "device.h"
43 #include "ptask.h"
44 #include "dev_c7200.h"
45
46 /* Debugging flags */
47 #define DEBUG_ACCESS 0
48 #define DEBUG_UNKNOWN 0
49 #define DEBUG_TRANSMIT 0
50 #define DEBUG_RECEIVE 0
51 #define DEBUG_TX_DMA 0
52
53 /* PCI vendor/product codes */
54 #define TI1570_PCI_VENDOR_ID 0x104c
55 #define TI1570_PCI_PRODUCT_ID 0xa001
56
57 #define PLX_9060ES_PCI_VENDOR_ID 0x10b5
58 #define PLX_9060ES_PCI_PRODUCT_ID 0x906e
59
60 /* Number of buffers transmitted at each TX DMA ring scan pass */
61 #define TI1570_TXDMA_PASS_COUNT 16
62
63 /* TI1570 Internal Registers (p.58 of doc) */
64 #define TI1570_REG_CONFIG 0x0000 /* Configuration registers */
65 #define TI1570_REG_STATUS 0x0001 /* Status register */
66 #define TI1570_REG_IMASK 0x0002 /* Interrupt-mask register */
67 #define TI1570_REG_RGT_RAT 0x0003 /* RGT + RAT cycle-counter */
68 #define TI1570_REG_RX_UNKNOWN 0x0004 /* RX Unknown Register */
69 #define TI1570_REG_TX_CRING_SIZE 0x0005 /* TX Completion ring sizes */
70 #define TI1570_REG_RX_CRING_SIZE 0x0006 /* RX Completion ring sizes */
71 #define TI1570_REG_TX_PSR_SIZE 0x0007 /* TX Pkt-seg ring size + FIFO */
72 #define TI1570_REG_HEC_AAL5_DISC 0x0008 /* HEC err + AAL5 CPCS discard */
73 #define TI1570_REG_UNK_PROTO_CNT 0x0009 /* Unknown-protocols counter */
74 #define TI1570_REG_RX_ATM_COUNT 0x000A /* ATM-cells-received counter */
75 #define TI1570_REG_TX_ATM_COUNT 0x000B /* ATM-cells-tranmitted counter */
76 #define TI1570_REG_TX_RX_FIFO 0x000C /* TX/RX FIFO occupancy, VCI mask */
77 #define TI1570_REG_SCHED_SIZE 0x000D /* Scheduler Table size */
78 #define TI1570_REG_SOFT_RESET 0x000E /* Software Reset */
79 #define TI1570_REG_TCR_WOI_ADDR 0x0080 /* TX Compl. Ring w/o IRQ addr. */
80 #define TI1570_REG_TCR_WI_ADDR 0x0081 /* TX Compl. Ring w/ IRQ addr. */
81 #define TI1570_REG_RCR_WOI_ADDR 0x0082 /* RX Compl. Ring w/o IRQ addr. */
82 #define TI1570_REG_RCR_WI_ADDR 0x0083 /* RX Compl. Ring w/ IRQ addr. */
83
84 /* TI1570 configuration register (p.59) */
85 #define TI1570_CFG_EN_RAT 0x00000001 /* Reassembly Aging */
86 #define TI1570_CFG_BP_SEL 0x00000002 /* IRQ on packet or buffer */
87 #define TI1570_CFG_EN_RX 0x00000010 /* RX enable */
88 #define TI1570_CFG_EN_TX 0x00000020 /* TX enable */
89 #define TI1570_CFG_SMALL_MAP 0x00000040 /* Small map */
90
91 /* TI1570 status register (p.61) */
92 #define TI1570_STAT_CP_TX 0x00000001 /* Transmit completion ring */
93 #define TI1570_STAT_RX_IRR 0x00000040 /* Receive unknown reg set */
94 #define TI1570_STAT_CP_RX 0x00000080 /* Receive completion ring */
95 #define TI1570_STAT_TX_FRZ 0x00000100 /* TX Freeze */
96 #define TI1570_STAT_RX_FRZ 0x00000200 /* RX Freeze */
97
98 /* Mask for RX/TX completion-ring sizes */
99 #define TI1570_TCR_SIZE_MASK 0x00001FFF /* TX compl. ring size mask */
100 #define TI1570_RCR_SIZE_MASK 0x000003FF /* RX compl. ring size mask */
101
102 /* TI1750 TX packet segmentation ring register */
103 #define TI1570_PSR_SIZE_MASK 0x000000FF /* pkt-seg ring size */
104
105 /* Total size of the TI1570 Control Memory */
106 #define TI1570_CTRL_MEM_SIZE 0x100000
107
108 /* Offsets of the TI1570 structures (p.66) */
109 #define TI1570_TX_SCHED_OFFSET 0x0000 /* TX scheduler table */
110 #define TI1570_INTERNAL_REGS_OFFSET 0x3200 /* Internal Registers */
111 #define TI1570_FREE_BUFFERS_OFFSET 0x3800 /* Free-Buffer Pointers */
112 #define TI1570_RX_DMA_PTR_TABLE_OFFSET 0x4000 /* RX VPI/VCI pointer table */
113 #define TI1570_TX_DMA_TABLE_OFFSET 0x8000 /* TX DMA state table */
114 #define TI1570_RX_DMA_TABLE_OFFSET 0x10000 /* RX DMA state table */
115
116 /* TX scheduler table */
117 #define TI1570_TX_SCHED_ENTRY_COUNT 6200
118 #define TI1570_TX_SCHED_ENTRY_MASK 0x3FF /* Entry mask */
119 #define TI1570_TX_SCHED_E0_SHIFT 0 /* Shift for entry 0 */
120 #define TI1570_TX_SCHED_E1_SHIFT 16 /* Shift for entry 0 */
121
122 /* TX DMA state table */
123 #define TI1570_TX_DMA_ACT 0x80000000 /* ACTive (word 0) */
124 #define TI1570_TX_DMA_SOP 0x40000000 /* Start of Packet (SOP) */
125 #define TI1570_TX_DMA_EOP 0x20000000 /* End of Packet (EOP) */
126 #define TI1570_TX_DMA_ABORT 0x10000000 /* Abort */
127 #define TI1570_TX_DMA_TCR_SELECT 0x02000000 /* TX comp. ring selection */
128 #define TI1570_TX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
129
130 #define TI1570_TX_DMA_AAL_TRWPTI 0x00000000 /* Transp. AAL w/ PTI set */
131 #define TI1570_TX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
132 #define TI1570_TX_DMA_AAL_TRWOPTI 0x08000000 /* Transp. AAL w/o PTI set */
133
134 #define TI1570_TX_DMA_OFFSET_MASK 0x00FF0000
135 #define TI1570_TX_DMA_OFFSET_SHIFT 16
136 #define TI1570_TX_DMA_DCOUNT_MASK 0x0000FFFF
137
138 #define TI1570_TX_DMA_ON 0x80000000 /* DMA state (word 3) */
139 #define TI1570_TX_DMA_RING_OFFSET_MASK 0x3FFFFF00
140 #define TI1570_TX_DMA_RING_OFFSET_SHIFT 8
141 #define TI1570_TX_DMA_RING_INDEX_MASK 0x000000FF
142
143 #define TI1570_TX_DMA_RING_AAL5_LEN_MASK 0x0000FFFF
144
145 typedef struct ti1570_tx_dma_entry ti1570_tx_dma_entry_t;
146 struct ti1570_tx_dma_entry {
147 m_uint32_t ctrl_buf; /* Ctrl, Buffer Offset, Buffer data-byte count */
148 m_uint32_t cb_addr; /* Current Buffer Address */
149 m_uint32_t atm_hdr; /* 4-byte ATM header */
150 m_uint32_t dma_state; /* DMA state + Packet segmentation ring address */
151 m_uint32_t nb_addr; /* Next Buffer address */
152 m_uint32_t sb_addr; /* Start of Buffer address */
153 m_uint32_t aal5_crc; /* Partial AAL5-transmit CRC */
154 m_uint32_t aal5_ctrl; /* AAL5-control field and length field */
155 };
156
157 /* TX Packet-Segmentation Rings */
158 #define TI1570_TX_RING_OWN 0x80000000 /* If set, packet is ready */
159 #define TI1570_TX_RING_PTR_MASK 0x3FFFFFFF /* Buffer pointer */
160
161 /* TX Data Buffers */
162 #define TI1570_TX_BUFFER_RDY 0x80000000 /* If set, buffer is ready */
163 #define TI1570_TX_BUFFER_SOP 0x40000000 /* First buffer of packet */
164 #define TI1570_TX_BUFFER_EOP 0x20000000 /* Last buffer of packet */
165 #define TI1570_TX_BUFFER_ABORT 0x10000000 /* Abort */
166
167 #define TI1570_TX_BUFFER_OFFSET_MASK 0x00FF0000
168 #define TI1570_TX_BUFFER_OFFSET_SHIFT 16
169 #define TI1570_TX_BUFFER_DCOUNT_MASK 0x0000FFFF
170
171 typedef struct ti1570_tx_buffer ti1570_tx_buffer_t;
172 struct ti1570_tx_buffer {
173 m_uint32_t ctrl_buf; /* Ctrl, Buffer offset, Buffer data-byte count */
174 m_uint32_t nb_addr; /* Start-of-next buffer pointer */
175 m_uint32_t atm_hdr; /* 4-byte ATM header */
176 m_uint32_t aal5_ctrl; /* PCS-UU/CPI field (AAL5 control field) */
177 };
178
179 /* TX completion-ring */
180 #define TI1570_TCR_OWN 0x80000000 /* OWNner bit */
181 #define TI1570_TCR_ABORT 0x40000000 /* Abort */
182
183 /* RX VPI/VCI DMA pointer table */
184 #define TI1570_RX_VPI_ENABLE 0x80000000 /* VPI enabled ? */
185 #define TI1570_RX_BASE_PTR_MASK 0x7FFF0000 /* Base pointer mask */
186 #define TI1570_RX_BASE_PTR_SHIFT 16 /* Base pointer shift */
187 #define TI1570_RX_VCI_RANGE_MASK 0x0000FFFF /* Valid VCI range */
188
189 /* RX DMA state table (p.36) */
190 #define TI1570_RX_DMA_ACT 0x80000000 /* ACTive (word 0) */
191 #define TI1570_RX_DMA_RCR_SELECT 0x20000000 /* RX comp. ring selection */
192 #define TI1570_RX_DMA_WAIT_EOP 0x10000000 /* Wait for EOP */
193 #define TI1570_RX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
194
195 #define TI1570_RX_DMA_AAL_PTI 0x00000000 /* PTI based tr. AAL pkt */
196 #define TI1570_RX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
197 #define TI1570_RX_DMA_AAL_CNT 0x08000000 /* Cnt based tr. AAL pkt */
198
199 #define TI1570_RX_DMA_FIFO 0x02000000 /* FIFO used for free bufs */
200
201 #define TI1570_RX_DMA_TR_CNT_MASK 0xFFFF0000 /* Cnt-based Tr-AAL */
202 #define TI1570_RX_DMA_TR_CNT_SHIFT 16
203 #define TI1570_RX_DMA_CB_LEN_MASK 0x0000FFFF /* Current buffer length */
204
205 #define TI1570_RX_DMA_ON 0x80000000 /* DMA state (word 6) */
206 #define TI1570_RX_DMA_FILTER 0x40000000 /* Filter */
207
208 #define TI1570_RX_DMA_FB_PTR_MASK 0x3FFFFFFF /* Free-buffer ptr mask */
209 #define TI1570_RX_DMA_FB_INDEX_MASK 0x000000FF /* Index with Free-buf ring */
210
211 typedef struct ti1570_rx_dma_entry ti1570_rx_dma_entry_t;
212 struct ti1570_rx_dma_entry {
213 m_uint32_t ctrl; /* Control field, EFCN cell cnt, pkt length */
214 m_uint32_t cb_addr; /* Current Buffer Address */
215 m_uint32_t sb_addr; /* Start of Buffer address */
216 m_uint32_t cb_len; /* Transp-AAL pkt counter, current buf length */
217 m_uint32_t sp_ptr; /* Start-of-packet pointer */
218 m_uint32_t aal5_crc; /* Partial AAL5-receive CRC */
219 m_uint32_t fbr_entry; /* Free-buffer ring-pointer table entry */
220 m_uint32_t timeout; /* Timeout value, current timeout count */
221 };
222
223 /* RX free-buffer ring pointer table entry (p.39) */
224 #define TI1570_RX_FBR_PTR_MASK 0xFFFFFFFC
225 #define TI1570_RX_FBR_BS_MASK 0xFFFF0000 /* Buffer size mask */
226 #define TI1570_RX_FBR_BS_SHIFT 16
227 #define TI1570_RX_FBR_RS_MASK 0x0000FC00 /* Ring size mask */
228 #define TI1570_RX_FBR_RS_SHIFT 10
229 #define TI1570_RX_FBR_IDX_MASK 0x000003FF /* Current index mask */
230
231 typedef struct ti1570_rx_fbr_entry ti1570_rx_fbr_entry_t;
232 struct ti1570_rx_fbr_entry {
233 m_uint32_t fbr_ptr; /* RX free-buffer ring pointer */
234 m_uint32_t ring_size; /* Ring size and buffer size */
235 };
236
237 /* RX buffer pointer (p.41) */
238 #define TI1570_RX_BUFPTR_OWN 0x80000000 /* If set, buffer is ready */
239 #define TI1570_RX_BUFPTR_MASK 0x3FFFFFFF /* Buffer address mask */
240
241 /* RX data buffer (p.42) */
242 #define TI1570_RX_BUFFER_SOP 0x80000000 /* Start-of-Packet buffer */
243 #define TI1570_RX_BUFFER_EOP 0x40000000 /* End-of-Packet buffer */
244
245 typedef struct ti1570_rx_buffer ti1570_rx_buffer_t;
246 struct ti1570_rx_buffer {
247 m_uint32_t reserved; /* Reserved, not used by the TI1570 */
248 m_uint32_t ctrl; /* Control field, Start of next buffer pointer */
249 m_uint32_t atm_hdr; /* ATM header */
250 m_uint32_t user; /* User-defined value */
251 };
252
253 /* Internal structure to hold free buffer info */
254 typedef struct ti1570_rx_buf_holder ti1570_rx_buf_holder_t;
255 struct ti1570_rx_buf_holder {
256 m_uint32_t buf_addr;
257 m_uint32_t buf_size;
258 ti1570_rx_buffer_t rx_buf;
259 };
260
261 /* RX completion ring entry */
262 #define TI1570_RCR_PKT_OVFLW 0x80000000 /* Packet overflow (word 0) */
263 #define TI1570_RCR_CRC_ERROR 0x40000000 /* CRC error */
264 #define TI1570_RCR_BUF_STARV 0x20000000 /* Buffer starvation */
265 #define TI1570_RCR_TIMEOUT 0x10000000 /* Reassembly timeout */
266 #define TI1570_RCR_ABORT 0x08000000 /* Abort condition */
267 #define TI1570_RCR_AAL5 0x04000000 /* AAL5 indicator */
268
269 #define TI1570_RCR_VALID 0x80000000 /* Start-ptr valid (word 2) */
270
271 #define TI1570_RCR_OWN 0x80000000 /* Buffer ready (word 4) */
272 #define TI1570_RCR_ERROR 0x40000000 /* Error entry */
273
274 typedef struct ti1570_rcr_entry ti1570_rcr_entry_t;
275 struct ti1570_rcr_entry {
276 m_uint32_t atm_hdr; /* ATM header */
277 m_uint32_t error; /* Error Indicator + Congestion cell count */
278 m_uint32_t sp_addr; /* Start of packet */
279 m_uint32_t aal5_trailer; /* AAL5 trailer */
280 m_uint32_t fbr_entry; /* Free-buffer ring-pointer table entry */
281 m_uint32_t res[3]; /* Reserved, not used by the TI1570 */
282 };
283
284 /* TI1570 Data */
285 struct pa_a1_data {
286 char *name;
287
288 /* IRQ clearing counter */
289 u_int irq_clear_count;
290
291 /* Control Memory pointer */
292 m_uint32_t *ctrl_mem_ptr;
293
294 /* TI1570 internal registers */
295 m_uint32_t *iregs;
296
297 /* TX FIFO cell */
298 m_uint8_t txfifo_cell[ATM_CELL_SIZE];
299 m_uint32_t txfifo_avail,txfifo_pos;
300
301 /* TX Scheduler table */
302 m_uint32_t *tx_sched_table;
303
304 /* TX DMA state table */
305 ti1570_tx_dma_entry_t *tx_dma_table;
306
307 /* TX/RX completion ring current position */
308 m_uint32_t tcr_wi_pos,tcr_woi_pos;
309 m_uint32_t rcr_wi_pos,rcr_woi_pos;
310
311 /* RX VPI/VCI DMA pointer table */
312 m_uint32_t *rx_vpi_vci_dma_table;
313
314 /* RX DMA state table */
315 ti1570_rx_dma_entry_t *rx_dma_table;
316
317 /* RX Free-buffer ring pointer table */
318 ti1570_rx_fbr_entry_t *rx_fbr_table;
319
320 /* Virtual device */
321 struct vdevice *dev;
322
323 /* PCI device information */
324 struct pci_device *pci_dev_ti,*pci_dev_plx;
325
326 /* Virtual machine */
327 vm_instance_t *vm;
328
329 /* NetIO descriptor */
330 netio_desc_t *nio;
331
332 /* TX ring scanner task id */
333 ptask_id_t tx_tid;
334 };
335
336 /* Log a TI1570 message */
337 #define TI1570_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
338
339 /* Reset the TI1570 (forward declaration) */
340 static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem);
341
342 /* Update the interrupt status */
343 static inline void dev_pa_a1_update_irq_status(struct pa_a1_data *d)
344 {
345 if (d->iregs[TI1570_REG_STATUS] & d->iregs[TI1570_REG_IMASK]) {
346 pci_dev_trigger_irq(d->vm,d->pci_dev_ti);
347 } else {
348 pci_dev_clear_irq(d->vm,d->pci_dev_ti);
349 }
350 }
351
/*
 * dev_pa_a1_access()
 *
 * Memory-mapped access handler for the TI1570 register/control-memory
 * window.  Called by the MTS layer for every read or write in the
 * device aperture.
 *
 *   cpu     : CPU performing the access
 *   dev     : virtual device (priv_data points to our pa_a1_data)
 *   offset  : byte offset of the access within the device window
 *   op_size : access size in bytes
 *   op_type : MTS_READ or MTS_WRITE
 *   data    : value read (out) or value written (in)
 *
 * Always returns NULL (the access is fully emulated, never remapped).
 */
void *dev_pa_a1_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                       u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct pa_a1_data *d = dev->priv_data;

   /* default read value for unhandled offsets */
   if (op_type == MTS_READ)
      *data = 0;

#if DEBUG_ACCESS
   if (op_type == MTS_READ) {
      cpu_log(cpu,"TI1570","read access to offset = 0x%x, pc = 0x%llx\n",
              offset,cpu_get_pc(cpu));
   } else {
      cpu_log(cpu,"TI1570","write access to vaddr = 0x%x, pc = 0x%llx, "
              "val = 0x%llx\n",offset,cpu_get_pc(cpu),*data);
   }
#endif

   /* Specific cases */
   switch(offset) {
      /* Status register: the second consecutive read acknowledges
         (clears) the low 10 status bits, then the IRQ line is
         re-evaluated.
         NOTE(review): this case "break"s into the control-memory
         access below (0x3204 < TI1570_CTRL_MEM_SIZE); presumably
         iregs aliases ctrl_mem at offset 0x3200 so the same (possibly
         just-cleared) value is re-read — confirm against the device
         init code. */
      case 0x3204:
         if (op_type == MTS_READ) {
            *data = d->iregs[TI1570_REG_STATUS];

            if (++d->irq_clear_count == 2) {
               d->iregs[TI1570_REG_STATUS] &= ~0x3FF;
               d->irq_clear_count = 0;
            }

            dev_pa_a1_update_irq_status(d);
         }
         break;

      /* Software Reset register.
         NOTE(review): the reset is triggered on reads as well as
         writes — verify this is intentional. */
      case 0x3238:
         TI1570_LOG(d,"reset issued.\n");
         ti1570_reset(d,FALSE);
         break;

      /* Unknown register: reads return the constant 0xa6
         (purpose not evident from this file). */
      case 0x18000c:
         if (op_type == MTS_READ) {
            *data = 0xa6;
            return NULL;
         }
         break;
   }

   /* Control Memory access (word-indexed backing store) */
   if (offset < TI1570_CTRL_MEM_SIZE) {
      if (op_type == MTS_READ)
         *data = d->ctrl_mem_ptr[offset >> 2];
      else
         d->ctrl_mem_ptr[offset >> 2] = *data;
      return NULL;
   }

   /* Unknown offset */
#if DEBUG_UNKNOWN
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write to unknown addr 0x%x, value=0x%llx, "
              "pc=0x%llx (size=%u)\n",offset,*data,cpu_get_pc(cpu),op_size);
   }
#endif
   return NULL;
}
424
425 /* Fetch a TX data buffer from host memory */
426 static void ti1570_read_tx_buffer(struct pa_a1_data *d,m_uint32_t addr,
427 ti1570_tx_buffer_t *tx_buf)
428 {
429 physmem_copy_from_vm(d->vm,tx_buf,addr,sizeof(ti1570_tx_buffer_t));
430
431 /* byte-swapping */
432 tx_buf->ctrl_buf = vmtoh32(tx_buf->ctrl_buf);
433 tx_buf->nb_addr = vmtoh32(tx_buf->nb_addr);
434 tx_buf->atm_hdr = vmtoh32(tx_buf->atm_hdr);
435 tx_buf->aal5_ctrl = vmtoh32(tx_buf->aal5_ctrl);
436 }
437
438 /* Acquire a TX buffer */
439 static int ti1570_acquire_tx_buffer(struct pa_a1_data *d,
440 ti1570_tx_dma_entry_t *tde,
441 m_uint32_t buf_addr)
442 {
443 ti1570_tx_buffer_t tx_buf;
444 m_uint32_t buf_offset;
445
446 #if DEBUG_TRANSMIT
447 TI1570_LOG(d,"ti1570_acquire_tx_buffer: acquiring buffer at address 0x%x\n",
448 buf_addr);
449 #endif
450
451 /* Read the TX buffer from host memory */
452 ti1570_read_tx_buffer(d,buf_addr,&tx_buf);
453
454 /* The buffer must be ready to be acquired */
455 if (!(tx_buf.ctrl_buf & TI1570_TX_BUFFER_RDY))
456 return(FALSE);
457
458 /* Put the TX buffer data into the TX DMA state entry */
459 tde->ctrl_buf = tx_buf.ctrl_buf;
460 tde->nb_addr = tx_buf.nb_addr << 2;
461
462 /* Read the ATM header only from the first buffer */
463 if (tx_buf.ctrl_buf & TI1570_TX_BUFFER_SOP) {
464 tde->atm_hdr = tx_buf.atm_hdr;
465 tde->aal5_ctrl = tx_buf.aal5_ctrl;
466 tde->aal5_crc = 0xFFFFFFFF;
467 }
468
469 /* Compute the current-buffer-data address */
470 buf_offset = tx_buf.ctrl_buf & TI1570_TX_BUFFER_OFFSET_MASK;
471 buf_offset >>= TI1570_TX_BUFFER_OFFSET_SHIFT;
472 tde->cb_addr = buf_addr + sizeof(tx_buf) + buf_offset;
473
474 /* Remember the start address of the buffer */
475 tde->sb_addr = buf_addr;
476 return(TRUE);
477 }
478
479 /* Returns TRUE if the TX DMA entry is for an AAL5 packet */
480 static inline int ti1570_is_tde_aal5(ti1570_tx_dma_entry_t *tde)
481 {
482 m_uint32_t pkt_type;
483
484 pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
485 return(pkt_type == TI1570_TX_DMA_AAL_AAL5);
486 }
487
488 /* Update the AAL5 partial CRC */
489 static void ti1570_update_aal5_crc(struct pa_a1_data *d,
490 ti1570_tx_dma_entry_t *tde)
491 {
492 tde->aal5_crc = crc32_compute(tde->aal5_crc,
493 &d->txfifo_cell[ATM_HDR_SIZE],
494 ATM_PAYLOAD_SIZE);
495 }
496
497 /*
498 * Update the TX DMA entry buffer offset and count when "data_len" bytes
499 * have been transmitted.
500 */
501 static void ti1570_update_tx_dma_bufinfo(ti1570_tx_dma_entry_t *tde,
502 m_uint32_t buf_size,
503 m_uint32_t data_len)
504 {
505 m_uint32_t tmp,tot_len;
506
507 /* update the current buffer address */
508 tde->cb_addr += data_len;
509
510 /* set the remaining byte count */
511 tmp = tde->ctrl_buf & ~TI1570_TX_BUFFER_DCOUNT_MASK;
512 tde->ctrl_buf = tmp + (buf_size - data_len);
513
514 /* update the AAL5 count */
515 if (ti1570_is_tde_aal5(tde)) {
516 tot_len = tde->aal5_ctrl & TI1570_TX_DMA_RING_AAL5_LEN_MASK;
517 tot_len += data_len;
518
519 tmp = (tde->aal5_ctrl & ~TI1570_TX_DMA_RING_AAL5_LEN_MASK) + tot_len;
520 tde->aal5_ctrl = tmp;
521 }
522 }
523
524 /* Clear the TX fifo */
525 static void ti1570_clear_tx_fifo(struct pa_a1_data *d)
526 {
527 d->txfifo_avail = ATM_PAYLOAD_SIZE;
528 d->txfifo_pos = ATM_HDR_SIZE;
529 memset(d->txfifo_cell,0,ATM_CELL_SIZE);
530 }
531
532 /*
533 * Transmit the TX FIFO cell through the NETIO infrastructure if
534 * it is full.
535 */
536 static void ti1570_send_tx_fifo(struct pa_a1_data *d,
537 ti1570_tx_dma_entry_t *tde,
538 int update_aal5_crc)
539 {
540 if (d->txfifo_avail == 0) {
541 #if DEBUG_TRANSMIT
542 TI1570_LOG(d,"ti1570_transmit_cell: transmitting to NETIO device\n");
543 mem_dump(log_file,d->txfifo_cell,ATM_CELL_SIZE);
544 #endif
545 if (update_aal5_crc)
546 ti1570_update_aal5_crc(d,tde);
547
548 netio_send(d->nio,d->txfifo_cell,ATM_CELL_SIZE);
549 ti1570_clear_tx_fifo(d);
550 }
551 }
552
553 /* Add padding to the FIFO */
554 static void ti1570_add_tx_padding(struct pa_a1_data *d,m_uint32_t len)
555 {
556 if (len > d->txfifo_avail) {
557 TI1570_LOG(d,"ti1570_add_tx_padding: trying to add too large "
558 "padding (avail: 0x%x, pad: 0x%x)\n",d->txfifo_avail,len);
559 len = d->txfifo_avail;
560 }
561
562 memset(&d->txfifo_cell[d->txfifo_pos],0,len);
563 d->txfifo_pos += len;
564 d->txfifo_avail -= len;
565 }
566
/*
 * Initialize an ATM cell for transmitting.
 *
 * Builds the cell header in the TX FIFO and copies up to
 * "txfifo_avail" bytes of payload from the current TX buffer.
 * When "set_pti" is true, the PTI field is rewritten to mark the
 * end of packet.  Returns the number of payload bytes consumed.
 */
static m_uint32_t ti1570_init_tx_atm_cell(struct pa_a1_data *d,
                                          ti1570_tx_dma_entry_t *tde,
                                          int set_pti)
{
   m_uint32_t buf_size,len,atm_hdr;

   /* remaining bytes in the buffer, clamped to the room left in the cell */
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
   len = m_min(buf_size,d->txfifo_avail);

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_init_tx_atm_cell: data ptr=0x%x, "
              "buf_size=%u (0x%x), len=%u (0x%x), atm_hdr=0x%x\n",
              tde->cb_addr,buf_size,buf_size,len,len,tde->atm_hdr);
#endif

   /* copy the ATM header */
   atm_hdr = tde->atm_hdr;

   /* rewrite the PTI field to signal end-of-packet */
   if (set_pti) {
      atm_hdr &= ~ATM_PTI_NETWORK;
      atm_hdr |= ATM_PTI_EOP;
   }

   /* store the 4-byte header in VM byte order at the start of the cell */
   *(m_uint32_t *)d->txfifo_cell = htonl(atm_hdr);

   /* compute HEC field */
   atm_insert_hec(d->txfifo_cell);

   /* copy the payload from guest memory into the FIFO cell */
   if (len > 0) {
      physmem_copy_from_vm(d->vm,&d->txfifo_cell[d->txfifo_pos],
                           tde->cb_addr,len);
      d->txfifo_pos += len;
      d->txfifo_avail -= len;
   }

   /* account for the consumed bytes in the DMA state entry */
   ti1570_update_tx_dma_bufinfo(tde,buf_size,len);
   return(len);
}
607
608 /*
609 * Transmit an Transparent-AAL ATM cell through the NETIO infrastructure.
610 */
611 static int ti1570_transmit_transp_cell(struct pa_a1_data *d,
612 ti1570_tx_dma_entry_t *tde,
613 int atm_set_eop,int *buf_end)
614 {
615 m_uint32_t buf_size,len;
616 int pkt_end,last_cell;
617
618 pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
619 buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
620 last_cell = FALSE;
621
622 if (!pkt_end) {
623 len = ti1570_init_tx_atm_cell(d,tde,FALSE);
624 ti1570_send_tx_fifo(d,tde,FALSE);
625
626 if ((buf_size - len) == 0)
627 *buf_end = TRUE;
628
629 return(FALSE);
630 }
631
632 /* this is the end of packet and the last buffer */
633 if (buf_size <= d->txfifo_avail)
634 last_cell = TRUE;
635
636 len = ti1570_init_tx_atm_cell(d,tde,last_cell & atm_set_eop);
637 if (last_cell) ti1570_add_tx_padding(d,d->txfifo_avail);
638 ti1570_send_tx_fifo(d,tde,FALSE);
639 return(last_cell);
640 }
641
642 /* Add the AAL5 trailer to the TX FIFO */
643 static void ti1570_add_aal5_trailer(struct pa_a1_data *d,
644 ti1570_tx_dma_entry_t *tde)
645 {
646 m_uint8_t *trailer;
647
648 trailer = &d->txfifo_cell[ATM_AAL5_TRAILER_POS];
649
650 /* Control field + Length */
651 *(m_uint32_t *)trailer = htonl(tde->aal5_ctrl);
652
653 /* Final CRC-32 computation */
654 tde->aal5_crc = crc32_compute(tde->aal5_crc,
655 &d->txfifo_cell[ATM_HDR_SIZE],
656 ATM_PAYLOAD_SIZE - 4);
657
658 *(m_uint32_t *)(trailer+4) = htonl(~tde->aal5_crc);
659
660 /* Consider the FIFO as full */
661 d->txfifo_avail = 0;
662 }
663
/*
 * Transmit an AAL5 cell through the NETIO infrastructure.
 *
 * Returns TRUE if this is the real end of packet; "*buf_end" is set
 * when the current buffer is fully consumed.
 */
static int ti1570_transmit_aal5_cell(struct pa_a1_data *d,
                                     ti1570_tx_dma_entry_t *tde,
                                     int *buf_end)
{
   m_uint32_t buf_size,len;
   int pkt_end;

   pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_transmit_aal5_cell: data ptr=0x%x, "
              "buf_size=0x%x (%u)\n",tde->cb_addr,buf_size,buf_size);
#endif

   /* If this is not the end of packet, transmit the cell normally
      (the payload is folded into the running AAL5 CRC) */
   if (!pkt_end) {
      len = ti1570_init_tx_atm_cell(d,tde,FALSE);
      ti1570_send_tx_fifo(d,tde,TRUE);

      if ((buf_size - len) == 0)
         *buf_end = TRUE;

      return(FALSE);
   }

   /*
    * This is the end of packet, check if we need to emit a special cell
    * for the AAL5 trailer.
    */
   if ((buf_size + ATM_AAL5_TRAILER_SIZE) <= d->txfifo_avail) {
      /* remaining data + trailer fit in this cell: mark EOP in the PTI */
      len = ti1570_init_tx_atm_cell(d,tde,TRUE);

      /* add the padding */
      ti1570_add_tx_padding(d,d->txfifo_avail - ATM_AAL5_TRAILER_SIZE);

      /* add the AAL5 trailer at offset 40 */
      ti1570_add_aal5_trailer(d,tde);

      /* we can transmit the cell (the trailer already finalized the CRC,
         so no further CRC update) */
      ti1570_send_tx_fifo(d,tde,FALSE);

      *buf_end = TRUE;
      return(TRUE);
   }

   /* Trailer does not fit: transmit the cell normally, the trailer
      will go in a later cell */
   len = ti1570_init_tx_atm_cell(d,tde,FALSE);
   ti1570_add_tx_padding(d,d->txfifo_avail);
   ti1570_send_tx_fifo(d,tde,TRUE);
   return(FALSE);
}
721
/*
 * Update the TX completion ring.
 *
 * Posts the start-of-buffer address of the transmitted buffer into the
 * completion ring selected by the TCR_SELECT bit (with or without
 * interrupt).  If the current ring entry still belongs to the host
 * (OWN bit clear), the TX path is frozen instead.
 */
static void ti1570_update_tx_cring(struct pa_a1_data *d,
                                   ti1570_tx_dma_entry_t *tde)
{
   m_uint32_t tcr_addr,tcr_end,val;

   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
      /* TX completion ring with interrupt */
      tcr_addr = d->iregs[TI1570_REG_TCR_WI_ADDR] + (d->tcr_wi_pos * 4);
   } else {
      /* TX completion ring without interrupt */
      tcr_addr = d->iregs[TI1570_REG_TCR_WOI_ADDR] + (d->tcr_woi_pos * 4);
   }

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_update_tx_cring: posting 0x%x at address 0x%x\n",
              tde->sb_addr,tcr_addr);

   physmem_dump_vm(d->vm,tde->sb_addr,sizeof(ti1570_tx_buffer_t) >> 2);
#endif

   /* we have a TX freeze if the buffer belongs to the host */
   val = physmem_copy_u32_from_vm(d->vm,tcr_addr);
   if (!(val & TI1570_TCR_OWN)) {
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_TX_FRZ;
      return;
   }

   /* put the buffer address in the ring (stored as a word address) */
   val = tde->sb_addr >> 2;

   if (tde->ctrl_buf & TI1570_TX_DMA_ABORT)
      val |= TI1570_TCR_ABORT;

   physmem_copy_u32_to_vm(d->vm,tcr_addr,val);

   /* update the internal position pointer, wrapping at the ring size */
   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
      tcr_end = d->iregs[TI1570_REG_TX_CRING_SIZE] & TI1570_TCR_SIZE_MASK;

      if ((d->tcr_wi_pos++) == tcr_end)
         d->tcr_wi_pos = 0;
   } else {
      /* size of the "without interrupt" ring is in the high word */
      tcr_end = (d->iregs[TI1570_REG_TX_CRING_SIZE] >> 16);
      tcr_end &= TI1570_TCR_SIZE_MASK;

      if ((d->tcr_woi_pos++) == tcr_end)
         d->tcr_woi_pos = 0;
   }
}
772
/*
 * Analyze a TX DMA state table entry.
 *
 * Processes at most one buffer/cell for the DMA channel "index":
 *  - activates the channel from the packet-segmentation ring (PSR)
 *    if no packet is currently being segmented,
 *  - emits one ATM cell (transparent or AAL5),
 *  - posts completed buffers to the TX completion ring and gives the
 *    PSR entry back to the host at end of packet,
 *  - raises the TX completion interrupt when requested.
 *
 * Returns TRUE if the caller may keep scanning this channel.
 */
static int ti1570_scan_tx_dma_entry_single(struct pa_a1_data *d,
                                           m_uint32_t index)
{
   ti1570_tx_dma_entry_t *tde;
   m_uint32_t psr_base,psr_addr,psr_entry,psr_end;
   m_uint32_t buf_addr,buf_size,pkt_type,tmp;
   m_uint32_t psr_index;
   int atm_set_eop = 0;
   int pkt_end,buf_end = 0;

   tde = &d->tx_dma_table[index];

   /* The DMA channel state flag must be ON */
   if (!(tde->dma_state & TI1570_TX_DMA_ON))
      return(FALSE);

#if DEBUG_TX_DMA
   /* We have a running DMA channel */
   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is ON "
              "(ctrl_buf = 0x%x)\n",index,tde->ctrl_buf);
#endif

   /* Is this the start of a new packet ? */
   if (!(tde->ctrl_buf & TI1570_TX_DMA_ACT))
   {
#if DEBUG_TX_DMA
      TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is not ACT\n",
                 index);
#endif

      /* No packet yet, fetch it from the packet-segmentation ring */
      psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
      psr_index = tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK;

      /* Compute address of the current packet segmentation ring entry
         (ring entries are word addresses, hence the << 2) */
      psr_addr = (psr_base + psr_index) << 2;
      psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);

#if DEBUG_TX_DMA
      TI1570_LOG(d,"ti1570_scan_tx_dma_entry: psr_addr = 0x%x, "
                 "psr_entry = 0x%x\n",psr_addr,psr_entry);
#endif

      /* The packet-segmentation-ring entry is owned by host, quit now */
      if (!(psr_entry & TI1570_TX_RING_OWN))
         return(FALSE);

      /* Acquire the first buffer (it MUST be in the ready state) */
      buf_addr = (psr_entry & TI1570_TX_RING_PTR_MASK) << 2;

      if (!ti1570_acquire_tx_buffer(d,tde,buf_addr)) {
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR entry with OWN bit set "
                    "but buffer without RDY bit set.\n");
         return(FALSE);
      }

      /* Set ACT bit for the DMA channel */
      tde->ctrl_buf |= TI1570_TX_DMA_ACT;
   }

   /* Compute the remaining size and determine the packet type */
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
   pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
   pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;

#if DEBUG_TRANSMIT
   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: ctrl_buf=0x%8.8x, "
              "cb_addr=0x%8.8x, atm_hdr=0x%8.8x, dma_state=0x%8.8x\n",
              tde->ctrl_buf, tde->cb_addr, tde->atm_hdr, tde->dma_state);

   TI1570_LOG(d,"ti1570_scan_tx_dma_entry: nb_addr=0x%8.8x, "
              "sb_addr=0x%8.8x, aal5_crc=0x%8.8x, aal5_ctrl=0x%8.8x\n",
              tde->nb_addr, tde->sb_addr, tde->aal5_crc, tde->aal5_ctrl);
#endif

   /*
    * If the current buffer is now empty and if this is not the last
    * buffer in the current packet, try to fetch a new buffer.
    * If the next buffer is not yet ready, we have finished.
    */
   if (!buf_size && !pkt_end && !ti1570_acquire_tx_buffer(d,tde,tde->nb_addr))
      return(FALSE);

   switch(pkt_type) {
      case TI1570_TX_DMA_AAL_TRWPTI:
         atm_set_eop = 1;
         /* fallthrough: TRWPTI is transparent AAL with PTI-marked EOP */

      case TI1570_TX_DMA_AAL_TRWOPTI:
         /* Transmit the ATM cell transparently */
         pkt_end = ti1570_transmit_transp_cell(d,tde,atm_set_eop,&buf_end);
         break;

      case TI1570_TX_DMA_AAL_AAL5:
         pkt_end = ti1570_transmit_aal5_cell(d,tde,&buf_end);
         break;

      default:
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: invalid AAL-type\n");
         return(FALSE);
   }

   /* Re-read the remaining buffer size */
   buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;

   /* Put the buffer address in the transmit completion ring */
   if (buf_end) ti1570_update_tx_cring(d,tde);

   /*
    * If we have reached end of packet (EOP): clear the ACT bit,
    * give back the packet-segmentation ring entry to the host,
    * and increment the PSR index.
    */
   if (pkt_end) {
      tde->ctrl_buf &= ~TI1570_TX_DMA_ACT;

      /* Clear the OWN bit of the packet-segmentation ring entry */
      psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
      psr_index = (tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK);
      psr_addr = (psr_base + psr_index) << 2;

      psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);
      psr_entry &= ~TI1570_TX_RING_OWN;
      physmem_copy_u32_to_vm(d->vm,psr_addr,psr_entry);

      /* Increment the packet-segmentation ring index
         (the ring size is in the high word of the PSR size register) */
      psr_index++;
      psr_end = d->iregs[TI1570_REG_TX_PSR_SIZE] >> 16;
      psr_end &= TI1570_PSR_SIZE_MASK;

      if (psr_index > psr_end) {
         psr_index = 0;
#if DEBUG_TX_DMA
         TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR ring rotation "
                    "(psr_end = %u)\n",psr_end);
#endif
      }

      /* write the new index back into the DMA state word */
      tmp = (tde->dma_state & ~TI1570_TX_DMA_RING_INDEX_MASK);
      tmp |= (psr_index & TI1570_TX_DMA_RING_INDEX_MASK);
      tde->dma_state = tmp;
   }

   /* Generate an interrupt if required: on every completed buffer
      when BP_SEL is set, otherwise only at end of packet */
   if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT)
   {
      if (((d->iregs[TI1570_REG_CONFIG] & TI1570_CFG_BP_SEL) && buf_end) ||
          pkt_end)
      {
         d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_TX;
         dev_pa_a1_update_irq_status(d);
      }
   }

   return(TRUE);
}
929
930 /* Analyze a TX DMA state table entry */
931 static void ti1570_scan_tx_dma_entry(struct pa_a1_data *d,m_uint32_t index)
932 {
933 int i;
934
935 for(i=0;i<TI1570_TXDMA_PASS_COUNT;i++)
936 if (!ti1570_scan_tx_dma_entry_single(d,index))
937 break;
938 }
939
940 /* Analyze the TX schedule table */
941 static void ti1570_scan_tx_sched_table(struct pa_a1_data *d)
942 {
943 m_uint32_t cw,index0,index1;
944 u_int i;
945
946 for(i=0;i<TI1570_TX_SCHED_ENTRY_COUNT>>1;i++) {
947 cw = d->tx_sched_table[i];
948
949 /* We have 2 index in TX DMA state table per word */
950 index0 = (cw >> TI1570_TX_SCHED_E0_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
951 index1 = (cw >> TI1570_TX_SCHED_E1_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
952
953 /* Scan the two entries (null entry => nothing to do) */
954 if (index0) ti1570_scan_tx_dma_entry(d,index0);
955 if (index1) ti1570_scan_tx_dma_entry(d,index1);
956 }
957 }
958
959 /*
960 * Read a RX buffer from the host memory.
961 */
962 static void ti1570_read_rx_buffer(struct pa_a1_data *d,m_uint32_t addr,
963 ti1570_rx_buffer_t *rx_buf)
964 {
965 physmem_copy_from_vm(d->vm,rx_buf,addr,sizeof(ti1570_rx_buffer_t));
966
967 /* byte-swapping */
968 rx_buf->reserved = vmtoh32(rx_buf->reserved);
969 rx_buf->ctrl = vmtoh32(rx_buf->ctrl);
970 rx_buf->atm_hdr = vmtoh32(rx_buf->atm_hdr);
971 rx_buf->user = vmtoh32(rx_buf->user);
972 }
973
/*
 * Update the RX completion ring.
 *
 * Posts a 5-word completion entry describing the packet just reassembled
 * for the RX DMA channel 'rde', then clears the channel's ACT bit and
 * advances the internal position of the selected completion ring.
 *
 * Parameters:
 *   atm_hdr      - ATM header of the last cell received
 *   aal5_trailer - AAL5 trailer word of the packet (0 for non-AAL5)
 *   err_ind      - error indicator bits to report in word 1
 *   fbuf_valid   - non-zero if the start-of-packet pointer is valid
 *
 * Which of the two rings is used (with or without interrupt) depends on
 * the RCR_SELECT bit of the DMA entry. If the host still owns the slot
 * (OWN bit clear), the receive side freezes (RX_FRZ status) and nothing
 * is posted.
 */
static void ti1570_update_rx_cring(struct pa_a1_data *d,
                                   ti1570_rx_dma_entry_t *rde,
                                   m_uint32_t atm_hdr,
                                   m_uint32_t aal5_trailer,
                                   m_uint32_t err_ind,
                                   m_uint32_t fbuf_valid)
{
   m_uint32_t rcr_addr,rcr_end,aal_type,ptr,val;
   ti1570_rcr_entry_t rcre;

   /* Compute the guest address of the next free slot in the ring */
   if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
      /* RX completion ring with interrupt */
      rcr_addr = d->iregs[TI1570_REG_RCR_WI_ADDR];
      rcr_addr += (d->rcr_wi_pos * sizeof(rcre));
   } else {
      /* RX completion ring without interrupt */
      rcr_addr = d->iregs[TI1570_REG_RCR_WOI_ADDR];
      rcr_addr += (d->rcr_woi_pos * sizeof(rcre));
   }

#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_update_rx_cring: posting 0x%x at address 0x%x\n",
              (rde->sp_ptr << 2),rcr_addr);

   physmem_dump_vm(d->vm,rde->sp_ptr<<2,sizeof(ti1570_rx_buffer_t) >> 2);
#endif

   /* we have a RX freeze if the buffer belongs to the host */
   ptr = rcr_addr + OFFSET(ti1570_rcr_entry_t,fbr_entry);
   val = physmem_copy_u32_from_vm(d->vm,ptr);

   if (!(val & TI1570_RCR_OWN)) {
      TI1570_LOG(d,"ti1570_update_rx_cring: RX freeze...\n");
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_RX_FRZ;
      return;
   }

   /* fill the RX completion ring entry and write it back to the host */
   memset(&rcre,0,sizeof(rcre));

   /* word 0: atm header from last cell received */
   rcre.atm_hdr = atm_hdr;

   /* word 1: error indicator (AAL5 flag + caller-supplied error bits) */
   aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
   if (aal_type == TI1570_RX_DMA_AAL_AAL5)
      rcre.error |= TI1570_RCR_AAL5;

   rcre.error |= err_ind;

   /* word 2: Start of packet (word-index pointer, only if valid) */
   if (fbuf_valid)
      rcre.sp_addr = TI1570_RCR_VALID | rde->sp_ptr;

   /* word 3: AAL5 trailer */
   rcre.aal5_trailer = aal5_trailer;

   /* word 4: OWN + error entry + free-buffer ring pointer */
   rcre.fbr_entry = rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK;
   if (err_ind) rcre.fbr_entry |= TI1570_RCR_ERROR;

   /* byte-swap and write this back to the host memory */
   rcre.atm_hdr = htonl(rcre.atm_hdr);
   rcre.error = htonl(rcre.error);
   rcre.sp_addr = htonl(rcre.sp_addr);
   rcre.aal5_trailer = htonl(rcre.aal5_trailer);
   rcre.fbr_entry = htonl(rcre.fbr_entry);
   physmem_copy_to_vm(d->vm,&rcre,rcr_addr,sizeof(rcre));

   /* clear the active bit of the RX DMA entry */
   rde->ctrl &= ~TI1570_RX_DMA_ACT;

   /* update the internal position pointer; positions run 0..rcr_end
      inclusive (the size register presumably stores size-1 -- TODO
      confirm against the TNETA1570 datasheet) */
   if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
      rcr_end = d->iregs[TI1570_REG_RX_CRING_SIZE] & TI1570_RCR_SIZE_MASK;

      if ((d->rcr_wi_pos++) == rcr_end)
         d->rcr_wi_pos = 0;

      /* generate the appropriate IRQ */
      d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_RX;
      dev_pa_a1_update_irq_status(d);
   } else {
      rcr_end = (d->iregs[TI1570_REG_RX_CRING_SIZE] >> 16);
      rcr_end &= TI1570_RCR_SIZE_MASK;

      if ((d->rcr_woi_pos++) == rcr_end)
         d->rcr_woi_pos = 0;
   }
}
1065
/*
 * Acquire a free RX buffer.
 *
 * The buffer pointer is taken either from a FIFO (single pointer in
 * guest memory) or from a free-buffer ring, depending on the FIFO bit
 * of the RX DMA entry. For the ring case, the OWN bit of the consumed
 * pointer is handed back to the host and the ring index is advanced.
 *
 * On success the buffer address/size and the byte-swapped buffer
 * descriptor are stored into 'rbh' for the caller, the descriptor's
 * control word is cleared in guest memory and 'atm_hdr' is stored in
 * its header field.
 *
 * Returns FALSE if no buffer is available (buffer starvation).
 */
static int ti1570_acquire_rx_buffer(struct pa_a1_data *d,
                                    ti1570_rx_dma_entry_t *rde,
                                    ti1570_rx_buf_holder_t *rbh,
                                    m_uint32_t atm_hdr)
{
   ti1570_rx_fbr_entry_t *fbr_entry = NULL;
   m_uint32_t bp_addr,buf_addr,buf_size,buf_idx;
   m_uint32_t ring_index,ring_size;
   m_uint32_t buf_ptr,val;
   int fifo = FALSE;

   /* Only set on the non-FIFO path below; preset to silence the
      compiler's may-be-uninitialized warning. */
   ring_size = 0;
   buf_idx = 0;

   if (rde->ctrl & TI1570_RX_DMA_FIFO) {
      /* FIFO mode: a single buffer pointer, its size comes from the
         lower halfword of the TX_PSR_SIZE register */
      bp_addr = (rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK) << 2;
      buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
      buf_size = d->iregs[TI1570_REG_TX_PSR_SIZE] & 0xFFFF;
      fifo = TRUE;

#if DEBUG_RECEIVE
      TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring FIFO buffer\n");
#endif
   }
   else
   {
      /* Ring mode: locate the free-buffer ring entry for this channel */
      ring_index = rde->fbr_entry & TI1570_RX_DMA_FB_INDEX_MASK;
      fbr_entry = &d->rx_fbr_table[ring_index];

#if DEBUG_RECEIVE
      TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring non-FIFO buffer, "
                 "ring index=%u (0x%x)\n",ring_index,ring_index);
#endif

      /* Compute the number of entries in ring (encoded in units of 16) */
      ring_size = fbr_entry->ring_size & TI1570_RX_FBR_RS_MASK;
      ring_size >>= TI1570_RX_FBR_RS_SHIFT;
      ring_size = (ring_size << 4) + 15 + 1;

      /* Compute the buffer size */
      buf_size = fbr_entry->ring_size & TI1570_RX_FBR_BS_MASK;
      buf_size >>= TI1570_RX_FBR_BS_SHIFT;

      /* Compute the buffer address (ring base + word index) */
      buf_idx = fbr_entry->ring_size & TI1570_RX_FBR_IDX_MASK;
      bp_addr = fbr_entry->fbr_ptr + (buf_idx << 2);

#if DEBUG_RECEIVE
      TI1570_LOG(d,"ti1570_acquire_rx_buffer: ring size=%u (0x%x), "
                 "buf size=%u ATM cells\n",ring_size,ring_size,buf_size);

      TI1570_LOG(d,"ti1570_acquire_rx_buffer: buffer index=%u (0x%x), "
                 "buffer ptr address = 0x%x\n",buf_idx,buf_idx,bp_addr);
#endif

      buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
   }

#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_ptr = 0x%x\n",buf_ptr);
#endif

   /* The TI1570 must own the buffer */
   if (!(buf_ptr & TI1570_RX_BUFPTR_OWN)) {
      TI1570_LOG(d,"ti1570_acquire_rx_buffer: no free buffer available.\n");
      return(FALSE);
   }

   /*
    * If we are using a ring, we have to clear the OWN bit and increment
    * the index field.
    */
   if (!fifo) {
      buf_ptr &= ~TI1570_RX_BUFPTR_OWN;
      physmem_copy_u32_to_vm(d->vm,bp_addr,buf_ptr);

      if (++buf_idx == ring_size) {
#if DEBUG_RECEIVE
         TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_idx=0x%x, "
                    "ring_size=0x%x -> resetting buf_idx\n",
                    buf_idx-1,ring_size);
#endif
         buf_idx = 0;
      }

      /* Write the new index back into the ring-size word */
      val = fbr_entry->ring_size & ~TI1570_RX_FBR_IDX_MASK;
      val |= buf_idx;
      fbr_entry->ring_size = val;
   }

   /* Get the buffer address (stored as a word index) */
   buf_addr = (buf_ptr & TI1570_RX_BUFPTR_MASK) << 2;

#if DEBUG_RECEIVE
   TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_addr = 0x%x\n",buf_addr);
#endif

   /* Read the buffer descriptor itself and store info for caller */
   rbh->buf_addr = buf_addr;
   rbh->buf_size = buf_size;
   ti1570_read_rx_buffer(d,buf_addr,&rbh->rx_buf);

   /* Clear the control field */
   physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,ctrl),0);

   /* Store the ATM header in data buffer */
   physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,atm_hdr),
                          atm_hdr);
   return(TRUE);
}
1182
1183 /* Insert a new free buffer in a RX DMA entry */
1184 static void ti1570_insert_rx_free_buf(struct pa_a1_data *d,
1185 ti1570_rx_dma_entry_t *rde,
1186 ti1570_rx_buf_holder_t *rbh)
1187 {
1188 m_uint32_t val,aal_type;
1189
1190 aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
1191
1192 /* Set current and start of buffer addresses */
1193 rde->cb_addr = rbh->buf_addr + sizeof(ti1570_rx_buffer_t);
1194 rde->sb_addr = rbh->buf_addr >> 2;
1195
1196 /* Set the buffer length */
1197 val = rbh->buf_size;
1198
1199 if (aal_type == TI1570_RX_DMA_AAL_CNT)
1200 val |= (rde->aal5_crc & 0xFFFF) << 16;
1201
1202 rde->cb_len = val;
1203 }
1204
1205 /* Store a RX cell */
1206 static int ti1570_store_rx_cell(struct pa_a1_data *d,
1207 ti1570_rx_dma_entry_t *rde,
1208 m_uint8_t *atm_cell)
1209 {
1210 m_uint32_t aal_type,atm_hdr,aal5_trailer,pti,real_eop,pti_eop;
1211 m_uint32_t prev_buf_addr,buf_len,val,ptr,cnt;
1212 ti1570_rx_buf_holder_t rbh;
1213
1214 real_eop = pti_eop = FALSE;
1215 aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
1216
1217 /* Extract PTI from the ATM header */
1218 atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
1219 pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1220
1221 /* PTI == 0x1 => EOP */
1222 if ((pti == 0x01) || (pti == 0x03))
1223 pti_eop = TRUE;
1224
1225 if (rde->ctrl & TI1570_RX_DMA_WAIT_EOP) {
1226 TI1570_LOG(d,"ti1570_store_rx_cell: EOP processing, not handled yet.\n");
1227 return(FALSE);
1228 }
1229
1230 /* AAL5 special processing */
1231 if (aal_type == TI1570_RX_DMA_AAL_AAL5)
1232 {
1233 /* Check that we don't exceed 1366 cells for AAL5 */
1234 /* XXX TODO */
1235 }
1236 else
1237 {
1238 /* EOP processing for non counter-based transparent-AAL packets */
1239 if ((rde->ctrl & TI1570_RX_DMA_WAIT_EOP) && pti_eop)
1240 {
1241 /* XXX TODO */
1242 }
1243 }
1244
1245 /* do we have enough room in buffer ? */
1246 buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;
1247
1248 if (!buf_len) {
1249 prev_buf_addr = rde->sb_addr << 2;
1250
1251 /* acquire a new free buffer */
1252 if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
1253 rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
1254 return(FALSE);
1255 }
1256
1257 /* insert the free buffer in the RX DMA structure */
1258 ti1570_insert_rx_free_buf(d,rde,&rbh);
1259
1260 /* chain the buffers (keep SOP/EOP bits intact) */
1261 ptr = prev_buf_addr + OFFSET(ti1570_rx_buffer_t,ctrl);
1262
1263 val = physmem_copy_u32_from_vm(d->vm,ptr);
1264 val |= rde->sb_addr;
1265 physmem_copy_u32_to_vm(d->vm,ptr,val);
1266
1267 /* read the new buffer length */
1268 buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;
1269 }
1270
1271 /* copy the ATM payload */
1272 #if DEBUG_RECEIVE
1273 TI1570_LOG(d,"ti1570_store_rx_cell: storing cell payload at 0x%x "
1274 "(buf_addr=0x%x)\n",rde->cb_addr,rde->sb_addr << 2);
1275 #endif
1276
1277 physmem_copy_to_vm(d->vm,&atm_cell[ATM_HDR_SIZE],
1278 rde->cb_addr,ATM_PAYLOAD_SIZE);
1279 rde->cb_addr += ATM_PAYLOAD_SIZE;
1280
1281 /* update the current buffer length */
1282 val = rde->cb_len & ~TI1570_RX_DMA_CB_LEN_MASK;
1283 rde->cb_len = val | (--buf_len);
1284
1285 #if DEBUG_RECEIVE
1286 TI1570_LOG(d,"ti1570_store_rx_cell: new rde->cb_len = 0x%x, "
1287 "buf_len=0x%x\n",rde->cb_len,buf_len);
1288 #endif
1289
1290 /* determine if this is the end of the packet (EOP) */
1291 if (aal_type == TI1570_RX_DMA_AAL_CNT)
1292 {
1293 /* counter-based tranparent-AAL packets */
1294 cnt = rde->cb_len & TI1570_RX_DMA_TR_CNT_MASK;
1295 cnt >>= TI1570_RX_DMA_TR_CNT_SHIFT;
1296
1297 /* if the counter reaches 0, this is the EOP */
1298 if (--cnt == 0)
1299 real_eop = TRUE;
1300
1301 val = rde->cb_len & ~TI1570_RX_DMA_TR_CNT_MASK;
1302 val |= cnt << TI1570_RX_DMA_TR_CNT_SHIFT;
1303 }
1304 else {
1305 /* PTI-based transparent AAL packets or AAL5 */
1306 if (pti_eop)
1307 real_eop = TRUE;
1308 }
1309
1310 if (real_eop) {
1311 /* mark the buffer as EOP */
1312 ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
1313 val = physmem_copy_u32_from_vm(d->vm,ptr);
1314 val |= TI1570_RX_BUFFER_EOP;
1315 physmem_copy_u32_to_vm(d->vm,ptr,val);
1316
1317 /* get the aal5 trailer */
1318 aal5_trailer = ntohl(*(m_uint32_t *)&atm_cell[ATM_AAL5_TRAILER_POS]);
1319
1320 /* post the entry into the appropriate RX completion ring */
1321 ti1570_update_rx_cring(d,rde,atm_hdr,aal5_trailer,0,TRUE);
1322 }
1323
1324 return(TRUE);
1325 }
1326
1327 /* Handle a received ATM cell */
1328 static int ti1570_handle_rx_cell(netio_desc_t *nio,
1329 u_char *atm_cell,ssize_t cell_len,
1330 struct pa_a1_data *d)
1331 {
1332 m_uint32_t atm_hdr,vpi,vci,vci_idx,vci_mask;
1333 m_uint32_t vci_max,rvd_entry,bptr,pti,ptr;
1334 ti1570_rx_dma_entry_t *rde = NULL;
1335 ti1570_rx_buf_holder_t rbh;
1336
1337 if (cell_len != ATM_CELL_SIZE) {
1338 TI1570_LOG(d,"invalid RX cell size (%ld)\n",(long)cell_len);
1339 return(FALSE);
1340 }
1341
1342 /* Extract the VPI/VCI used as index in the RX VPI/VCI DMA pointer table */
1343 atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
1344 vpi = (atm_hdr & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1345 vci = (atm_hdr & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1346 pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1347
1348 #if DEBUG_RECEIVE
1349 TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with VPI/VCI=%u/%u\n",
1350 vpi,vci);
1351 #endif
1352
1353 /* Get the entry corresponding to this VPI in RX VPI/VCI dma ptr table */
1354 rvd_entry = d->rx_vpi_vci_dma_table[vpi];
1355
1356 if (!(rvd_entry & TI1570_RX_VPI_ENABLE)) {
1357 TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with "
1358 "unknown VPI %u (VCI=%u)\n",vpi,vci);
1359 return(FALSE);
1360 }
1361
1362 /*
1363 * Special routing for OAM F4 cells:
1364 * - VCI 3 : OAM F4 segment cell
1365 * - VCI 4 : OAM F4 end-to-end cell
1366 */
1367 if ((vci == 3) || (vci == 4))
1368 rde = &d->rx_dma_table[2];
1369 else {
1370 if ((atm_hdr & ATM_PTI_NETWORK) != 0) {
1371 switch(pti) {
1372 case 0x04: /* OAM F5-segment cell */
1373 case 0x05: /* OAM F5 end-to-end cell */
1374 rde = &d->rx_dma_table[0];
1375 break;
1376
1377 case 0x06:
1378 case 0x07:
1379 rde = &d->rx_dma_table[1];
1380 break;
1381 }
1382 } else {
1383 /*
1384 * Standard VPI/VCI.
1385 * Apply the VCI mask if we don't have an OAM cell.
1386 */
1387 if (!(atm_hdr & ATM_PTI_NETWORK)) {
1388 vci_mask = d->iregs[TI1570_REG_TX_RX_FIFO] >> 16;
1389 vci_idx = vci & (~vci_mask);
1390
1391 vci_max = rvd_entry & TI1570_RX_VCI_RANGE_MASK;
1392
1393 if (vci_idx > vci_max) {
1394 TI1570_LOG(d,"ti1570_handle_rx_cell: out-of-range VCI %u "
1395 "(VPI=%u,vci_mask=%u,vci_max=%u)\n",
1396 vci,vpi,vci_mask,vci_max);
1397 return(FALSE);
1398 }
1399
1400 #if DEBUG_RECEIVE
1401 TI1570_LOG(d,"ti1570_handle_rx_cell: VPI/VCI=%u/%u, "
1402 "vci_mask=0x%x, vci_idx=%u (0x%x), vci_max=%u (0x%x)\n",
1403 vpi,vci,vci_mask,vci_idx,vci_idx,vci_max,vci_max);
1404 #endif
1405 bptr = (rvd_entry & TI1570_RX_BASE_PTR_MASK);
1406 bptr >>= TI1570_RX_BASE_PTR_SHIFT;
1407 bptr = (bptr + vci) * sizeof(ti1570_rx_dma_entry_t);
1408
1409 if (bptr < TI1570_RX_DMA_TABLE_OFFSET) {
1410 TI1570_LOG(d,"ti1570_handle_rx_cell: inconsistency in "
1411 "RX VPI/VCI table, VPI/VCI=%u/u, bptr=0x%x\n",
1412 vpi,vci,bptr);
1413 return(FALSE);
1414 }
1415
1416 bptr -= TI1570_RX_DMA_TABLE_OFFSET;
1417 rde = &d->rx_dma_table[bptr / sizeof(ti1570_rx_dma_entry_t)];
1418 }
1419 }
1420 }
1421
1422 if (!rde) {
1423 TI1570_LOG(d,"ti1570_handle_rx_cell: no RX DMA table entry found!\n");
1424 return(FALSE);
1425 }
1426
1427 /* The entry must be active */
1428 if (!(rde->fbr_entry & TI1570_RX_DMA_ON))
1429 return(FALSE);
1430
1431 /* Is this the start of a new packet ? */
1432 if (!(rde->ctrl & TI1570_RX_DMA_ACT))
1433 {
1434 /* Try to acquire a free buffer */
1435 if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
1436 rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
1437 return(FALSE);
1438 }
1439
1440 /* Insert the free buffer in the RX DMA structure */
1441 ti1570_insert_rx_free_buf(d,rde,&rbh);
1442 rde->sp_ptr = rde->sb_addr;
1443
1444 /* Mark the RX buffer as the start of packet (SOP) */
1445 ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
1446 physmem_copy_u32_to_vm(d->vm,ptr,TI1570_RX_BUFFER_SOP);
1447
1448 /* Set ACT bit for the DMA channel */
1449 rde->ctrl |= TI1570_RX_DMA_ACT;
1450 }
1451
1452 /* Store the received cell */
1453 ti1570_store_rx_cell(d,rde,atm_cell);
1454 return(TRUE);
1455 }
1456
1457 /*
1458 * pci_ti1570_read()
1459 */
1460 static m_uint32_t pci_ti1570_read(cpu_gen_t *cpu,struct pci_device *dev,
1461 int reg)
1462 {
1463 struct pa_a1_data *d = dev->priv_data;
1464
1465 #if DEBUG_ACCESS
1466 TI1570_LOG(d,"pci_ti1570_read: read reg 0x%x\n",reg);
1467 #endif
1468
1469 switch(reg) {
1470 case PCI_REG_BAR0:
1471 return(d->dev->phys_addr);
1472 default:
1473 return(0);
1474 }
1475 }
1476
1477 /*
1478 * pci_ti1570_write()
1479 */
1480 static void pci_ti1570_write(cpu_gen_t *cpu,struct pci_device *dev,
1481 int reg,m_uint32_t value)
1482 {
1483 struct pa_a1_data *d = dev->priv_data;
1484
1485 #if DEBUG_ACCESS
1486 TI1570_LOG(d,"pci_ti1570_write: write reg 0x%x, value 0x%x\n",reg,value);
1487 #endif
1488
1489 switch(reg) {
1490 case PCI_REG_BAR0:
1491 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
1492 TI1570_LOG(d,"registers are mapped at 0x%x\n",value);
1493 break;
1494 }
1495 }
1496
1497 /*
1498 * pci_plx9060es_read()
1499 */
1500 static m_uint32_t pci_plx9060es_read(cpu_gen_t *cpu,struct pci_device *dev,
1501 int reg)
1502 {
1503 struct pa_a1_data *d = dev->priv_data;
1504
1505 #if DEBUG_ACCESS
1506 TI1570_LOG(d,"PLX9060ES","read reg 0x%x\n",reg);
1507 #endif
1508 switch(reg) {
1509 default:
1510 return(0);
1511 }
1512 }
1513
1514 /*
1515 * pci_plx9060es_write()
1516 */
1517 static void pci_plx9060es_write(cpu_gen_t *cpu,struct pci_device *dev,
1518 int reg,m_uint32_t value)
1519 {
1520 struct pa_a1_data *d = dev->priv_data;
1521
1522 #if DEBUG_ACCESS
1523 TI1570_LOG(d,"PLX9060ES","write reg 0x%x, value 0x%x\n",reg,value);
1524 #endif
1525
1526 switch(reg) {
1527 }
1528 }
1529
1530 /* Reset the TI1570 */
1531 static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem)
1532 {
1533 ti1570_clear_tx_fifo(d);
1534
1535 d->tcr_wi_pos = d->tcr_woi_pos = 0;
1536 d->rcr_wi_pos = d->rcr_woi_pos = 0;
1537
1538 if (clear_ctrl_mem)
1539 memset(d->ctrl_mem_ptr,0,TI1570_CTRL_MEM_SIZE);
1540 }
1541
1542 /*
1543 * dev_c7200_pa_a1_init()
1544 *
1545 * Add a PA-A1 port adapter into specified slot.
1546 */
1547 int dev_c7200_pa_a1_init(c7200_t *router,char *name,u_int pa_bay)
1548 {
1549 struct pci_device *pci_dev_ti,*pci_dev_plx;
1550 struct pa_a1_data *d;
1551 struct vdevice *dev;
1552 m_uint8_t *p;
1553
1554 /* Allocate the private data structure for TI1570 chip */
1555 if (!(d = malloc(sizeof(*d)))) {
1556 fprintf(stderr,"%s (TI1570): out of memory\n",name);
1557 return(-1);
1558 }
1559
1560 memset(d,0,sizeof(*d));
1561
1562 /* Set the EEPROM */
1563 c7200_pa_set_eeprom(router,pa_bay,cisco_eeprom_find_pa("PA-A1"));
1564
1565 /* Add PCI device TI1570 */
1566 pci_dev_ti = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1567 TI1570_PCI_VENDOR_ID,TI1570_PCI_PRODUCT_ID,
1568 0,0,c7200_net_irq_for_slot_port(pa_bay,0),d,
1569 NULL,pci_ti1570_read,pci_ti1570_write);
1570
1571 if (!pci_dev_ti) {
1572 fprintf(stderr,"%s (TI1570): unable to create PCI device TI1570.\n",
1573 name);
1574 return(-1);
1575 }
1576
1577 /* Add PCI device PLX9060ES */
1578 pci_dev_plx = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1579 PLX_9060ES_PCI_VENDOR_ID,
1580 PLX_9060ES_PCI_PRODUCT_ID,
1581 1,0,-1,d,
1582 NULL,pci_plx9060es_read,pci_plx9060es_write);
1583
1584 if (!pci_dev_plx) {
1585 fprintf(stderr,"%s (PLX_9060ES): unable to create PCI device "
1586 "PLX 9060ES.\n",name);
1587 return(-1);
1588 }
1589
1590 /* Create the TI1570 structure */
1591 d->name = name;
1592 d->vm = router->vm;
1593 d->pci_dev_ti = pci_dev_ti;
1594 d->pci_dev_plx = pci_dev_plx;
1595
1596 /* Allocate the control memory */
1597 if (!(d->ctrl_mem_ptr = malloc(TI1570_CTRL_MEM_SIZE))) {
1598 fprintf(stderr,"%s (PA-A1): unable to create control memory.\n",name);
1599 return(-1);
1600 }
1601
1602 /* Standard tables for the TI1570 */
1603 p = (m_uint8_t *)d->ctrl_mem_ptr;
1604
1605 d->iregs = (m_uint32_t *)(p + TI1570_INTERNAL_REGS_OFFSET);
1606 d->tx_sched_table = (m_uint32_t *)(p + TI1570_TX_SCHED_OFFSET);
1607 d->tx_dma_table = (ti1570_tx_dma_entry_t *)(p + TI1570_TX_DMA_TABLE_OFFSET);
1608 d->rx_vpi_vci_dma_table = (m_uint32_t *)(p+TI1570_RX_DMA_PTR_TABLE_OFFSET);
1609 d->rx_dma_table = (ti1570_rx_dma_entry_t *)(p + TI1570_RX_DMA_TABLE_OFFSET);
1610 d->rx_fbr_table = (ti1570_rx_fbr_entry_t *)(p + TI1570_FREE_BUFFERS_OFFSET);
1611
1612 ti1570_reset(d,TRUE);
1613
1614 /* Create the device itself */
1615 if (!(dev = dev_create(name))) {
1616 fprintf(stderr,"%s (PA-A1): unable to create device.\n",name);
1617 return(-1);
1618 }
1619
1620 dev->phys_addr = 0;
1621 dev->phys_len = 0x200000;
1622 dev->handler = dev_pa_a1_access;
1623
1624 /* Store device info */
1625 dev->priv_data = d;
1626 d->dev = dev;
1627
1628 /* Store device info into the router structure */
1629 return(c7200_pa_set_drvinfo(router,pa_bay,d));
1630 }
1631
1632 /* Remove a PA-A1 from the specified slot */
1633 int dev_c7200_pa_a1_shutdown(c7200_t *router,u_int pa_bay)
1634 {
1635 struct c7200_pa_bay *bay;
1636 struct pa_a1_data *d;
1637
1638 if (!(bay = c7200_pa_get_info(router,pa_bay)))
1639 return(-1);
1640
1641 d = bay->drv_info;
1642
1643 /* Remove the PA EEPROM */
1644 c7200_pa_unset_eeprom(router,pa_bay);
1645
1646 /* Remove the PCI devices */
1647 pci_dev_remove(d->pci_dev_ti);
1648 pci_dev_remove(d->pci_dev_plx);
1649
1650 /* Remove the device from the VM address space */
1651 vm_unbind_device(router->vm,d->dev);
1652 cpu_group_rebuild_mts(router->vm->cpu_group);
1653
1654 /* Free the control memory */
1655 free(d->ctrl_mem_ptr);
1656
1657 /* Free the device structure itself */
1658 free(d->dev);
1659 free(d);
1660 return(0);
1661 }
1662
1663 /* Bind a Network IO descriptor to a specific port */
1664 int dev_c7200_pa_a1_set_nio(c7200_t *router,u_int pa_bay,u_int port_id,
1665 netio_desc_t *nio)
1666 {
1667 struct pa_a1_data *d;
1668
1669 if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1670 return(-1);
1671
1672 if (d->nio != NULL)
1673 return(-1);
1674
1675 d->nio = nio;
1676 d->tx_tid = ptask_add((ptask_callback)ti1570_scan_tx_sched_table,d,NULL);
1677 netio_rxl_add(nio,(netio_rx_handler_t)ti1570_handle_rx_cell,d,NULL);
1678 return(0);
1679 }
1680
1681 /* Unbind a Network IO descriptor to a specific port */
1682 int dev_c7200_pa_a1_unset_nio(c7200_t *router,u_int pa_bay,u_int port_id)
1683 {
1684 struct pa_a1_data *d;
1685
1686 if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1687 return(-1);
1688
1689 if (d->nio) {
1690 ptask_remove(d->tx_tid);
1691 netio_rxl_remove(d->nio);
1692 d->nio = NULL;
1693 }
1694 return(0);
1695 }
1696
/* PA-A1 driver */
struct c7200_pa_driver dev_c7200_pa_a1_driver = {
   "PA-A1", 1,                  /* driver name; second field presumably a
                                   port count or "supported" flag -- TODO
                                   confirm against struct c7200_pa_driver */
   dev_c7200_pa_a1_init,        /* adapter creation */
   dev_c7200_pa_a1_shutdown,    /* adapter removal */
   dev_c7200_pa_a1_set_nio,     /* bind a NIO to port 0 */
   dev_c7200_pa_a1_unset_nio,   /* unbind the NIO */
   NULL,                        /* last handler unused by this driver
                                   (field meaning not visible here) */
};

  ViewVC Help
Powered by ViewVC 1.1.26