/[dynamips]/upstream/dynamips-0.2.7-RC1/dev_pa_a1.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Annotation of /upstream/dynamips-0.2.7-RC1/dev_pa_a1.c



Revision 7
Sat Oct 6 16:23:47 2007 UTC by dpavlin
File MIME type: text/plain
File size: 53948 byte(s)
dynamips-0.2.7-RC1

1 dpavlin 1 /*
2 dpavlin 7 * Cisco router simulation platform.
3 dpavlin 1 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4     *
5     * PA-A1 ATM interface based on TI1570 and PLX 9060-ES.
6     *
7     * EEPROM types:
8     * - 0x17: PA-A1-OC3MM
9     * - 0x2C: PA-A1-OC3SM
10     * - 0x2D: PA-A1-OC3UTP
11     *
12     * IOS command: "sh controller atm2/0"
13     *
14     * Manuals:
15     *
16     * Texas Instruments TNETA1570 ATM segmentation and reassembly device
17     * with integrated 64-bit PCI-host interface
18     * http://focus.ti.com/docs/prod/folders/print/tneta1570.html
19     *
20     * PLX 9060-ES
21     * http://www.plxtech.com/products/io_accelerators/PCI9060/default.htm
22     *
23     * TODO:
24     * - RX error handling and RX AAL5-related stuff
25     * - HEC and AAL5 CRC fields.
26     *
27     * Cell trains for faster NETIO communications ?
28     */
29    
30     #include <stdio.h>
31     #include <stdlib.h>
32     #include <string.h>
33     #include <unistd.h>
34     #include <errno.h>
35    
36     #include "crc.h"
37     #include "atm.h"
38 dpavlin 7 #include "cpu.h"
39     #include "vm.h"
40 dpavlin 1 #include "dynamips.h"
41     #include "memory.h"
42     #include "device.h"
43     #include "ptask.h"
44     #include "dev_c7200.h"
45    
46     /* Debugging flags */
47     #define DEBUG_ACCESS 0
48     #define DEBUG_UNKNOWN 0
49     #define DEBUG_TRANSMIT 0
50     #define DEBUG_RECEIVE 0
51     #define DEBUG_TX_DMA 0
52    
53     /* PCI vendor/product codes */
54     #define TI1570_PCI_VENDOR_ID 0x104c
55     #define TI1570_PCI_PRODUCT_ID 0xa001
56    
57     #define PLX_9060ES_PCI_VENDOR_ID 0x10b5
58     #define PLX_9060ES_PCI_PRODUCT_ID 0x906e
59    
60     /* Number of buffers transmitted at each TX DMA ring scan pass */
61     #define TI1570_TXDMA_PASS_COUNT 16
62    
63     /* TI1570 Internal Registers (p.58 of doc) */
64     #define TI1570_REG_CONFIG 0x0000 /* Configuration registers */
65     #define TI1570_REG_STATUS 0x0001 /* Status register */
66     #define TI1570_REG_IMASK 0x0002 /* Interrupt-mask register */
67     #define TI1570_REG_RGT_RAT 0x0003 /* RGT + RAT cycle-counter */
68     #define TI1570_REG_RX_UNKNOWN 0x0004 /* RX Unknown Register */
69     #define TI1570_REG_TX_CRING_SIZE 0x0005 /* TX Completion ring sizes */
70     #define TI1570_REG_RX_CRING_SIZE 0x0006 /* RX Completion ring sizes */
71     #define TI1570_REG_TX_PSR_SIZE 0x0007 /* TX Pkt-seg ring size + FIFO */
72     #define TI1570_REG_HEC_AAL5_DISC 0x0008 /* HEC err + AAL5 CPCS discard */
73     #define TI1570_REG_UNK_PROTO_CNT 0x0009 /* Unknown-protocols counter */
74     #define TI1570_REG_RX_ATM_COUNT 0x000A /* ATM-cells-received counter */
75     #define TI1570_REG_TX_ATM_COUNT 0x000B /* ATM-cells-transmitted counter */
76     #define TI1570_REG_TX_RX_FIFO 0x000C /* TX/RX FIFO occupancy, VCI mask */
77     #define TI1570_REG_SCHED_SIZE 0x000D /* Scheduler Table size */
78     #define TI1570_REG_SOFT_RESET 0x000E /* Software Reset */
79     #define TI1570_REG_TCR_WOI_ADDR 0x0080 /* TX Compl. Ring w/o IRQ addr. */
80     #define TI1570_REG_TCR_WI_ADDR 0x0081 /* TX Compl. Ring w/ IRQ addr. */
81     #define TI1570_REG_RCR_WOI_ADDR 0x0082 /* RX Compl. Ring w/o IRQ addr. */
82     #define TI1570_REG_RCR_WI_ADDR 0x0083 /* RX Compl. Ring w/ IRQ addr. */
83    
84     /* TI1570 configuration register (p.59) */
85     #define TI1570_CFG_EN_RAT 0x00000001 /* Reassembly Aging */
86     #define TI1570_CFG_BP_SEL 0x00000002 /* IRQ on packet or buffer */
87     #define TI1570_CFG_EN_RX 0x00000010 /* RX enable */
88     #define TI1570_CFG_EN_TX 0x00000020 /* TX enable */
89     #define TI1570_CFG_SMALL_MAP 0x00000040 /* Small map */
90    
91     /* TI1570 status register (p.61) */
92     #define TI1570_STAT_CP_TX 0x00000001 /* Transmit completion ring */
93     #define TI1570_STAT_RX_IRR 0x00000040 /* Receive unknown reg set */
94     #define TI1570_STAT_CP_RX 0x00000080 /* Receive completion ring */
95     #define TI1570_STAT_TX_FRZ 0x00000100 /* TX Freeze */
96     #define TI1570_STAT_RX_FRZ 0x00000200 /* RX Freeze */
97    
98     /* Mask for RX/TX completion-ring sizes */
99     #define TI1570_TCR_SIZE_MASK 0x00001FFF /* TX compl. ring size mask */
100     #define TI1570_RCR_SIZE_MASK 0x000003FF /* RX compl. ring size mask */
101    
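/*
 * Note that each completion-ring size register packs two sizes into a single
 * word: the low halfword is decoded for the ring "with interrupt" and the
 * high halfword for the ring "without interrupt", as ti1570_update_tx_cring()
 * and ti1570_update_rx_cring() below read them back. A minimal decoding
 * sketch; this helper is only illustrative and is not used by the driver
 * (pass TI1570_TCR_SIZE_MASK for the TX rings, TI1570_RCR_SIZE_MASK for the
 * RX rings):
 */
static inline m_uint32_t ti1570_cring_size(m_uint32_t reg,m_uint32_t mask,
                                           int with_irq)
{
   return((with_irq ? reg : (reg >> 16)) & mask);
}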
102     /* TI1570 TX packet segmentation ring register */
103     #define TI1570_PSR_SIZE_MASK 0x000000FF /* pkt-seg ring size */
104    
105     /* Total size of the TI1570 Control Memory */
106     #define TI1570_CTRL_MEM_SIZE 0x100000
107    
108     /* Offsets of the TI1570 structures (p.66) */
109     #define TI1570_TX_SCHED_OFFSET 0x0000 /* TX scheduler table */
110     #define TI1570_INTERNAL_REGS_OFFSET 0x3200 /* Internal Registers */
111     #define TI1570_FREE_BUFFERS_OFFSET 0x3800 /* Free-Buffer Pointers */
112     #define TI1570_RX_DMA_PTR_TABLE_OFFSET 0x4000 /* RX VPI/VCI pointer table */
113     #define TI1570_TX_DMA_TABLE_OFFSET 0x8000 /* TX DMA state table */
114     #define TI1570_RX_DMA_TABLE_OFFSET 0x10000 /* RX DMA state table */
115    
116     /* TX scheduler table */
117     #define TI1570_TX_SCHED_ENTRY_COUNT 6200
118     #define TI1570_TX_SCHED_ENTRY_MASK 0x3FF /* Entry mask */
119     #define TI1570_TX_SCHED_E0_SHIFT 0 /* Shift for entry 0 */
120     #define TI1570_TX_SCHED_E1_SHIFT 16 /* Shift for entry 1 */
121    
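/*
 * Each 32-bit word of the scheduler table thus names up to two TX DMA
 * channels, one per halfword; a null index means the slot is unused. A
 * minimal decoding sketch mirroring ti1570_scan_tx_sched_table() below
 * (illustrative helper, not used by the driver):
 */
static inline void ti1570_tx_sched_decode(m_uint32_t cw,
                                          m_uint32_t *index0,m_uint32_t *index1)
{
   *index0 = (cw >> TI1570_TX_SCHED_E0_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
   *index1 = (cw >> TI1570_TX_SCHED_E1_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
}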
122     /* TX DMA state table */
123     #define TI1570_TX_DMA_ACT 0x80000000 /* ACTive (word 0) */
124     #define TI1570_TX_DMA_SOP 0x40000000 /* Start of Packet (SOP) */
125     #define TI1570_TX_DMA_EOP 0x20000000 /* End of Packet (EOP) */
126     #define TI1570_TX_DMA_ABORT 0x10000000 /* Abort */
127     #define TI1570_TX_DMA_TCR_SELECT 0x02000000 /* TX comp. ring selection */
128     #define TI1570_TX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
129    
130     #define TI1570_TX_DMA_AAL_TRWPTI 0x00000000 /* Transp. AAL w/ PTI set */
131     #define TI1570_TX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
132     #define TI1570_TX_DMA_AAL_TRWOPTI 0x08000000 /* Transp. AAL w/o PTI set */
133    
134     #define TI1570_TX_DMA_OFFSET_MASK 0x00FF0000
135     #define TI1570_TX_DMA_OFFSET_SHIFT 16
136     #define TI1570_TX_DMA_DCOUNT_MASK 0x0000FFFF
137    
138     #define TI1570_TX_DMA_ON 0x80000000 /* DMA state (word 3) */
139     #define TI1570_TX_DMA_RING_OFFSET_MASK 0x3FFFFF00
140     #define TI1570_TX_DMA_RING_OFFSET_SHIFT 8
141     #define TI1570_TX_DMA_RING_INDEX_MASK 0x000000FF
142    
143     #define TI1570_TX_DMA_RING_AAL5_LEN_MASK 0x0000FFFF
144    
145     typedef struct ti1570_tx_dma_entry ti1570_tx_dma_entry_t;
146     struct ti1570_tx_dma_entry {
147     m_uint32_t ctrl_buf; /* Ctrl, Buffer Offset, Buffer data-byte count */
148     m_uint32_t cb_addr; /* Current Buffer Address */
149     m_uint32_t atm_hdr; /* 4-byte ATM header */
150     m_uint32_t dma_state; /* DMA state + Packet segmentation ring address */
151     m_uint32_t nb_addr; /* Next Buffer address */
152     m_uint32_t sb_addr; /* Start of Buffer address */
153     m_uint32_t aal5_crc; /* Partial AAL5-transmit CRC */
154     m_uint32_t aal5_ctrl; /* AAL5-control field and length field */
155     };
156    
157     /* TX Packet-Segmentation Rings */
158     #define TI1570_TX_RING_OWN 0x80000000 /* If set, packet is ready */
159     #define TI1570_TX_RING_PTR_MASK 0x3FFFFFFF /* Buffer pointer */
160    
161     /* TX Data Buffers */
162     #define TI1570_TX_BUFFER_RDY 0x80000000 /* If set, buffer is ready */
163     #define TI1570_TX_BUFFER_SOP 0x40000000 /* First buffer of packet */
164     #define TI1570_TX_BUFFER_EOP 0x20000000 /* Last buffer of packet */
165     #define TI1570_TX_BUFFER_ABORT 0x10000000 /* Abort */
166    
167     #define TI1570_TX_BUFFER_OFFSET_MASK 0x00FF0000
168     #define TI1570_TX_BUFFER_OFFSET_SHIFT 16
169     #define TI1570_TX_BUFFER_DCOUNT_MASK 0x0000FFFF
170    
171     typedef struct ti1570_tx_buffer ti1570_tx_buffer_t;
172     struct ti1570_tx_buffer {
173     m_uint32_t ctrl_buf; /* Ctrl, Buffer offset, Buffer data-byte count */
174     m_uint32_t nb_addr; /* Start-of-next buffer pointer */
175     m_uint32_t atm_hdr; /* 4-byte ATM header */
176     m_uint32_t aal5_ctrl; /* CPCS-UU/CPI field (AAL5 control field) */
177     };
178    
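/*
 * The ctrl_buf word of a host TX buffer descriptor packs the RDY/SOP/EOP/
 * ABORT flags in its top bits, the data offset in bits 23-16 and the data
 * byte count in bits 15-0; the payload starts right after this 16-byte
 * descriptor, at the given offset, as ti1570_acquire_tx_buffer() below
 * computes it. A minimal sketch of that address computation (illustrative
 * helper, not used by the driver):
 */
static inline m_uint32_t ti1570_tx_buf_data_addr(m_uint32_t buf_addr,
                                                 ti1570_tx_buffer_t *tx_buf)
{
   m_uint32_t offset;

   offset = (tx_buf->ctrl_buf & TI1570_TX_BUFFER_OFFSET_MASK) >>
            TI1570_TX_BUFFER_OFFSET_SHIFT;

   return(buf_addr + sizeof(ti1570_tx_buffer_t) + offset);
}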
179     /* TX completion-ring */
180     #define TI1570_TCR_OWN 0x80000000 /* Owner bit */
181     #define TI1570_TCR_ABORT 0x40000000 /* Abort */
182    
183     /* RX VPI/VCI DMA pointer table */
184     #define TI1570_RX_VPI_ENABLE 0x80000000 /* VPI enabled ? */
185     #define TI1570_RX_BASE_PTR_MASK 0x7FFF0000 /* Base pointer mask */
186     #define TI1570_RX_BASE_PTR_SHIFT 16 /* Base pointer shift */
187     #define TI1570_RX_VCI_RANGE_MASK 0x0000FFFF /* Valid VCI range */
188    
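/*
 * Each word of this table is indexed by the VPI of an incoming cell: bit 31
 * enables the VPI, bits 30-16 hold the base pointer of its RX DMA state
 * entries and bits 15-0 give the highest VCI accepted on that VPI, as
 * ti1570_handle_rx_cell() below decodes it. A minimal validity check,
 * deliberately ignoring the VCI mask that the real code also applies from
 * TI1570_REG_TX_RX_FIFO (illustrative helper, not used by the driver):
 */
static inline int ti1570_rx_vpi_vci_valid(m_uint32_t rvd_entry,m_uint32_t vci)
{
   if (!(rvd_entry & TI1570_RX_VPI_ENABLE))
      return(FALSE);

   return(vci <= (rvd_entry & TI1570_RX_VCI_RANGE_MASK));
}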
189     /* RX DMA state table (p.36) */
190     #define TI1570_RX_DMA_ACT 0x80000000 /* ACTive (word 0) */
191     #define TI1570_RX_DMA_RCR_SELECT 0x20000000 /* RX comp. ring selection */
192     #define TI1570_RX_DMA_WAIT_EOP 0x10000000 /* Wait for EOP */
193     #define TI1570_RX_DMA_AAL_TYPE_MASK 0x0C000000 /* AAL-type mask */
194    
195     #define TI1570_RX_DMA_AAL_PTI 0x00000000 /* PTI based tr. AAL pkt */
196     #define TI1570_RX_DMA_AAL_AAL5 0x04000000 /* AAL5 */
197     #define TI1570_RX_DMA_AAL_CNT 0x08000000 /* Cnt based tr. AAL pkt */
198    
199     #define TI1570_RX_DMA_FIFO 0x02000000 /* FIFO used for free bufs */
200    
201     #define TI1570_RX_DMA_TR_CNT_MASK 0xFFFF0000 /* Cnt-based Tr-AAL */
202     #define TI1570_RX_DMA_TR_CNT_SHIFT 16
203     #define TI1570_RX_DMA_CB_LEN_MASK 0x0000FFFF /* Current buffer length */
204    
205     #define TI1570_RX_DMA_ON 0x80000000 /* DMA state (word 6) */
206     #define TI1570_RX_DMA_FILTER 0x40000000 /* Filter */
207    
208     #define TI1570_RX_DMA_FB_PTR_MASK 0x3FFFFFFF /* Free-buffer ptr mask */
209     #define TI1570_RX_DMA_FB_INDEX_MASK 0x000000FF /* Index with Free-buf ring */
210    
211     typedef struct ti1570_rx_dma_entry ti1570_rx_dma_entry_t;
212     struct ti1570_rx_dma_entry {
213     m_uint32_t ctrl; /* Control field, EFCN cell cnt, pkt length */
214     m_uint32_t cb_addr; /* Current Buffer Address */
215     m_uint32_t sb_addr; /* Start of Buffer address */
216     m_uint32_t cb_len; /* Transp-AAL pkt counter, current buf length */
217     m_uint32_t sp_ptr; /* Start-of-packet pointer */
218     m_uint32_t aal5_crc; /* Partial AAL5-receive CRC */
219     m_uint32_t fbr_entry; /* Free-buffer ring-pointer table entry */
220     m_uint32_t timeout; /* Timeout value, current timeout count */
221     };
222    
223     /* RX free-buffer ring pointer table entry (p.39) */
224     #define TI1570_RX_FBR_PTR_MASK 0xFFFFFFFC
225     #define TI1570_RX_FBR_BS_MASK 0xFFFF0000 /* Buffer size mask */
226     #define TI1570_RX_FBR_BS_SHIFT 16
227     #define TI1570_RX_FBR_RS_MASK 0x0000FC00 /* Ring size mask */
228     #define TI1570_RX_FBR_RS_SHIFT 10
229     #define TI1570_RX_FBR_IDX_MASK 0x000003FF /* Current index mask */
230    
231     typedef struct ti1570_rx_fbr_entry ti1570_rx_fbr_entry_t;
232     struct ti1570_rx_fbr_entry {
233     m_uint32_t fbr_ptr; /* RX free-buffer ring pointer */
234     m_uint32_t ring_size; /* Ring size and buffer size */
235     };
236    
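/*
 * The ring_size word of a free-buffer ring entry packs the buffer size (in
 * ATM cells) in bits 31-16, the ring size in bits 15-10 and the current index
 * in bits 9-0. The ring-size field is encoded in units of 16 entries, so a
 * field value of N describes a ring of (N+1)*16 buffer pointers; this is the
 * "(ring_size << 4) + 15 + 1" computation in ti1570_acquire_rx_buffer()
 * below. A minimal decoding sketch (illustrative helper, not used by the
 * driver):
 */
static inline m_uint32_t ti1570_rx_fbr_ring_entries(ti1570_rx_fbr_entry_t *fbr)
{
   m_uint32_t rs;

   rs = (fbr->ring_size & TI1570_RX_FBR_RS_MASK) >> TI1570_RX_FBR_RS_SHIFT;
   return((rs + 1) << 4);
}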
237     /* RX buffer pointer (p.41) */
238     #define TI1570_RX_BUFPTR_OWN 0x80000000 /* If set, buffer is ready */
239     #define TI1570_RX_BUFPTR_MASK 0x3FFFFFFF /* Buffer address mask */
240    
241     /* RX data buffer (p.42) */
242     #define TI1570_RX_BUFFER_SOP 0x80000000 /* Start-of-Packet buffer */
243     #define TI1570_RX_BUFFER_EOP 0x40000000 /* End-of-Packet buffer */
244    
245     typedef struct ti1570_rx_buffer ti1570_rx_buffer_t;
246     struct ti1570_rx_buffer {
247     m_uint32_t reserved; /* Reserved, not used by the TI1570 */
248     m_uint32_t ctrl; /* Control field, Start of next buffer pointer */
249     m_uint32_t atm_hdr; /* ATM header */
250     m_uint32_t user; /* User-defined value */
251     };
252    
253     /* Internal structure to hold free buffer info */
254     typedef struct ti1570_rx_buf_holder ti1570_rx_buf_holder_t;
255     struct ti1570_rx_buf_holder {
256     m_uint32_t buf_addr;
257     m_uint32_t buf_size;
258     ti1570_rx_buffer_t rx_buf;
259     };
260    
261     /* RX completion ring entry */
262     #define TI1570_RCR_PKT_OVFLW 0x80000000 /* Packet overflow (word 0) */
263     #define TI1570_RCR_CRC_ERROR 0x40000000 /* CRC error */
264     #define TI1570_RCR_BUF_STARV 0x20000000 /* Buffer starvation */
265     #define TI1570_RCR_TIMEOUT 0x10000000 /* Reassembly timeout */
266     #define TI1570_RCR_ABORT 0x08000000 /* Abort condition */
267     #define TI1570_RCR_AAL5 0x04000000 /* AAL5 indicator */
268    
269     #define TI1570_RCR_VALID 0x80000000 /* Start-ptr valid (word 2) */
270    
271     #define TI1570_RCR_OWN 0x80000000 /* Buffer ready (word 4) */
272     #define TI1570_RCR_ERROR 0x40000000 /* Error entry */
273    
274     typedef struct ti1570_rcr_entry ti1570_rcr_entry_t;
275     struct ti1570_rcr_entry {
276     m_uint32_t atm_hdr; /* ATM header */
277     m_uint32_t error; /* Error Indicator + Congestion cell count */
278     m_uint32_t sp_addr; /* Start of packet */
279     m_uint32_t aal5_trailer; /* AAL5 trailer */
280     m_uint32_t fbr_entry; /* Free-buffer ring-pointer table entry */
281     m_uint32_t res[3]; /* Reserved, not used by the TI1570 */
282     };
283    
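/*
 * Word 4 of an RX completion-ring entry (fbr_entry) carries the OWN bit: the
 * chip only posts into an entry whose OWN bit is still set and clears the bit
 * when it writes the entry back; if the bit is already clear, the entry still
 * belongs to the host and ti1570_update_rx_cring() below raises the RX freeze
 * status instead. A minimal ownership test, assuming the same physmem helpers
 * used elsewhere in this file (illustrative helper, not used by the driver):
 */
static inline int ti1570_rcr_entry_ready(vm_instance_t *vm,m_uint32_t rcr_addr)
{
   m_uint32_t val;

   val = physmem_copy_u32_from_vm(vm,rcr_addr +
                                  OFFSET(ti1570_rcr_entry_t,fbr_entry));
   return((val & TI1570_RCR_OWN) != 0);
}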
284     /* TI1570 Data */
285     struct pa_a1_data {
286     char *name;
287    
288     /* Control Memory pointer */
289     m_uint32_t *ctrl_mem_ptr;
290    
291     /* TI1570 internal registers */
292     m_uint32_t *iregs;
293    
294     /* TX FIFO cell */
295     m_uint8_t txfifo_cell[ATM_CELL_SIZE];
296     m_uint32_t txfifo_avail,txfifo_pos;
297    
298     /* TX Scheduler table */
299     m_uint32_t *tx_sched_table;
300    
301     /* TX DMA state table */
302     ti1570_tx_dma_entry_t *tx_dma_table;
303    
304     /* TX/RX completion ring current position */
305     m_uint32_t tcr_wi_pos,tcr_woi_pos;
306     m_uint32_t rcr_wi_pos,rcr_woi_pos;
307    
308     /* RX VPI/VCI DMA pointer table */
309     m_uint32_t *rx_vpi_vci_dma_table;
310    
311     /* RX DMA state table */
312     ti1570_rx_dma_entry_t *rx_dma_table;
313    
314     /* RX Free-buffer ring pointer table */
315     ti1570_rx_fbr_entry_t *rx_fbr_table;
316    
317     /* Virtual device */
318     struct vdevice *dev;
319    
320     /* PCI device information */
321     struct pci_device *pci_dev_ti,*pci_dev_plx;
322    
323     /* Virtual machine */
324     vm_instance_t *vm;
325    
326     /* NetIO descriptor */
327     netio_desc_t *nio;
328    
329     /* TX ring scanner task id */
330     ptask_id_t tx_tid;
331     };
332    
333     /* Log a TI1570 message */
334     #define TI1570_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
335    
336     /* Reset the TI1570 (forward declaration) */
337     static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem);
338    
339     /*
340     * dev_pa_a1_access()
341     */
342 dpavlin 7 void *dev_pa_a1_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
343 dpavlin 1 u_int op_size,u_int op_type,m_uint64_t *data)
344     {
345     struct pa_a1_data *d = dev->priv_data;
346    
347     if (op_type == MTS_READ)
348     *data = 0;
349    
350     #if DEBUG_ACCESS
351     if (op_type == MTS_READ) {
352     cpu_log(cpu,"TI1570","read access to offset = 0x%x, pc = 0x%llx\n",
353 dpavlin 7 offset,cpu_get_pc(cpu));
354 dpavlin 1 } else {
355     cpu_log(cpu,"TI1570","write access to vaddr = 0x%x, pc = 0x%llx, "
356 dpavlin 7 "val = 0x%llx\n",offset,cpu_get_pc(cpu),*data);
357 dpavlin 1 }
358     #endif
359    
360     /* Specific cases */
361     switch(offset) {
362     case 0x3238:
363     TI1570_LOG(d,"reset issued.\n");
364     ti1570_reset(d,FALSE);
365     break;
366    
367     case 0x18000c:
368     if (op_type == MTS_READ) {
369     *data = 0xa6;
370     return NULL;
371     }
372     break;
373     }
374    
375     /* Control Memory access */
376     if (offset < TI1570_CTRL_MEM_SIZE) {
377     if (op_type == MTS_READ)
378     *data = d->ctrl_mem_ptr[offset >> 2];
379     else
380     d->ctrl_mem_ptr[offset >> 2] = *data;
381     return NULL;
382     }
383    
384     /* Unknown offset */
385     #if DEBUG_UNKNOWN
386     if (op_type == MTS_READ) {
387     cpu_log(cpu,d->name,"read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
388 dpavlin 7 offset,cpu_get_pc(cpu),op_size);
389 dpavlin 1 } else {
390     cpu_log(cpu,d->name,"write to unknown addr 0x%x, value=0x%llx, "
391 dpavlin 7 "pc=0x%llx (size=%u)\n",offset,*data,cpu_get_pc(cpu),op_size);
392 dpavlin 1 }
393     #endif
394     return NULL;
395     }
396    
397     /* Fetch a TX data buffer from host memory */
398     static void ti1570_read_tx_buffer(struct pa_a1_data *d,m_uint32_t addr,
399     ti1570_tx_buffer_t *tx_buf)
400     {
401     physmem_copy_from_vm(d->vm,tx_buf,addr,sizeof(ti1570_tx_buffer_t));
402    
403     /* byte-swapping */
404     tx_buf->ctrl_buf = vmtoh32(tx_buf->ctrl_buf);
405     tx_buf->nb_addr = vmtoh32(tx_buf->nb_addr);
406     tx_buf->atm_hdr = vmtoh32(tx_buf->atm_hdr);
407     tx_buf->aal5_ctrl = vmtoh32(tx_buf->aal5_ctrl);
408     }
409    
410     /* Acquire a TX buffer */
411     static int ti1570_acquire_tx_buffer(struct pa_a1_data *d,
412     ti1570_tx_dma_entry_t *tde,
413     m_uint32_t buf_addr)
414     {
415     ti1570_tx_buffer_t tx_buf;
416     m_uint32_t buf_offset;
417    
418     #if DEBUG_TRANSMIT
419     TI1570_LOG(d,"ti1570_acquire_tx_buffer: acquiring buffer at address 0x%x\n",
420     buf_addr);
421     #endif
422    
423     /* Read the TX buffer from host memory */
424     ti1570_read_tx_buffer(d,buf_addr,&tx_buf);
425    
426     /* The buffer must be ready to be acquired */
427     if (!(tx_buf.ctrl_buf & TI1570_TX_BUFFER_RDY))
428     return(FALSE);
429    
430     /* Put the TX buffer data into the TX DMA state entry */
431     tde->ctrl_buf = tx_buf.ctrl_buf;
432     tde->nb_addr = tx_buf.nb_addr << 2;
433    
434     /* Read the ATM header only from the first buffer */
435     if (tx_buf.ctrl_buf & TI1570_TX_BUFFER_SOP) {
436     tde->atm_hdr = tx_buf.atm_hdr;
437     tde->aal5_ctrl = tx_buf.aal5_ctrl;
438     tde->aal5_crc = 0xFFFFFFFF;
439     }
440    
441     /* Compute the current-buffer-data address */
442     buf_offset = tx_buf.ctrl_buf & TI1570_TX_BUFFER_OFFSET_MASK;
443     buf_offset >>= TI1570_TX_BUFFER_OFFSET_SHIFT;
444     tde->cb_addr = buf_addr + sizeof(tx_buf) + buf_offset;
445    
446     /* Remember the start address of the buffer */
447     tde->sb_addr = buf_addr;
448     return(TRUE);
449     }
450    
451     /* Returns TRUE if the TX DMA entry is for an AAL5 packet */
452     static inline int ti1570_is_tde_aal5(ti1570_tx_dma_entry_t *tde)
453     {
454     m_uint32_t pkt_type;
455    
456     pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
457     return(pkt_type == TI1570_TX_DMA_AAL_AAL5);
458     }
459    
460     /* Update the AAL5 partial CRC */
461     static void ti1570_update_aal5_crc(struct pa_a1_data *d,
462     ti1570_tx_dma_entry_t *tde)
463     {
464     tde->aal5_crc = crc32_compute(tde->aal5_crc,
465     &d->txfifo_cell[ATM_HDR_SIZE],
466     ATM_PAYLOAD_SIZE);
467     }
468    
469     /*
470     * Update the TX DMA entry buffer offset and count when "data_len" bytes
471     * have been transmitted.
472     */
473     static void ti1570_update_tx_dma_bufinfo(ti1570_tx_dma_entry_t *tde,
474     m_uint32_t buf_size,
475     m_uint32_t data_len)
476     {
477     m_uint32_t tmp,tot_len;
478    
479     /* update the current buffer address */
480     tde->cb_addr += data_len;
481    
482     /* set the remaining byte count */
483     tmp = tde->ctrl_buf & ~TI1570_TX_BUFFER_DCOUNT_MASK;
484     tde->ctrl_buf = tmp + (buf_size - data_len);
485    
486     /* update the AAL5 count */
487     if (ti1570_is_tde_aal5(tde)) {
488     tot_len = tde->aal5_ctrl & TI1570_TX_DMA_RING_AAL5_LEN_MASK;
489     tot_len += data_len;
490    
491     tmp = (tde->aal5_ctrl & ~TI1570_TX_DMA_RING_AAL5_LEN_MASK) + tot_len;
492     tde->aal5_ctrl = tmp;
493     }
494     }
495    
496     /* Clear the TX fifo */
497     static void ti1570_clear_tx_fifo(struct pa_a1_data *d)
498     {
499     d->txfifo_avail = ATM_PAYLOAD_SIZE;
500     d->txfifo_pos = ATM_HDR_SIZE;
501     memset(d->txfifo_cell,0,ATM_CELL_SIZE);
502     }
503    
504     /*
505     * Transmit the TX FIFO cell through the NETIO infrastructure if
506     * it is full.
507     */
508     static void ti1570_send_tx_fifo(struct pa_a1_data *d,
509     ti1570_tx_dma_entry_t *tde,
510     int update_aal5_crc)
511     {
512     if (d->txfifo_avail == 0) {
513     #if DEBUG_TRANSMIT
514     TI1570_LOG(d,"ti1570_transmit_cell: transmitting to NETIO device\n");
515     mem_dump(log_file,d->txfifo_cell,ATM_CELL_SIZE);
516     #endif
517     if (update_aal5_crc)
518     ti1570_update_aal5_crc(d,tde);
519    
520     netio_send(d->nio,d->txfifo_cell,ATM_CELL_SIZE);
521     ti1570_clear_tx_fifo(d);
522     }
523     }
524    
525     /* Add padding to the FIFO */
526     static void ti1570_add_tx_padding(struct pa_a1_data *d,m_uint32_t len)
527     {
528     if (len > d->txfifo_avail) {
529     TI1570_LOG(d,"ti1570_add_tx_padding: trying to add too large "
530     "padding (avail: 0x%x, pad: 0x%x)\n",d->txfifo_avail,len);
531     len = d->txfifo_avail;
532     }
533    
534     memset(&d->txfifo_cell[d->txfifo_pos],0,len);
535     d->txfifo_pos += len;
536     d->txfifo_avail -= len;
537     }
538    
539     /* Initialize an ATM cell for transmitting */
540     static m_uint32_t ti1570_init_tx_atm_cell(struct pa_a1_data *d,
541     ti1570_tx_dma_entry_t *tde,
542     int set_pti)
543     {
544     m_uint32_t buf_size,len,atm_hdr;
545    
546     buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
547     len = m_min(buf_size,d->txfifo_avail);
548    
549     #if DEBUG_TRANSMIT
550     TI1570_LOG(d,"ti1570_init_tx_atm_cell: data ptr=0x%x, "
551     "buf_size=%u (0x%x), len=%u (0x%x), atm_hdr=0x%x\n",
552     tde->cb_addr,buf_size,buf_size,len,len,tde->atm_hdr);
553     #endif
554    
555     /* copy the ATM header */
556     atm_hdr = tde->atm_hdr;
557    
558     if (set_pti) {
559     atm_hdr &= ~ATM_PTI_NETWORK;
560     atm_hdr |= ATM_PTI_EOP;
561     }
562    
563     *(m_uint32_t *)d->txfifo_cell = htonl(atm_hdr);
564    
565     /* compute HEC field */
566     atm_insert_hec(d->txfifo_cell);
567    
568     /* copy the payload and try to transmit if the FIFO is full */
569     if (len > 0) {
570     physmem_copy_from_vm(d->vm,&d->txfifo_cell[d->txfifo_pos],
571     tde->cb_addr,len);
572     d->txfifo_pos += len;
573     d->txfifo_avail -= len;
574     }
575    
576     ti1570_update_tx_dma_bufinfo(tde,buf_size,len);
577     return(len);
578     }
579    
580     /*
581     * Transmit a Transparent-AAL ATM cell through the NETIO infrastructure.
582     */
583     static int ti1570_transmit_transp_cell(struct pa_a1_data *d,
584     ti1570_tx_dma_entry_t *tde,
585     int atm_set_eop,int *buf_end)
586     {
587     m_uint32_t buf_size,len;
588     int pkt_end,last_cell;
589    
590     pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
591     buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
592     last_cell = FALSE;
593    
594     if (!pkt_end) {
595     len = ti1570_init_tx_atm_cell(d,tde,FALSE);
596     ti1570_send_tx_fifo(d,tde,FALSE);
597    
598     if ((buf_size - len) == 0)
599     *buf_end = TRUE;
600    
601     return(FALSE);
602     }
603    
604     /* this is the end of packet and the last buffer */
605     if (buf_size <= d->txfifo_avail)
606     last_cell = TRUE;
607    
608     len = ti1570_init_tx_atm_cell(d,tde,last_cell & atm_set_eop);
609     if (last_cell) ti1570_add_tx_padding(d,d->txfifo_avail);
610     ti1570_send_tx_fifo(d,tde,FALSE);
611     return(last_cell);
612     }
613    
614     /* Add the AAL5 trailer to the TX FIFO */
615     static void ti1570_add_aal5_trailer(struct pa_a1_data *d,
616     ti1570_tx_dma_entry_t *tde)
617     {
618     m_uint8_t *trailer;
619    
620     trailer = &d->txfifo_cell[ATM_AAL5_TRAILER_POS];
621    
622     /* Control field + Length */
623     *(m_uint32_t *)trailer = htonl(tde->aal5_ctrl);
624    
625     /* Final CRC-32 computation */
626     tde->aal5_crc = crc32_compute(tde->aal5_crc,
627     &d->txfifo_cell[ATM_HDR_SIZE],
628     ATM_PAYLOAD_SIZE - 4);
629    
630     *(m_uint32_t *)(trailer+4) = htonl(~tde->aal5_crc);
631    
632     /* Consider the FIFO as full */
633     d->txfifo_avail = 0;
634     }
635    
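/*
 * The AAL5 trailer always ends up in the last ATM_AAL5_TRAILER_SIZE bytes of
 * the final cell payload, with zero padding filling the gap before it, so an
 * AAL5 PDU of n bytes is segmented into ceil((n + trailer) / payload) cells.
 * A minimal sketch of that arithmetic, assuming ATM_PAYLOAD_SIZE is the
 * 48-byte cell payload and ATM_AAL5_TRAILER_SIZE the 8-byte trailer as
 * defined in atm.h (illustrative helper, not used by the driver); for
 * example, a 60-byte PDU needs (60 + 8 + 47) / 48 = 2 cells:
 */
static inline m_uint32_t ti1570_aal5_cell_count(m_uint32_t pdu_len)
{
   return((pdu_len + ATM_AAL5_TRAILER_SIZE + ATM_PAYLOAD_SIZE - 1) /
          ATM_PAYLOAD_SIZE);
}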
636     /*
637     * Transmit an AAL5 cell through the NETIO infrastructure.
638     *
639     * Returns TRUE if this is the real end of packet.
640     */
641     static int ti1570_transmit_aal5_cell(struct pa_a1_data *d,
642     ti1570_tx_dma_entry_t *tde,
643     int *buf_end)
644     {
645     m_uint32_t buf_size,len;
646     int pkt_end;
647    
648     pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
649     buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
650    
651     #if DEBUG_TRANSMIT
652     TI1570_LOG(d,"ti1570_transmit_aal5_cell: data ptr=0x%x, "
653     "buf_size=0x%x (%u)\n",tde->cb_addr,buf_size,buf_size);
654     #endif
655    
656     /* If this is not the end of packet, transmit the cell normally */
657     if (!pkt_end) {
658     len = ti1570_init_tx_atm_cell(d,tde,FALSE);
659     ti1570_send_tx_fifo(d,tde,TRUE);
660    
661     if ((buf_size - len) == 0)
662     *buf_end = TRUE;
663    
664     return(FALSE);
665     }
666    
667     /*
668     * This is the end of packet, check if we need to emit a special cell
669     * for the AAL5 trailer.
670     */
671     if ((buf_size + ATM_AAL5_TRAILER_SIZE) <= d->txfifo_avail) {
672     len = ti1570_init_tx_atm_cell(d,tde,TRUE);
673    
674     /* add the padding */
675     ti1570_add_tx_padding(d,d->txfifo_avail - ATM_AAL5_TRAILER_SIZE);
676    
677     /* add the AAL5 trailer at offset 40 */
678     ti1570_add_aal5_trailer(d,tde);
679    
680     /* we can transmit the cell */
681     ti1570_send_tx_fifo(d,tde,FALSE);
682    
683     *buf_end = TRUE;
684     return(TRUE);
685     }
686    
687     /* Transmit the cell normally */
688     len = ti1570_init_tx_atm_cell(d,tde,FALSE);
689     ti1570_add_tx_padding(d,d->txfifo_avail);
690     ti1570_send_tx_fifo(d,tde,TRUE);
691     return(FALSE);
692     }
693    
694     /* Update the TX completion ring */
695     static void ti1570_update_tx_cring(struct pa_a1_data *d,
696     ti1570_tx_dma_entry_t *tde)
697     {
698     m_uint32_t tcr_addr,tcr_end,val;
699    
700     if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
701     /* TX completion ring with interrupt */
702     tcr_addr = d->iregs[TI1570_REG_TCR_WI_ADDR] + (d->tcr_wi_pos * 4);
703     } else {
704     /* TX completion ring without interrupt */
705     tcr_addr = d->iregs[TI1570_REG_TCR_WOI_ADDR] + (d->tcr_woi_pos * 4);
706     }
707    
708     #if DEBUG_TRANSMIT
709     TI1570_LOG(d,"ti1570_update_tx_cring: posting 0x%x at address 0x%x\n",
710     tde->sb_addr,tcr_addr);
711    
712     physmem_dump_vm(d->vm,tde->sb_addr,sizeof(ti1570_tx_buffer_t) >> 2);
713     #endif
714    
715     /* we have a TX freeze if the buffer belongs to the host */
716     val = physmem_copy_u32_from_vm(d->vm,tcr_addr);
717     if (!(val & TI1570_TCR_OWN)) {
718     d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_TX_FRZ;
719     return;
720     }
721    
722     /* put the buffer address in the ring */
723     val = tde->sb_addr >> 2;
724    
725     if (tde->ctrl_buf & TI1570_TX_DMA_ABORT)
726     val |= TI1570_TCR_ABORT;
727    
728     physmem_copy_u32_to_vm(d->vm,tcr_addr,val);
729    
730     /* update the internal position pointer */
731     if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT) {
732     tcr_end = d->iregs[TI1570_REG_TX_CRING_SIZE] & TI1570_TCR_SIZE_MASK;
733    
734     if ((d->tcr_wi_pos++) == tcr_end)
735     d->tcr_wi_pos = 0;
736     } else {
737     tcr_end = (d->iregs[TI1570_REG_TX_CRING_SIZE] >> 16);
738     tcr_end &= TI1570_TCR_SIZE_MASK;
739    
740     if ((d->tcr_woi_pos++) == tcr_end)
741     d->tcr_woi_pos = 0;
742     }
743     }
744    
745     /* Analyze a TX DMA state table entry */
746     static int ti1570_scan_tx_dma_entry_single(struct pa_a1_data *d,
747     m_uint32_t index)
748     {
749     ti1570_tx_dma_entry_t *tde;
750     m_uint32_t psr_base,psr_addr,psr_entry,psr_end;
751     m_uint32_t buf_addr,buf_size,pkt_type,tmp;
752     m_uint32_t psr_index;
753     int atm_set_eop = 0;
754     int pkt_end,buf_end = 0;
755    
756     tde = &d->tx_dma_table[index];
757    
758     /* The DMA channel state flag must be ON */
759     if (!(tde->dma_state & TI1570_TX_DMA_ON))
760     return(FALSE);
761    
762     #if DEBUG_TX_DMA
763     /* We have a running DMA channel */
764     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is ON "
765     "(ctrl_buf = 0x%x)\n",index,tde->ctrl_buf);
766     #endif
767    
768     /* Is this the start of a new packet ? */
769     if (!(tde->ctrl_buf & TI1570_TX_DMA_ACT))
770     {
771     #if DEBUG_TX_DMA
772     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: TX DMA entry %u is not ACT\n",
773     index);
774     #endif
775    
776     /* No packet yet, fetch it from the packet-segmentation ring */
777     psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
778     psr_index = tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK;
779    
780     /* Compute address of the current packet segmentation ring entry */
781     psr_addr = (psr_base + psr_index) << 2;
782     psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);
783    
784     #if DEBUG_TX_DMA
785     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: psr_addr = 0x%x, "
786     "psr_entry = 0x%x\n",psr_addr,psr_entry);
787     #endif
788    
789     /* The packet-segmentation-ring entry is owned by host, quit now */
790     if (!(psr_entry & TI1570_TX_RING_OWN))
791     return(FALSE);
792    
793     /* Acquire the first buffer (it MUST be in the ready state) */
794     buf_addr = (psr_entry & TI1570_TX_RING_PTR_MASK) << 2;
795    
796     if (!ti1570_acquire_tx_buffer(d,tde,buf_addr)) {
797     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR entry with OWN bit set "
798     "but buffer without RDY bit set.\n");
799     return(FALSE);
800     }
801    
802     /* Set ACT bit for the DMA channel */
803     tde->ctrl_buf |= TI1570_TX_DMA_ACT;
804     }
805    
806     /* Compute the remaining size and determine the packet type */
807     buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
808     pkt_type = tde->ctrl_buf & TI1570_TX_DMA_AAL_TYPE_MASK;
809     pkt_end = tde->ctrl_buf & TI1570_TX_DMA_EOP;
810    
811     #if DEBUG_TRANSMIT
812     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: ctrl_buf=0x%8.8x, "
813     "cb_addr=0x%8.8x, atm_hdr=0x%8.8x, dma_state=0x%8.8x\n",
814     tde->ctrl_buf, tde->cb_addr, tde->atm_hdr, tde->dma_state);
815    
816     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: nb_addr=0x%8.8x, "
817     "sb_addr=0x%8.8x, aal5_crc=0x%8.8x, aal5_ctrl=0x%8.8x\n",
818     tde->nb_addr, tde->sb_addr, tde->aal5_crc, tde->aal5_ctrl);
819     #endif
820    
821     /*
822     * If the current buffer is now empty and if this is not the last
823     * buffer in the current packet, try to fetch a new buffer.
824     * If the next buffer is not yet ready, we have finished.
825     */
826     if (!buf_size && !pkt_end && !ti1570_acquire_tx_buffer(d,tde,tde->nb_addr))
827     return(FALSE);
828    
829     switch(pkt_type) {
830     case TI1570_TX_DMA_AAL_TRWPTI:
831     atm_set_eop = 1;    /* fall through */
832    
833     case TI1570_TX_DMA_AAL_TRWOPTI:
834     /* Transmit the ATM cell transparently */
835     pkt_end = ti1570_transmit_transp_cell(d,tde,atm_set_eop,&buf_end);
836     break;
837    
838     case TI1570_TX_DMA_AAL_AAL5:
839     pkt_end = ti1570_transmit_aal5_cell(d,tde,&buf_end);
840     break;
841    
842     default:
843     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: invalid AAL-type\n");
844     return(FALSE);
845     }
846    
847     /* Re-read the remaining buffer size */
848     buf_size = tde->ctrl_buf & TI1570_TX_DMA_DCOUNT_MASK;
849    
850     /* Put the buffer address in the transmit completion ring */
851     if (buf_end) ti1570_update_tx_cring(d,tde);
852    
853     /*
854     * If we have reached end of packet (EOP): clear the ACT bit,
855     * give back the packet-segmentation ring entry to the host,
856     * and increment the PSR index.
857     */
858     if (pkt_end) {
859     tde->ctrl_buf &= ~TI1570_TX_DMA_ACT;
860    
861     /* Clear the OWN bit of the packet-segmentation ring entry */
862     psr_base = tde->dma_state & TI1570_TX_DMA_RING_OFFSET_MASK;
863     psr_index = (tde->dma_state & TI1570_TX_DMA_RING_INDEX_MASK);
864     psr_addr = (psr_base + psr_index) << 2;
865    
866     psr_entry = physmem_copy_u32_from_vm(d->vm,psr_addr);
867     psr_entry &= ~TI1570_TX_RING_OWN;
868     physmem_copy_u32_to_vm(d->vm,psr_addr,psr_entry);
869    
870     /* Increment the packet-segmentation ring index */
871     psr_index++;
872     psr_end = d->iregs[TI1570_REG_TX_PSR_SIZE] >> 16;
873     psr_end &= TI1570_PSR_SIZE_MASK;
874    
875     if (psr_index > psr_end) {
876     psr_index = 0;
877     #if DEBUG_TX_DMA
878     TI1570_LOG(d,"ti1570_scan_tx_dma_entry: PSR ring rotation "
879     "(psr_end = %u)\n",psr_end);
880     #endif
881     }
882    
883     tmp = (tde->dma_state & ~TI1570_TX_DMA_RING_INDEX_MASK);
884     tmp |= (psr_index & TI1570_TX_DMA_RING_INDEX_MASK);
885     tde->dma_state = tmp;
886     }
887    
888     /* Generate an interrupt if required */
889     if (tde->ctrl_buf & TI1570_TX_DMA_TCR_SELECT)
890     {
891     if (((d->iregs[TI1570_REG_STATUS] & TI1570_CFG_BP_SEL) && buf_end) ||
892     pkt_end)
893     {
894     d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_TX;
895     pci_dev_trigger_irq(d->vm,d->pci_dev_ti);
896     }
897     }
898    
899     return(TRUE);
900     }
901    
902     /* Analyze a TX DMA state table entry */
903     static void ti1570_scan_tx_dma_entry(struct pa_a1_data *d,m_uint32_t index)
904     {
905     int i;
906    
907     for(i=0;i<TI1570_TXDMA_PASS_COUNT;i++)
908     if (!ti1570_scan_tx_dma_entry_single(d,index))
909     break;
910     }
911    
912     /* Analyze the TX schedule table */
913     static void ti1570_scan_tx_sched_table(struct pa_a1_data *d)
914     {
915     m_uint32_t cw,index0,index1;
916     u_int i;
917    
918     for(i=0;i<TI1570_TX_SCHED_ENTRY_COUNT>>1;i++) {
919     cw = d->tx_sched_table[i];
920    
921     /* We have 2 indexes in the TX DMA state table per word */
922     index0 = (cw >> TI1570_TX_SCHED_E0_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
923     index1 = (cw >> TI1570_TX_SCHED_E1_SHIFT) & TI1570_TX_SCHED_ENTRY_MASK;
924    
925     /* Scan the two entries (null entry => nothing to do) */
926     if (index0) ti1570_scan_tx_dma_entry(d,index0);
927     if (index1) ti1570_scan_tx_dma_entry(d,index1);
928     }
929     }
930    
931     /*
932     * Read a RX buffer from the host memory.
933     */
934     static void ti1570_read_rx_buffer(struct pa_a1_data *d,m_uint32_t addr,
935     ti1570_rx_buffer_t *rx_buf)
936     {
937     physmem_copy_from_vm(d->vm,rx_buf,addr,sizeof(ti1570_rx_buffer_t));
938    
939     /* byte-swapping */
940     rx_buf->reserved = vmtoh32(rx_buf->reserved);
941     rx_buf->ctrl = vmtoh32(rx_buf->ctrl);
942     rx_buf->atm_hdr = vmtoh32(rx_buf->atm_hdr);
943     rx_buf->user = vmtoh32(rx_buf->user);
944     }
945    
946     /* Update the RX completion ring */
947     static void ti1570_update_rx_cring(struct pa_a1_data *d,
948     ti1570_rx_dma_entry_t *rde,
949     m_uint32_t atm_hdr,
950     m_uint32_t aal5_trailer,
951     m_uint32_t err_ind,
952     m_uint32_t fbuf_valid)
953     {
954     m_uint32_t rcr_addr,rcr_end,aal_type,ptr,val;
955     ti1570_rcr_entry_t rcre;
956    
957     if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
958     /* RX completion ring with interrupt */
959     rcr_addr = d->iregs[TI1570_REG_RCR_WI_ADDR];
960     rcr_addr += (d->rcr_wi_pos * sizeof(rcre));
961     } else {
962     /* RX completion ring without interrupt */
963     rcr_addr = d->iregs[TI1570_REG_RCR_WOI_ADDR];
964     rcr_addr += (d->rcr_woi_pos * sizeof(rcre));
965     }
966    
967     #if DEBUG_RECEIVE
968     TI1570_LOG(d,"ti1570_update_rx_cring: posting 0x%x at address 0x%x\n",
969     (rde->sp_ptr << 2),rcr_addr);
970    
971     physmem_dump_vm(d->vm,rde->sp_ptr<<2,sizeof(ti1570_rx_buffer_t) >> 2);
972     #endif
973    
974     /* we have a RX freeze if the buffer belongs to the host */
975     ptr = rcr_addr + OFFSET(ti1570_rcr_entry_t,fbr_entry);
976     val = physmem_copy_u32_from_vm(d->vm,ptr);
977    
978     if (!(val & TI1570_RCR_OWN)) {
979     TI1570_LOG(d,"ti1570_update_rx_cring: RX freeze...\n");
980     d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_RX_FRZ;
981     return;
982     }
983    
984     /* fill the RX completion ring entry and write it back to the host */
985     memset(&rcre,0,sizeof(rcre));
986    
987     /* word 0: atm header from last cell received */
988     rcre.atm_hdr = atm_hdr;
989    
990     /* word 1: error indicator */
991     aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
992     if (aal_type == TI1570_RX_DMA_AAL_AAL5)
993     rcre.error |= TI1570_RCR_AAL5;
994    
995     rcre.error |= err_ind;
996    
997     /* word 2: Start of packet */
998     if (fbuf_valid)
999     rcre.sp_addr = TI1570_RCR_VALID | rde->sp_ptr;
1000    
1001     /* word 3: AAL5 trailer */
1002     rcre.aal5_trailer = aal5_trailer;
1003    
1004     /* word 4: OWN + error entry + free-buffer ring pointer */
1005     rcre.fbr_entry = rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK;
1006     if (err_ind) rcre.fbr_entry |= TI1570_RCR_ERROR;
1007    
1008     /* byte-swap and write this back to the host memory */
1009     rcre.atm_hdr = htonl(rcre.atm_hdr);
1010     rcre.error = htonl(rcre.error);
1011     rcre.sp_addr = htonl(rcre.sp_addr);
1012     rcre.aal5_trailer = htonl(rcre.aal5_trailer);
1013     rcre.fbr_entry = htonl(rcre.fbr_entry);
1014     physmem_copy_to_vm(d->vm,&rcre,rcr_addr,sizeof(rcre));
1015    
1016     /* clear the active bit of the RX DMA entry */
1017     rde->ctrl &= ~TI1570_RX_DMA_ACT;
1018    
1019     /* update the internal position pointer */
1020     if (rde->ctrl & TI1570_RX_DMA_RCR_SELECT) {
1021     rcr_end = d->iregs[TI1570_REG_RX_CRING_SIZE] & TI1570_RCR_SIZE_MASK;
1022    
1023     if ((d->rcr_wi_pos++) == rcr_end)
1024     d->rcr_wi_pos = 0;
1025    
1026     /* generate the appropriate IRQ */
1027     d->iregs[TI1570_REG_STATUS] |= TI1570_STAT_CP_RX;
1028     pci_dev_trigger_irq(d->vm,d->pci_dev_ti);
1029     } else {
1030     rcr_end = (d->iregs[TI1570_REG_RX_CRING_SIZE] >> 16);
1031     rcr_end &= TI1570_RCR_SIZE_MASK;
1032    
1033     if ((d->rcr_woi_pos++) == rcr_end)
1034     d->rcr_woi_pos = 0;
1035     }
1036     }
1037    
1038     /*
1039     * Acquire a free RX buffer.
1040     *
1041     * Returns FALSE if no buffer is available (buffer starvation).
1042     */
1043     static int ti1570_acquire_rx_buffer(struct pa_a1_data *d,
1044     ti1570_rx_dma_entry_t *rde,
1045     ti1570_rx_buf_holder_t *rbh,
1046     m_uint32_t atm_hdr)
1047     {
1048     ti1570_rx_fbr_entry_t *fbr_entry = NULL;
1049     m_uint32_t bp_addr,buf_addr,buf_size,buf_idx;
1050     m_uint32_t ring_index,ring_size;
1051     m_uint32_t buf_ptr,val;
1052     int fifo = FALSE;
1053    
1054     /* To keep this fucking compiler quiet */
1055     ring_size = 0;
1056     buf_idx = 0;
1057    
1058     if (rde->ctrl & TI1570_RX_DMA_FIFO) {
1059     bp_addr = (rde->fbr_entry & TI1570_RX_DMA_FB_PTR_MASK) << 2;
1060     buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
1061     buf_size = d->iregs[TI1570_REG_TX_PSR_SIZE] & 0xFFFF;
1062     fifo = TRUE;
1063    
1064     #if DEBUG_RECEIVE
1065     TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring FIFO buffer\n");
1066     #endif
1067     }
1068     else
1069     {
1070     ring_index = rde->fbr_entry & TI1570_RX_DMA_FB_INDEX_MASK;
1071     fbr_entry = &d->rx_fbr_table[ring_index];
1072    
1073     #if DEBUG_RECEIVE
1074     TI1570_LOG(d,"ti1570_acquire_rx_buffer: acquiring non-FIFO buffer, "
1075     "ring index=%u (0x%x)\n",ring_index,ring_index);
1076     #endif
1077    
1078     /* Compute the number of entries in ring */
1079     ring_size = fbr_entry->ring_size & TI1570_RX_FBR_RS_MASK;
1080     ring_size >>= TI1570_RX_FBR_RS_SHIFT;
1081     ring_size = (ring_size << 4) + 15 + 1;
1082    
1083     /* Compute the buffer size */
1084     buf_size = fbr_entry->ring_size & TI1570_RX_FBR_BS_MASK;
1085     buf_size >>= TI1570_RX_FBR_BS_SHIFT;
1086    
1087     /* Compute the buffer address */
1088     buf_idx = fbr_entry->ring_size & TI1570_RX_FBR_IDX_MASK;
1089     bp_addr = fbr_entry->fbr_ptr + (buf_idx << 2);
1090    
1091     #if DEBUG_RECEIVE
1092     TI1570_LOG(d,"ti1570_acquire_rx_buffer: ring size=%u (0x%x), "
1093     "buf size=%u ATM cells\n",ring_size,ring_size,buf_size);
1094    
1095     TI1570_LOG(d,"ti1570_acquire_rx_buffer: buffer index=%u (0x%x), "
1096     "buffer ptr address = 0x%x\n",buf_idx,buf_idx,bp_addr);
1097     #endif
1098    
1099     buf_ptr = physmem_copy_u32_from_vm(d->vm,bp_addr);
1100     }
1101    
1102     #if DEBUG_RECEIVE
1103     TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_ptr = 0x%x\n",buf_ptr);
1104     #endif
1105    
1106     /* The TI1570 must own the buffer */
1107     if (!(buf_ptr & TI1570_RX_BUFPTR_OWN)) {
1108     TI1570_LOG(d,"ti1570_acquire_rx_buffer: no free buffer available.\n");
1109     return(FALSE);
1110     }
1111    
1112     /*
1113     * If we are using a ring, we have to clear the OWN bit and increment
1114     * the index field.
1115     */
1116     if (!fifo) {
1117     buf_ptr &= ~TI1570_RX_BUFPTR_OWN;
1118     physmem_copy_u32_to_vm(d->vm,bp_addr,buf_ptr);
1119    
1120     if (++buf_idx == ring_size) {
1121     #if DEBUG_RECEIVE
1122     TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_idx=0x%x, "
1123     "ring_size=0x%x -> resetting buf_idx\n",
1124     buf_idx-1,ring_size);
1125     #endif
1126     buf_idx = 0;
1127     }
1128    
1129     val = fbr_entry->ring_size & ~TI1570_RX_FBR_IDX_MASK;
1130     val |= buf_idx;
1131     fbr_entry->ring_size = val;
1132     }
1133    
1134     /* Get the buffer address */
1135     buf_addr = (buf_ptr & TI1570_RX_BUFPTR_MASK) << 2;
1136    
1137     #if DEBUG_RECEIVE
1138     TI1570_LOG(d,"ti1570_acquire_rx_buffer: buf_addr = 0x%x\n",buf_addr);
1139     #endif
1140    
1141     /* Read the buffer descriptor itself and store info for caller */
1142     rbh->buf_addr = buf_addr;
1143     rbh->buf_size = buf_size;
1144     ti1570_read_rx_buffer(d,buf_addr,&rbh->rx_buf);
1145    
1146     /* Clear the control field */
1147     physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,ctrl),0);
1148    
1149     /* Store the ATM header in data buffer */
1150     physmem_copy_u32_to_vm(d->vm,buf_addr+OFFSET(ti1570_rx_buffer_t,atm_hdr),
1151     atm_hdr);
1152     return(TRUE);
1153     }
1154    
1155     /* Insert a new free buffer in a RX DMA entry */
1156     static void ti1570_insert_rx_free_buf(struct pa_a1_data *d,
1157     ti1570_rx_dma_entry_t *rde,
1158     ti1570_rx_buf_holder_t *rbh)
1159     {
1160     m_uint32_t val,aal_type;
1161    
1162     aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
1163    
1164     /* Set current and start of buffer addresses */
1165     rde->cb_addr = rbh->buf_addr + sizeof(ti1570_rx_buffer_t);
1166     rde->sb_addr = rbh->buf_addr >> 2;
1167    
1168     /* Set the buffer length */
1169     val = rbh->buf_size;
1170    
1171     if (aal_type == TI1570_RX_DMA_AAL_CNT)
1172     val |= (rde->aal5_crc & 0xFFFF) << 16;
1173    
1174     rde->cb_len = val;
1175     }
1176    
1177     /* Store a RX cell */
1178     static int ti1570_store_rx_cell(struct pa_a1_data *d,
1179     ti1570_rx_dma_entry_t *rde,
1180     m_uint8_t *atm_cell)
1181     {
1182     m_uint32_t aal_type,atm_hdr,aal5_trailer,pti,real_eop,pti_eop;
1183     m_uint32_t prev_buf_addr,buf_len,val,ptr,cnt;
1184     ti1570_rx_buf_holder_t rbh;
1185    
1186     real_eop = pti_eop = FALSE;
1187     aal_type = rde->ctrl & TI1570_RX_DMA_AAL_TYPE_MASK;
1188    
1189     /* Extract PTI from the ATM header */
1190     atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
1191     pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1192    
1193     /* PTI == 0x1 or 0x3 => EOP */
1194     if ((pti == 0x01) || (pti == 0x03))
1195     pti_eop = TRUE;
1196    
1197     if (rde->ctrl & TI1570_RX_DMA_WAIT_EOP) {
1198     TI1570_LOG(d,"ti1570_store_rx_cell: EOP processing, not handled yet.\n");
1199     return(FALSE);
1200     }
1201    
1202     /* AAL5 special processing */
1203     if (aal_type == TI1570_RX_DMA_AAL_AAL5)
1204     {
1205     /* Check that we don't exceed 1366 cells for AAL5 */
1206     /* XXX TODO */
1207     }
1208     else
1209     {
1210     /* EOP processing for non counter-based transparent-AAL packets */
1211     if ((rde->ctrl & TI1570_RX_DMA_WAIT_EOP) && pti_eop)
1212     {
1213     /* XXX TODO */
1214     }
1215     }
1216    
1217     /* do we have enough room in buffer ? */
1218     buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;
1219    
1220     if (!buf_len) {
1221     prev_buf_addr = rde->sb_addr << 2;
1222    
1223     /* acquire a new free buffer */
1224     if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
1225     rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
1226     return(FALSE);
1227     }
1228    
1229     /* insert the free buffer in the RX DMA structure */
1230     ti1570_insert_rx_free_buf(d,rde,&rbh);
1231    
1232     /* chain the buffers (keep SOP/EOP bits intact) */
1233     ptr = prev_buf_addr + OFFSET(ti1570_rx_buffer_t,ctrl);
1234    
1235     val = physmem_copy_u32_from_vm(d->vm,ptr);
1236     val |= rde->sb_addr;
1237     physmem_copy_u32_to_vm(d->vm,ptr,val);
1238    
1239     /* read the new buffer length */
1240     buf_len = rde->cb_len & TI1570_RX_DMA_CB_LEN_MASK;
1241     }
1242    
1243     /* copy the ATM payload */
1244     #if DEBUG_RECEIVE
1245     TI1570_LOG(d,"ti1570_store_rx_cell: storing cell payload at 0x%x "
1246     "(buf_addr=0x%x)\n",rde->cb_addr,rde->sb_addr << 2);
1247     #endif
1248    
1249     physmem_copy_to_vm(d->vm,&atm_cell[ATM_HDR_SIZE],
1250     rde->cb_addr,ATM_PAYLOAD_SIZE);
1251     rde->cb_addr += ATM_PAYLOAD_SIZE;
1252    
1253     /* update the current buffer length */
1254     val = rde->cb_len & ~TI1570_RX_DMA_CB_LEN_MASK;
1255     rde->cb_len = val | (--buf_len);
1256    
1257     #if DEBUG_RECEIVE
1258     TI1570_LOG(d,"ti1570_store_rx_cell: new rde->cb_len = 0x%x, "
1259     "buf_len=0x%x\n",rde->cb_len,buf_len);
1260     #endif
1261    
1262     /* determine if this is the end of the packet (EOP) */
1263     if (aal_type == TI1570_RX_DMA_AAL_CNT)
1264     {
1265     /* counter-based transparent-AAL packets */
1266     cnt = rde->cb_len & TI1570_RX_DMA_TR_CNT_MASK;
1267     cnt >>= TI1570_RX_DMA_TR_CNT_SHIFT;
1268    
1269     /* if the counter reaches 0, this is the EOP */
1270     if (--cnt == 0)
1271     real_eop = TRUE;
1272    
1273     val = rde->cb_len & ~TI1570_RX_DMA_TR_CNT_MASK;
1274     val |= cnt << TI1570_RX_DMA_TR_CNT_SHIFT;
1275     }
1276     else {
1277     /* PTI-based transparent AAL packets or AAL5 */
1278     if (pti_eop)
1279     real_eop = TRUE;
1280     }
1281    
1282     if (real_eop) {
1283     /* mark the buffer as EOP */
1284     ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
1285     val = physmem_copy_u32_from_vm(d->vm,ptr);
1286     val |= TI1570_RX_BUFFER_EOP;
1287     physmem_copy_u32_to_vm(d->vm,ptr,val);
1288    
1289     /* get the aal5 trailer */
1290     aal5_trailer = ntohl(*(m_uint32_t *)&atm_cell[ATM_AAL5_TRAILER_POS]);
1291    
1292     /* post the entry into the appropriate RX completion ring */
1293     ti1570_update_rx_cring(d,rde,atm_hdr,aal5_trailer,0,TRUE);
1294     }
1295    
1296     return(TRUE);
1297     }
1298    
1299     /* Handle a received ATM cell */
1300     static int ti1570_handle_rx_cell(netio_desc_t *nio,
1301     u_char *atm_cell,ssize_t cell_len,
1302     struct pa_a1_data *d)
1303     {
1304     m_uint32_t atm_hdr,vpi,vci,vci_idx,vci_mask;
1305     m_uint32_t vci_max,rvd_entry,bptr,pti,ptr;
1306     ti1570_rx_dma_entry_t *rde = NULL;
1307     ti1570_rx_buf_holder_t rbh;
1308    
1309     if (cell_len != ATM_CELL_SIZE) {
1310     TI1570_LOG(d,"invalid RX cell size (%ld)\n",(long)cell_len);
1311     return(FALSE);
1312     }
1313    
1314     /* Extract the VPI/VCI used as index in the RX VPI/VCI DMA pointer table */
1315     atm_hdr = ntohl(*(m_uint32_t *)&atm_cell[0]);
1316     vpi = (atm_hdr & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1317     vci = (atm_hdr & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1318     pti = (atm_hdr & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1319    
1320     #if DEBUG_RECEIVE
1321     TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with VPI/VCI=%u/%u\n",
1322     vpi,vci);
1323     #endif
1324    
1325     /* Get the entry corresponding to this VPI in RX VPI/VCI dma ptr table */
1326     rvd_entry = d->rx_vpi_vci_dma_table[vpi];
1327    
1328     if (!(rvd_entry & TI1570_RX_VPI_ENABLE)) {
1329     TI1570_LOG(d,"ti1570_handle_rx_cell: received cell with "
1330     "unknown VPI %u (VCI=%u)\n",vpi,vci);
1331     return(FALSE);
1332     }
1333    
1334     /*
1335     * Special routing for OAM F4 cells:
1336     * - VCI 3 : OAM F4 segment cell
1337     * - VCI 4 : OAM F4 end-to-end cell
1338     */
1339     if ((vci == 3) || (vci == 4))
1340     rde = &d->rx_dma_table[2];
1341     else {
1342     if ((atm_hdr & ATM_PTI_NETWORK) != 0) {
1343     switch(pti) {
1344     case 0x04: /* OAM F5-segment cell */
1345     case 0x05: /* OAM F5 end-to-end cell */
1346     rde = &d->rx_dma_table[0];
1347     break;
1348    
1349     case 0x06:
1350     case 0x07:
1351     rde = &d->rx_dma_table[1];
1352     break;
1353     }
1354     } else {
1355     /*
1356     * Standard VPI/VCI.
1357     * Apply the VCI mask if we don't have an OAM cell.
1358     */
1359     if (!(atm_hdr & ATM_PTI_NETWORK)) {
1360     vci_mask = d->iregs[TI1570_REG_TX_RX_FIFO] >> 16;
1361     vci_idx = vci & (~vci_mask);
1362    
1363     vci_max = rvd_entry & TI1570_RX_VCI_RANGE_MASK;
1364    
1365     if (vci_idx > vci_max) {
1366     TI1570_LOG(d,"ti1570_handle_rx_cell: out-of-range VCI %u "
1367     "(VPI=%u,vci_mask=%u,vci_max=%u)\n",
1368     vci,vpi,vci_mask,vci_max);
1369     return(FALSE);
1370     }
1371    
1372     #if DEBUG_RECEIVE
1373     TI1570_LOG(d,"ti1570_handle_rx_cell: VPI/VCI=%u/%u, "
1374     "vci_mask=0x%x, vci_idx=%u (0x%x), vci_max=%u (0x%x)\n",
1375     vpi,vci,vci_mask,vci_idx,vci_idx,vci_max,vci_max);
1376     #endif
1377     bptr = (rvd_entry & TI1570_RX_BASE_PTR_MASK);
1378     bptr >>= TI1570_RX_BASE_PTR_SHIFT;
1379     bptr = (bptr + vci) * sizeof(ti1570_rx_dma_entry_t);
1380    
1381     if (bptr < TI1570_RX_DMA_TABLE_OFFSET) {
1382     TI1570_LOG(d,"ti1570_handle_rx_cell: inconsistency in "
1383     "RX VPI/VCI table, VPI/VCI=%u/%u, bptr=0x%x\n",
1384     vpi,vci,bptr);
1385     return(FALSE);
1386     }
1387    
1388     bptr -= TI1570_RX_DMA_TABLE_OFFSET;
1389     rde = &d->rx_dma_table[bptr / sizeof(ti1570_rx_dma_entry_t)];
1390     }
1391     }
1392     }
1393    
1394     if (!rde) {
1395     TI1570_LOG(d,"ti1570_handle_rx_cell: no RX DMA table entry found!\n");
1396     return(FALSE);
1397     }
1398    
1399     /* The entry must be active */
1400     if (!(rde->fbr_entry & TI1570_RX_DMA_ON))
1401     return(FALSE);
1402    
1403     /* Is this the start of a new packet ? */
1404     if (!(rde->ctrl & TI1570_RX_DMA_ACT))
1405     {
1406     /* Try to acquire a free buffer */
1407     if (!ti1570_acquire_rx_buffer(d,rde,&rbh,atm_hdr)) {
1408     rde->ctrl |= TI1570_RX_DMA_WAIT_EOP;
1409     return(FALSE);
1410     }
1411    
1412     /* Insert the free buffer in the RX DMA structure */
1413     ti1570_insert_rx_free_buf(d,rde,&rbh);
1414     rde->sp_ptr = rde->sb_addr;
1415    
1416     /* Mark the RX buffer as the start of packet (SOP) */
1417     ptr = (rde->sb_addr << 2) + OFFSET(ti1570_rx_buffer_t,ctrl);
1418     physmem_copy_u32_to_vm(d->vm,ptr,TI1570_RX_BUFFER_SOP);
1419    
1420     /* Set ACT bit for the DMA channel */
1421     rde->ctrl |= TI1570_RX_DMA_ACT;
1422     }
1423    
1424     /* Store the received cell */
1425     ti1570_store_rx_cell(d,rde,atm_cell);
1426     return(TRUE);
1427     }
1428    
1429     /*
1430     * pci_ti1570_read()
1431     */
1432 dpavlin 7 static m_uint32_t pci_ti1570_read(cpu_gen_t *cpu,struct pci_device *dev,
1433 dpavlin 1 int reg)
1434     {
1435     struct pa_a1_data *d = dev->priv_data;
1436    
1437     #if DEBUG_ACCESS
1438     TI1570_LOG(d,"pci_ti1570_read: read reg 0x%x\n",reg);
1439     #endif
1440    
1441     switch(reg) {
1442     case PCI_REG_BAR0:
1443     return(d->dev->phys_addr);
1444     default:
1445     return(0);
1446     }
1447     }
1448    
1449     /*
1450     * pci_ti1570_write()
1451     */
1452 dpavlin 7 static void pci_ti1570_write(cpu_gen_t *cpu,struct pci_device *dev,
1453 dpavlin 1 int reg,m_uint32_t value)
1454     {
1455     struct pa_a1_data *d = dev->priv_data;
1456    
1457     #if DEBUG_ACCESS
1458     TI1570_LOG(d,"pci_ti1570_write: write reg 0x%x, value 0x%x\n",reg,value);
1459     #endif
1460    
1461     switch(reg) {
1462     case PCI_REG_BAR0:
1463     vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
1464     TI1570_LOG(d,"registers are mapped at 0x%x\n",value);
1465     break;
1466     }
1467     }
1468    
1469     /*
1470     * pci_plx9060es_read()
1471     */
1472 dpavlin 7 static m_uint32_t pci_plx9060es_read(cpu_gen_t *cpu,struct pci_device *dev,
1473 dpavlin 1 int reg)
1474     {
1475     #if DEBUG_ACCESS
1476     cpu_log(cpu,"PLX9060ES","read reg 0x%x\n",reg);
1477     #endif
1478     switch(reg) {
1479     default:
1480     return(0);
1481     }
1482     }
1483    
1484     /*
1485     * pci_plx9060es_write()
1486     */
1487 dpavlin 7 static void pci_plx9060es_write(cpu_gen_t *cpu,struct pci_device *dev,
1488 dpavlin 1 int reg,m_uint32_t value)
1489     {
1490     #if DEBUG_ACCESS
1491     cpu_log(cpu,"PLX9060ES","write reg 0x%x, value 0x%x\n",reg,value);
1492     #endif
1493    
1494     switch(reg) {
1495     }
1496     }
1497    
1498     /* Reset the TI1570 */
1499     static void ti1570_reset(struct pa_a1_data *d,int clear_ctrl_mem)
1500     {
1501     ti1570_clear_tx_fifo(d);
1502    
1503     d->tcr_wi_pos = d->tcr_woi_pos = 0;
1504     d->rcr_wi_pos = d->rcr_woi_pos = 0;
1505    
1506     if (clear_ctrl_mem)
1507     memset(d->ctrl_mem_ptr,0,TI1570_CTRL_MEM_SIZE);
1508     }
1509    
1510     /*
1511     * dev_c7200_pa_a1_init()
1512     *
1513     * Add a PA-A1 port adapter into the specified slot.
1514     */
1515     int dev_c7200_pa_a1_init(c7200_t *router,char *name,u_int pa_bay)
1516     {
1517     struct pci_device *pci_dev_ti,*pci_dev_plx;
1518     struct pa_a1_data *d;
1519     struct vdevice *dev;
1520     m_uint8_t *p;
1521    
1522     /* Allocate the private data structure for TI1570 chip */
1523     if (!(d = malloc(sizeof(*d)))) {
1524     fprintf(stderr,"%s (TI1570): out of memory\n",name);
1525     return(-1);
1526     }
1527    
1528     memset(d,0,sizeof(*d));
1529    
1530     /* Set the EEPROM */
1531 dpavlin 3 c7200_pa_set_eeprom(router,pa_bay,cisco_eeprom_find_pa("PA-A1"));
1532 dpavlin 1
1533     /* Add PCI device TI1570 */
1534     pci_dev_ti = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1535     TI1570_PCI_VENDOR_ID,TI1570_PCI_PRODUCT_ID,
1536     0,0,C7200_NETIO_IRQ,d,
1537     NULL,pci_ti1570_read,pci_ti1570_write);
1538    
1539     if (!pci_dev_ti) {
1540     fprintf(stderr,"%s (TI1570): unable to create PCI device TI1570.\n",
1541     name);
1542     return(-1);
1543     }
1544    
1545     /* Add PCI device PLX9060ES */
1546     pci_dev_plx = pci_dev_add(router->pa_bay[pa_bay].pci_map,name,
1547     PLX_9060ES_PCI_VENDOR_ID,
1548     PLX_9060ES_PCI_PRODUCT_ID,
1549     1,0,C7200_NETIO_IRQ,d,
1550     NULL,pci_plx9060es_read,pci_plx9060es_write);
1551    
1552     if (!pci_dev_plx) {
1553     fprintf(stderr,"%s (PLX_9060ES): unable to create PCI device "
1554     "PLX 9060ES.\n",name);
1555     return(-1);
1556     }
1557    
1558     /* Create the TI1570 structure */
1559     d->name = name;
1560     d->vm = router->vm;
1561     d->pci_dev_ti = pci_dev_ti;
1562     d->pci_dev_plx = pci_dev_plx;
1563    
1564     /* Allocate the control memory */
1565     if (!(d->ctrl_mem_ptr = malloc(TI1570_CTRL_MEM_SIZE))) {
1566     fprintf(stderr,"%s (PA-A1): unable to create control memory.\n",name);
1567     return(-1);
1568     }
1569    
1570     /* Standard tables for the TI1570 */
1571     p = (m_uint8_t *)d->ctrl_mem_ptr;
1572    
1573     d->iregs = (m_uint32_t *)(p + TI1570_INTERNAL_REGS_OFFSET);
1574     d->tx_sched_table = (m_uint32_t *)(p + TI1570_TX_SCHED_OFFSET);
1575     d->tx_dma_table = (ti1570_tx_dma_entry_t *)(p + TI1570_TX_DMA_TABLE_OFFSET);
1576     d->rx_vpi_vci_dma_table = (m_uint32_t *)(p+TI1570_RX_DMA_PTR_TABLE_OFFSET);
1577     d->rx_dma_table = (ti1570_rx_dma_entry_t *)(p + TI1570_RX_DMA_TABLE_OFFSET);
1578     d->rx_fbr_table = (ti1570_rx_fbr_entry_t *)(p + TI1570_FREE_BUFFERS_OFFSET);
1579    
1580     ti1570_reset(d,TRUE);
1581    
1582     /* Create the device itself */
1583     if (!(dev = dev_create(name))) {
1584     fprintf(stderr,"%s (PA-A1): unable to create device.\n",name);
1585     return(-1);
1586     }
1587    
1588     dev->phys_addr = 0;
1589     dev->phys_len = 0x200000;
1590     dev->handler = dev_pa_a1_access;
1591    
1592     /* Store device info */
1593     dev->priv_data = d;
1594     d->dev = dev;
1595    
1596     /* Store device info into the router structure */
1597     return(c7200_pa_set_drvinfo(router,pa_bay,d));
1598     }
1599    
1600     /* Remove a PA-A1 from the specified slot */
1601     int dev_c7200_pa_a1_shutdown(c7200_t *router,u_int pa_bay)
1602     {
1603     struct c7200_pa_bay *bay;
1604     struct pa_a1_data *d;
1605    
1606     if (!(bay = c7200_pa_get_info(router,pa_bay)))
1607     return(-1);
1608    
1609     d = bay->drv_info;
1610    
1611     /* Remove the PA EEPROM */
1612     c7200_pa_unset_eeprom(router,pa_bay);
1613    
1614     /* Remove the PCI devices */
1615     pci_dev_remove(d->pci_dev_ti);
1616     pci_dev_remove(d->pci_dev_plx);
1617    
1618     /* Remove the device from the VM address space */
1619     vm_unbind_device(router->vm,d->dev);
1620     cpu_group_rebuild_mts(router->vm->cpu_group);
1621    
1622     /* Free the control memory */
1623     free(d->ctrl_mem_ptr);
1624    
1625     /* Free the device structure itself */
1626     free(d->dev);
1627     free(d);
1628     return(0);
1629     }
1630    
1631     /* Bind a Network IO descriptor to a specific port */
1632     int dev_c7200_pa_a1_set_nio(c7200_t *router,u_int pa_bay,u_int port_id,
1633     netio_desc_t *nio)
1634     {
1635     struct pa_a1_data *d;
1636    
1637     if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1638     return(-1);
1639    
1640     if (d->nio != NULL)
1641     return(-1);
1642    
1643     d->nio = nio;
1644     d->tx_tid = ptask_add((ptask_callback)ti1570_scan_tx_sched_table,d,NULL);
1645     netio_rxl_add(nio,(netio_rx_handler_t)ti1570_handle_rx_cell,d,NULL);
1646     return(0);
1647     }
1648    
1649     /* Unbind a Network IO descriptor from a specific port */
1650     int dev_c7200_pa_a1_unset_nio(c7200_t *router,u_int pa_bay,u_int port_id)
1651     {
1652     struct pa_a1_data *d;
1653    
1654     if ((port_id > 0) || !(d = c7200_pa_get_drvinfo(router,pa_bay)))
1655     return(-1);
1656    
1657     if (d->nio) {
1658     ptask_remove(d->tx_tid);
1659     netio_rxl_remove(d->nio);
1660     d->nio = NULL;
1661     }
1662     return(0);
1663     }
1664    
1665     /* PA-A1 driver */
1666     struct c7200_pa_driver dev_c7200_pa_a1_driver = {
1667     "PA-A1", 1,
1668     dev_c7200_pa_a1_init,
1669     dev_c7200_pa_a1_shutdown,
1670     dev_c7200_pa_a1_set_nio,
1671     dev_c7200_pa_a1_unset_nio,
1672 dpavlin 2 NULL,
1673 dpavlin 1 };
