upstream/dynamips-0.2.7/dev_gt.c

/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 *
 * Galileo GT64010/GT64120A/GT96100A system controller.
 *
 * The DMA stuff is not complete, only "normal" transfers are working
 * (source and destination addresses incrementing).
 *
 * Also, these transfers are "instantaneous" from a CPU point-of-view: when
 * a channel is enabled, the transfer is immediately done. So, this is not
 * very realistic.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "utils.h"
#include "net.h"
#include "cpu.h"
#include "vm.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "net_io.h"
#include "ptask.h"
#include "dev_gt.h"

/* Debugging flags */
#define DEBUG_UNKNOWN   0
#define DEBUG_DMA       0
#define DEBUG_MII       0
#define DEBUG_ETH_TX    0
#define DEBUG_ETH_RX    0
#define DEBUG_ETH_HASH  0

/* PCI identification */
#define PCI_VENDOR_GALILEO           0x11ab   /* Galileo Technology */
#define PCI_PRODUCT_GALILEO_GT64010  0x0146   /* GT-64010 */
#define PCI_PRODUCT_GALILEO_GT64011  0x4146   /* GT-64011 */
#define PCI_PRODUCT_GALILEO_GT64120  0x4620   /* GT-64120 */
#define PCI_PRODUCT_GALILEO_GT96100  0x9653   /* GT-96100 */

/* === Global definitions === */

/* Interrupt High Cause Register */
#define GT_IHCR_ETH0_SUM   0x00000001
#define GT_IHCR_ETH1_SUM   0x00000002
#define GT_IHCR_SDMA_SUM   0x00000010

/* Serial Cause Register */
#define GT_SCR_ETH0_SUM    0x00000001
#define GT_SCR_ETH1_SUM    0x00000002
#define GT_SCR_SDMA_SUM    0x00000010

/* === DMA definitions === */
#define GT_DMA_CHANNELS    4

#define GT_DMA_FLYBY_ENABLE  0x00000001   /* FlyBy Enable */
#define GT_DMA_FLYBY_RDWR    0x00000002   /* SDRAM Read/Write (FlyBy) */
#define GT_DMA_SRC_DIR       0x0000000c   /* Source Direction */
#define GT_DMA_DST_DIR       0x00000030   /* Destination Direction */
#define GT_DMA_DATA_LIMIT    0x000001c0   /* Data Transfer Limit */
#define GT_DMA_CHAIN_MODE    0x00000200   /* Chained Mode */
#define GT_DMA_INT_MODE      0x00000400   /* Interrupt Mode */
#define GT_DMA_TRANS_MODE    0x00000800   /* Transfer Mode */
#define GT_DMA_CHAN_ENABLE   0x00001000   /* Channel Enable */
#define GT_DMA_FETCH_NEXT    0x00002000   /* Fetch Next Record */
#define GT_DMA_ACT_STATUS    0x00004000   /* DMA Activity Status */
#define GT_DMA_SDA           0x00008000   /* Source/Destination Alignment */
#define GT_DMA_MDREQ         0x00010000   /* Mask DMA Requests */
#define GT_DMA_CDE           0x00020000   /* Close Descriptor Enable */
#define GT_DMA_EOTE          0x00040000   /* End-of-Transfer (EOT) Enable */
#define GT_DMA_EOTIE         0x00080000   /* EOT Interrupt Enable */
#define GT_DMA_ABORT         0x00100000   /* Abort DMA Transfer */
#define GT_DMA_SLP           0x00600000   /* Override Source Address */
#define GT_DMA_DLP           0x01800000   /* Override Dest Address */
#define GT_DMA_RLP           0x06000000   /* Override Record Address */
#define GT_DMA_REQ_SRC       0x10000000   /* DMA Request Source */

/* Galileo DMA channel */
struct dma_channel {
   m_uint32_t byte_count;
   m_uint32_t src_addr;
   m_uint32_t dst_addr;
   m_uint32_t cdptr;
   m_uint32_t nrptr;
   m_uint32_t ctrl;
};
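
/*
 * Added illustration (not part of the original source): in chained
 * mode, gt_dma_fetch_rec() below reads each DMA record as four 32-bit
 * words located at the "next record pointer" (nrptr):
 *
 *   offset 0x00: byte count
 *   offset 0x04: source address
 *   offset 0x08: destination address
 *   offset 0x0c: next record pointer (0 = end of chain)
 *
 * The structure above mirrors this layout, with cdptr holding the
 * current descriptor pointer and ctrl the channel control register.
 */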

/* === Ethernet definitions === */
#define GT_ETH_PORTS     2
#define GT_MAX_PKT_SIZE  2048

/* SMI register */
#define GT_SMIR_DATA_MASK    0x0000FFFF
#define GT_SMIR_PHYAD_MASK   0x001F0000   /* PHY Device Address */
#define GT_SMIR_PHYAD_SHIFT  16
#define GT_SMIR_REGAD_MASK   0x03e00000   /* PHY Device Register Address */
#define GT_SMIR_REGAD_SHIFT  21
#define GT_SMIR_OPCODE_MASK  0x04000000   /* Opcode (0: write, 1: read) */
#define GT_SMIR_OPCODE_READ  0x04000000
#define GT_SMIR_RVALID_FLAG  0x08000000   /* Read Valid */
#define GT_SMIR_BUSY_FLAG    0x10000000   /* Busy: 1=op in progress */
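
/*
 * Added illustration (not part of the original source): a guest starts
 * an SMI/MII read of PHY register 'reg' on PHY 'phy' by writing a
 * command word built from the fields above ('phy' and 'reg' are
 * placeholder names):
 *
 *   m_uint32_t cmd = GT_SMIR_OPCODE_READ |
 *                    ((phy << GT_SMIR_PHYAD_SHIFT) & GT_SMIR_PHYAD_MASK) |
 *                    ((reg << GT_SMIR_REGAD_SHIFT) & GT_SMIR_REGAD_MASK);
 *
 * gt_mii_read() below answers with GT_SMIR_RVALID_FLAG already set and
 * never sets GT_SMIR_BUSY_FLAG, so the emulated operation completes
 * immediately from the guest's point of view.
 */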

/* PCR: Port Configuration Register */
#define GT_PCR_PM    0x00000001   /* Promiscuous mode */
#define GT_PCR_RBM   0x00000002   /* Reject broadcast mode */
#define GT_PCR_PBF   0x00000004   /* Pass bad frames */
#define GT_PCR_EN    0x00000080   /* Port Enabled/Disabled */
#define GT_PCR_LPBK  0x00000300   /* Loopback mode */
#define GT_PCR_FC    0x00000400   /* Force collision */
#define GT_PCR_HS    0x00001000   /* Hash size */
#define GT_PCR_HM    0x00002000   /* Hash mode */
#define GT_PCR_HDM   0x00004000   /* Hash default mode */
#define GT_PCR_HD    0x00008000   /* Duplex Mode */
#define GT_PCR_ISL   0x70000000   /* ISL enabled (0x06) */
#define GT_PCR_ACCS  0x80000000   /* Accelerate Slot Time */

/* PCXR: Port Configuration Extend Register */
#define GT_PCXR_IGMP          0x00000001   /* IGMP packet capture */
#define GT_PCXR_SPAN          0x00000002   /* BPDU packet capture */
#define GT_PCXR_PAR           0x00000004   /* Partition Enable */
#define GT_PCXR_PRIOTX        0x00000038   /* Priority weight for TX */
#define GT_PCXR_PRIORX        0x000000C0   /* Priority weight for RX */
#define GT_PCXR_PRIORX_OV     0x00000100   /* Prio RX override */
#define GT_PCXR_DPLX_EN       0x00000200   /* Autoneg for Duplex */
#define GT_PCXR_FCTL_EN       0x00000400   /* Autoneg for 802.3x */
#define GT_PCXR_FLP           0x00000800   /* Force Link Pass */
#define GT_PCXR_FCTL          0x00001000   /* Flow Control Mode */
#define GT_PCXR_MFL           0x0000C000   /* Maximum Frame Length */
#define GT_PCXR_MIB_CLR_MODE  0x00010000   /* MIB counters clear mode */
#define GT_PCXR_SPEED         0x00040000   /* Port Speed */
#define GT_PCXR_SPEED_EN      0x00080000   /* Autoneg for Speed */
#define GT_PCXR_RMII_EN       0x00100000   /* RMII Enable */
#define GT_PCXR_DSCP_EN       0x00200000   /* DSCP decoding enable */

/* PCMR: Port Command Register */
#define GT_PCMR_FJ  0x00008000   /* Force Jam / Flow Control */

/* PSR: Port Status Register */
#define GT_PSR_SPEED   0x00000001   /* Speed: 10/100 Mb/s (100=>1) */
#define GT_PSR_DUPLEX  0x00000002   /* Duplex (1: full) */
#define GT_PSR_FCTL    0x00000004   /* Flow Control Mode */
#define GT_PSR_LINK    0x00000008   /* Link Up/Down */
#define GT_PSR_PAUSE   0x00000010   /* Flow-control disabled state */
#define GT_PSR_TXLOW   0x00000020   /* TX Low priority status */
#define GT_PSR_TXHIGH  0x00000040   /* TX High priority status */
#define GT_PSR_TXINP   0x00000080   /* TX in Progress */

/* SDCR: SDMA Configuration Register */
#define GT_SDCR_RC    0x0000003c   /* Retransmit count */
#define GT_SDCR_BLMR  0x00000040   /* Big/Little Endian RX mode */
#define GT_SDCR_BLMT  0x00000080   /* Big/Little Endian TX mode */
#define GT_SDCR_POVR  0x00000100   /* PCI override */
#define GT_SDCR_RIFB  0x00000200   /* RX IRQ on frame boundary */
#define GT_SDCR_BSZ   0x00003000   /* Burst size */

/* SDCMR: SDMA Command Register */
#define GT_SDCMR_ERD   0x00000080   /* Enable RX DMA */
#define GT_SDCMR_AR    0x00008000   /* Abort Receive */
#define GT_SDCMR_STDH  0x00010000   /* Stop TX High */
#define GT_SDCMR_STDL  0x00020000   /* Stop TX Low */
#define GT_SDCMR_TXDH  0x00800000   /* Start TX High */
#define GT_SDCMR_TXDL  0x01000000   /* Start TX Low */
#define GT_SDCMR_AT    0x80000000   /* Abort Transmit */

/* ICR: Interrupt Cause Register */
#define GT_ICR_RXBUF     0x00000001   /* RX Buffer returned to host */
#define GT_ICR_TXBUFH    0x00000004   /* TX Buffer High */
#define GT_ICR_TXBUFL    0x00000008   /* TX Buffer Low */
#define GT_ICR_TXENDH    0x00000040   /* TX End High */
#define GT_ICR_TXENDL    0x00000080   /* TX End Low */
#define GT_ICR_RXERR     0x00000100   /* RX Error */
#define GT_ICR_TXERRH    0x00000400   /* TX Error High */
#define GT_ICR_TXERRL    0x00000800   /* TX Error Low */
#define GT_ICR_RXOVR     0x00001000   /* RX Overrun */
#define GT_ICR_TXUDR     0x00002000   /* TX Underrun */
#define GT_ICR_RXBUFQ0   0x00010000   /* RX Buffer in Prio Queue 0 */
#define GT_ICR_RXBUFQ1   0x00020000   /* RX Buffer in Prio Queue 1 */
#define GT_ICR_RXBUFQ2   0x00040000   /* RX Buffer in Prio Queue 2 */
#define GT_ICR_RXBUFQ3   0x00080000   /* RX Buffer in Prio Queue 3 */
#define GT_ICR_RXERRQ0   0x00010000   /* RX Error in Prio Queue 0 */
#define GT_ICR_RXERRQ1   0x00020000   /* RX Error in Prio Queue 1 */
#define GT_ICR_RXERRQ2   0x00040000   /* RX Error in Prio Queue 2 */
#define GT_ICR_RXERRQ3   0x00080000   /* RX Error in Prio Queue 3 */
#define GT_ICR_MII_STC   0x10000000   /* MII PHY Status Change */
#define GT_ICR_SMI_DONE  0x20000000   /* SMI Command Done */
#define GT_ICR_INT_SUM   0x80000000   /* Ethernet Interrupt Summary */
#define GT_ICR_MASK      0x7FFFFFFF

/* Ethernet hash entry */
#define GT_HTE_VALID      0x00000001   /* Valid entry */
#define GT_HTE_SKIP       0x00000002   /* Skip entry in a chain */
#define GT_HTE_RD         0x00000004   /* 0: Discard, 1: Receive */
#define GT_HTE_ADDR_MASK  0x7fffffffffff8ULL

#define GT_HTE_HOPNUM  12   /* Hash Table Hop Number */

enum {
   GT_HTLOOKUP_MISS,
   GT_HTLOOKUP_MATCH,
   GT_HTLOOKUP_HOP_EXCEEDED,
};

/* TX Descriptor */
#define GT_TXDESC_OWN  0x80000000   /* Ownership */
#define GT_TXDESC_AM   0x40000000   /* Auto-mode */
#define GT_TXDESC_EI   0x00800000   /* Enable Interrupt */
#define GT_TXDESC_GC   0x00400000   /* Generate CRC */
#define GT_TXDESC_P    0x00040000   /* Padding */
#define GT_TXDESC_F    0x00020000   /* First buffer of packet */
#define GT_TXDESC_L    0x00010000   /* Last buffer of packet */
#define GT_TXDESC_ES   0x00008000   /* Error Summary */
#define GT_TXDESC_RC   0x00003c00   /* Retransmit Count */
#define GT_TXDESC_COL  0x00000200   /* Collision */
#define GT_TXDESC_RL   0x00000100   /* Retransmit Limit Error */
#define GT_TXDESC_UR   0x00000040   /* Underrun Error */
#define GT_TXDESC_LC   0x00000020   /* Late Collision Error */

#define GT_TXDESC_BC_MASK   0xFFFF0000   /* Number of bytes to transmit */
#define GT_TXDESC_BC_SHIFT  16

/* RX Descriptor */
#define GT_RXDESC_OWN   0x80000000   /* Ownership */
#define GT_RXDESC_AM    0x40000000   /* Auto-mode */
#define GT_RXDESC_EI    0x00800000   /* Enable Interrupt */
#define GT_RXDESC_F     0x00020000   /* First buffer of packet */
#define GT_RXDESC_L     0x00010000   /* Last buffer of packet */
#define GT_RXDESC_ES    0x00008000   /* Error Summary */
#define GT_RXDESC_IGMP  0x00004000   /* IGMP packet detected */
#define GT_RXDESC_HE    0x00002000   /* Hash Table Expired */
#define GT_RXDESC_M     0x00001000   /* Missed Frame */
#define GT_RXDESC_FT    0x00000800   /* Frame Type (802.3/Ethernet) */
#define GT_RXDESC_SF    0x00000100   /* Short Frame Error */
#define GT_RXDESC_MFL   0x00000080   /* Maximum Frame Length Error */
#define GT_RXDESC_OR    0x00000040   /* Overrun Error */
#define GT_RXDESC_COL   0x00000010   /* Collision */
#define GT_RXDESC_CE    0x00000001   /* CRC Error */

#define GT_RXDESC_BC_MASK   0x0000FFFF   /* Byte count */
#define GT_RXDESC_BS_MASK   0xFFFF0000   /* Buffer size */
#define GT_RXDESC_BS_SHIFT  16

/* RX/TX descriptor */
struct eth_desc {
   m_uint32_t buf_size;
   m_uint32_t cmd_stat;
   m_uint32_t next_ptr;
   m_uint32_t buf_ptr;
};
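
/*
 * Added illustration (not part of the original source): both RX and TX
 * rings use this 4-word layout, linked through next_ptr. Ownership is
 * handed over with the OWN bit in cmd_stat: the guest sets
 * GT_TXDESC_OWN / GT_RXDESC_OWN (bit 31) to give a descriptor to the
 * controller, and the emulation clears the bit once the buffer has
 * been consumed (see gt_eth_handle_txqueue / gt_eth_handle_rxqueue
 * below).
 */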

/* Galileo Ethernet port */
struct eth_port {
   netio_desc_t *nio;

   /* First and Current RX descriptors (4 queues) */
   m_uint32_t rx_start[4],rx_current[4];

   /* Current TX descriptors (2 queues) */
   m_uint32_t tx_current[2];

   /* Port registers */
   m_uint32_t pcr,pcxr,pcmr,psr;

   /* Interrupt registers */
   m_uint32_t icr,imr;

   /* SDMA registers */
   m_uint32_t sdcr,sdcmr;

   /* Hash Table pointer */
   m_uint32_t ht_addr;

   /* Ethernet MIB counters */
   m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames;
};

/* Galileo GT64xxx/GT96xxx system controller */
struct gt_data {
   char *name;
   vm_obj_t vm_obj;
   struct vdevice dev;
   struct pci_device *pci_dev;
   vm_instance_t *vm;

   struct pci_bus *bus[2];
   struct dma_channel dma[GT_DMA_CHANNELS];
   m_uint32_t int_cause_reg;
   m_uint32_t int_mask_reg;

   /* Ethernet ports (GT-96100) */
   u_int eth_irq;
   ptask_id_t eth_tx_tid;
   struct eth_port eth_ports[GT_ETH_PORTS];
   m_uint32_t smi_reg;
   m_uint16_t mii_regs[32][32];
};

/* Log a GT message */
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)

/* Update the interrupt status */
static void gt_update_irq_status(struct gt_data *gt_data)
{
   if (gt_data->pci_dev) {
      if (gt_data->int_cause_reg & gt_data->int_mask_reg)
         pci_dev_trigger_irq(gt_data->vm,gt_data->pci_dev);
      else
         pci_dev_clear_irq(gt_data->vm,gt_data->pci_dev);
   }
}
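
/*
 * Added illustration (not part of the original source): the PCI IRQ
 * line simply follows (int_cause_reg & int_mask_reg). For example,
 * completion of DMA channel 0 sets cause bit 4 (see the
 * "1 << (4 + chan_id)" in gt_dma_handle_ctrl below), so the guest only
 * sees the interrupt if it has also set bit 4 in the mask register at
 * offset 0xc1c.
 */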

/* Fetch a DMA record (chained mode) */
static void gt_dma_fetch_rec(vm_instance_t *vm,struct dma_channel *channel)
{
   m_uint32_t ptr;

#if DEBUG_DMA
   vm_log(vm,"GT_DMA","fetching record at address 0x%x\n",channel->nrptr);
#endif

   /* fetch the record from RAM */
   ptr = channel->nrptr;
   channel->byte_count = swap32(physmem_copy_u32_from_vm(vm,ptr));
   channel->src_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x04));
   channel->dst_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x08));
   channel->nrptr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x0c));

   /* clear the "fetch next record" bit */
   channel->ctrl &= ~GT_DMA_FETCH_NEXT;
}

/* Handle the control register of a DMA channel */
static void gt_dma_handle_ctrl(struct gt_data *gt_data,int chan_id)
{
   struct dma_channel *channel = &gt_data->dma[chan_id];
   vm_instance_t *vm = gt_data->vm;
   int done;

   if (channel->ctrl & GT_DMA_FETCH_NEXT) {
      if (channel->nrptr == 0) {
         vm_log(vm,"GT_DMA","trying to load a NULL DMA record...\n");
         return;
      }

      gt_dma_fetch_rec(vm,channel);
   }

   if (channel->ctrl & GT_DMA_CHAN_ENABLE)
   {
      do {
         done = TRUE;

#if DEBUG_DMA
         vm_log(vm,"GT_DMA",
                "starting transfer from 0x%x to 0x%x (size=%u bytes)\n",
                channel->src_addr,channel->dst_addr,
                channel->byte_count & 0xFFFF);
#endif
         physmem_dma_transfer(vm,channel->src_addr,channel->dst_addr,
                              channel->byte_count & 0xFFFF);

         /* chained mode */
         if (!(channel->ctrl & GT_DMA_CHAIN_MODE)) {
            if (channel->nrptr) {
               gt_dma_fetch_rec(vm,channel);
               done = FALSE;
            }
         }
      } while(!done);

#if DEBUG_DMA
      vm_log(vm,"GT_DMA","finished transfer.\n");
#endif
      /* Trigger DMA interrupt */
      gt_data->int_cause_reg |= 1 << (4 + chan_id);
      gt_update_irq_status(gt_data);
   }
}

#define DMA_REG(ch,reg_name) \
   if (op_type == MTS_WRITE) \
      gt_data->dma[ch].reg_name = swap32(*data); \
   else \
      *data = swap32(gt_data->dma[ch].reg_name);
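
/*
 * Added illustration (not part of the original source): DMA_REG()
 * gives each 32-bit channel register read/write semantics in one line.
 * For example, DMA_REG(0,src_addr) expands to:
 *
 *   if (op_type == MTS_WRITE)
 *      gt_data->dma[0].src_addr = swap32(*data);
 *   else
 *      *data = swap32(gt_data->dma[0].src_addr);
 */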

/* Handle a DMA channel */
static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   switch(offset) {
      /* DMA Source Address */
      case 0x810: DMA_REG(0,src_addr); return(1);
      case 0x814: DMA_REG(1,src_addr); return(1);
      case 0x818: DMA_REG(2,src_addr); return(1);
      case 0x81c: DMA_REG(3,src_addr); return(1);

      /* DMA Destination Address */
      case 0x820: DMA_REG(0,dst_addr); return(1);
      case 0x824: DMA_REG(1,dst_addr); return(1);
      case 0x828: DMA_REG(2,dst_addr); return(1);
      case 0x82c: DMA_REG(3,dst_addr); return(1);

      /* DMA Next Record Pointer */
      case 0x830:
         gt_data->dma[0].cdptr = *data;
         DMA_REG(0,nrptr);
         return(1);

      case 0x834:
         gt_data->dma[1].cdptr = *data;
         DMA_REG(1,nrptr);
         return(1);

      case 0x838:
         gt_data->dma[2].cdptr = *data;
         DMA_REG(2,nrptr);
         return(1);

      case 0x83c:
         gt_data->dma[3].cdptr = *data;
         DMA_REG(3,nrptr);
         return(1);

      /* DMA Channel Control */
      case 0x840:
         DMA_REG(0,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,0);
         return(1);

      case 0x844:
         DMA_REG(1,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,1);
         return(1);

      case 0x848:
         DMA_REG(2,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,2);
         return(1);

      case 0x84c:
         DMA_REG(3,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,3);
         return(1);
   }

   return(0);
}

/*
 * dev_gt64010_access()
 */
void *dev_gt64010_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* ===== DRAM Settings (completely faked, 128 MB) ===== */
      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x080);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_cause_reg);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_mask_reg);
         else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Configuration ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64010","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64010","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/*
 * dev_gt64120_access()
 */
void *dev_gt64120_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_cause_reg);
         else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64120","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64120","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/* Update the Ethernet port interrupt status */
static void gt_eth_update_int_status(struct gt_data *d,struct eth_port *port)
{
   if (port->icr & GT_ICR_MASK)
      port->icr |= GT_ICR_INT_SUM;

   if (port->icr & port->imr & GT_ICR_MASK)
      vm_set_irq(d->vm,d->eth_irq);
}

/* Read a MII register */
static m_uint32_t gt_mii_read(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint32_t res = 0;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

#if DEBUG_MII
   GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: reading.\n",port,reg);
#endif

   if ((port < GT_ETH_PORTS) && (reg < 32)) {
      res = d->mii_regs[port][reg];

      switch(reg) {
         case 0x00:
            res &= ~0x8200;   /* clear reset bit and autoneg restart */
            break;
         case 0x01:
#if 0
            if (d->ports[port].nio && bcm5600_mii_port_status(d,port))
               d->mii_output = 0x782C;
            else
               d->mii_output = 0;
#endif
            res = 0x782c;
            break;
         case 0x02:
            res = 0x40;
            break;
         case 0x03:
            res = 0x61d4;
            break;
         case 0x04:
            res = 0x1E1;
            break;
         case 0x05:
            res = 0x41E1;
            break;
         default:
            res = 0;
      }
   }

   /* Mark the data as ready */
   res |= GT_SMIR_RVALID_FLAG;

   return(res);
}

/* Write a MII register */
static void gt_mii_write(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint16_t isolation;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

   if ((port < GT_ETH_PORTS) && (reg < 32))
   {
#if DEBUG_MII
      GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: writing 0x%4.4x\n",
             port,reg,d->smi_reg & GT_SMIR_DATA_MASK);
#endif

      /* Check if PHY isolation status is changing */
      if (reg == 0) {
         isolation = (d->smi_reg ^ d->mii_regs[port][reg]) & 0x400;

         if (isolation) {
#if DEBUG_MII
            GT_LOG(d,"MII: port 0x%4.4x: generating IRQ\n",port);
#endif
            d->eth_ports[port].icr |= GT_ICR_MII_STC;
            gt_eth_update_int_status(d,&d->eth_ports[port]);
         }
      }

      d->mii_regs[port][reg] = d->smi_reg & GT_SMIR_DATA_MASK;
   }
}

/* Handle registers of Ethernet ports */
static int gt_eth_access(cpu_gen_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *d = dev->priv_data;
   struct eth_port *port;
   u_int port_id = 0;
   u_int queue;

   if ((offset < 0x80000) || (offset >= 0x90000))
      return(FALSE);

   if (op_type == MTS_WRITE)
      *data = swap32(*data);

   /* Determine the Ethernet port */
   if ((offset >= 0x84800) && (offset < 0x88800))
      port_id = 0;

   if ((offset >= 0x88800) && (offset < 0x8c800))
      port_id = 1;

   port = &d->eth_ports[port_id];

   switch(offset) {
      /* SMI register */
      case 0x80810:
         if (op_type == MTS_WRITE) {
            d->smi_reg = *data;

            if (!(d->smi_reg & GT_SMIR_OPCODE_READ))
               gt_mii_write(d);
         } else {
            *data = 0;

            if (d->smi_reg & GT_SMIR_OPCODE_READ)
               *data = gt_mii_read(d);
         }
         break;

      /* ICR: Interrupt Cause Register */
      case 0x84850:
      case 0x88850:
         if (op_type == MTS_READ)
            *data = port->icr;
         else
            port->icr &= *data;
         break;

      /* IMR: Interrupt Mask Register */
      case 0x84858:
      case 0x88858:
         if (op_type == MTS_READ)
            *data = port->imr;
         else
            port->imr = *data;
         break;

      /* PCR: Port Configuration Register */
      case 0x84800:
      case 0x88800:
         if (op_type == MTS_READ)
            *data = port->pcr;
         else
            port->pcr = *data;
         break;

      /* PCXR: Port Configuration Extend Register */
      case 0x84808:
      case 0x88808:
         if (op_type == MTS_READ) {
            *data = port->pcxr;
            *data |= GT_PCXR_SPEED;
         } else
            port->pcxr = *data;
         break;

      /* PCMR: Port Command Register */
      case 0x84810:
      case 0x88810:
         if (op_type == MTS_READ)
            *data = port->pcmr;
         else
            port->pcmr = *data;
         break;

      /* Port Status Register */
      case 0x84818:
      case 0x88818:
         if (op_type == MTS_READ)
            *data = 0x0F;
         break;

      /* First RX descriptor */
      case 0x84880:
      case 0x88880:
      case 0x84884:
      case 0x88884:
      case 0x84888:
      case 0x88888:
      case 0x8488C:
      case 0x8888C:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_start[queue];
         else
            port->rx_start[queue] = *data;
         break;

      /* Current RX descriptor */
      case 0x848A0:
      case 0x888A0:
      case 0x848A4:
      case 0x888A4:
      case 0x848A8:
      case 0x888A8:
      case 0x848AC:
      case 0x888AC:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_current[queue];
         else
            port->rx_current[queue] = *data;
         break;

      /* Current TX descriptor */
      case 0x848E0:
      case 0x888E0:
      case 0x848E4:
      case 0x888E4:
         queue = (offset >> 2) & 0x01;
         if (op_type == MTS_READ)
            *data = port->tx_current[queue];
         else
            port->tx_current[queue] = *data;
         break;

      /* Hash Table Pointer */
      case 0x84828:
      case 0x88828:
         if (op_type == MTS_READ)
            *data = port->ht_addr;
         else
            port->ht_addr = *data;
         break;

      /* SDCR: SDMA Configuration Register */
      case 0x84840:
      case 0x88840:
         if (op_type == MTS_READ)
            *data = port->sdcr;
         else
            port->sdcr = *data;
         break;

      /* SDCMR: SDMA Command Register */
      case 0x84848:
      case 0x88848:
         if (op_type == MTS_WRITE) {
            /* Start RX DMA */
            if (*data & GT_SDCMR_ERD) {
               port->sdcmr |= GT_SDCMR_ERD;
               port->sdcmr &= ~GT_SDCMR_AR;
            }

            /* Abort RX DMA */
            if (*data & GT_SDCMR_AR)
               port->sdcmr &= ~GT_SDCMR_ERD;

            /* Start TX High */
            if (*data & GT_SDCMR_TXDH) {
               port->sdcmr |= GT_SDCMR_TXDH;
               port->sdcmr &= ~GT_SDCMR_STDH;
            }

            /* Start TX Low */
            if (*data & GT_SDCMR_TXDL) {
               port->sdcmr |= GT_SDCMR_TXDL;
               port->sdcmr &= ~GT_SDCMR_STDL;
            }

            /* Stop TX High */
            if (*data & GT_SDCMR_STDH) {
               port->sdcmr &= ~GT_SDCMR_TXDH;
               port->sdcmr |= GT_SDCMR_STDH;
            }

            /* Stop TX Low */
            if (*data & GT_SDCMR_STDL) {
               port->sdcmr &= ~GT_SDCMR_TXDL;
               port->sdcmr |= GT_SDCMR_STDL;
            }
         } else {
            *data = port->sdcmr;
         }
         break;

      /* MIB counter: RX bytes (cleared on read) */
      case 0x85800:
      case 0x89800:
         if (op_type == MTS_READ) {
            *data = port->rx_bytes;
            port->rx_bytes = 0;
         }
         break;

      /* MIB counter: TX bytes (cleared on read) */
      case 0x85804:
      case 0x89804:
         if (op_type == MTS_READ) {
            *data = port->tx_bytes;
            port->tx_bytes = 0;
         }
         break;

      /* MIB counter: RX frames (cleared on read) */
      case 0x85808:
      case 0x89808:
         if (op_type == MTS_READ) {
            *data = port->rx_frames;
            port->rx_frames = 0;
         }
         break;

      /* MIB counter: TX frames (cleared on read) */
      case 0x8580C:
      case 0x8980C:
         if (op_type == MTS_READ) {
            *data = port->tx_frames;
            port->tx_frames = 0;
         }
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100/ETH",
                    "read access to unknown register 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100/ETH",
                    "write access to unknown register 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   if (op_type == MTS_READ)
      *data = swap32(*data);

   return(TRUE);
}

/*
 * dev_gt96100_access()
 */
void *dev_gt96100_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* Watchdog configuration register */
      case 0x101a80:
         break;

      /* Watchdog value register */
      case 0x101a84:
         break;

      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Main Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = gt_data->int_cause_reg;

            /* TODO: signal Eth0/Eth1 */
            //*data |= (1 << 30) | (1 << 31) | 1;

            *data = swap32(*data);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt High Cause Register ===== */
      case 0xc98:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH1_SUM;

            *data = swap32(*data);
         }
         break;

      /* Serial Cause Register */
      case 0x103a00:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH1_SUM;

            *data = swap32(*data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/* Read an Ethernet descriptor */
static void gt_eth_desc_read(struct gt_data *d,m_uint32_t addr,
                             struct eth_desc *desc)
{
   physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct eth_desc));

   /* byte-swapping */
   desc->cmd_stat = vmtoh32(desc->cmd_stat);
   desc->buf_size = vmtoh32(desc->buf_size);
   desc->next_ptr = vmtoh32(desc->next_ptr);
   desc->buf_ptr = vmtoh32(desc->buf_ptr);
}

/* Write an Ethernet descriptor */
static void gt_eth_desc_write(struct gt_data *d,m_uint32_t addr,
                              struct eth_desc *desc)
{
   struct eth_desc tmp;

   /* byte-swapping */
   tmp.cmd_stat = vmtoh32(desc->cmd_stat);
   tmp.buf_size = vmtoh32(desc->buf_size);
   tmp.next_ptr = vmtoh32(desc->next_ptr);
   tmp.buf_ptr = vmtoh32(desc->buf_ptr);

   physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct eth_desc));
}
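
/*
 * Added note (not part of the original source): gt_eth_desc_write()
 * reuses vmtoh32() for the host-to-VM direction. This appears to be
 * intentional: the conversion is either a plain byte swap or the
 * identity depending on the host endianness, and both are their own
 * inverse, so the same macro works in both directions.
 */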

/* Handle a TX queue (single packet) */
static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port,
                                 int queue)
{
   u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
   struct eth_desc txd0,ctxd,*ptxd;
   m_uint32_t tx_start,tx_current;
   m_uint32_t len,tot_len;
   int abort = FALSE;

   /* Check if this TX queue is active */
   if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL))
      return(FALSE);

   if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = tx_current = port->tx_current[queue];

   if (!tx_start)
      goto done;

   ptxd = &txd0;
   gt_eth_desc_read(d,tx_start,ptxd);

   /* If we don't own the first descriptor, we cannot transmit */
   if (!(txd0.cmd_stat & GT_TXDESC_OWN))
      goto done;

   /* Empty packet for now */
   pkt_ptr = pkt;
   tot_len = 0;

   for(;;) {
#if DEBUG_ETH_TX
      GT_LOG(d,"gt_eth_handle_txqueue: loop: "
             "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n",
             ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);
#endif

      if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) {
         GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n");
         abort = TRUE;
         break;
      }

      /* Copy packet data to the buffer */
      len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;

      physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
      pkt_ptr += len;
      tot_len += len;

      /* Clear the OWN bit if this is not the first descriptor
         (cmd_stat is the second word of the descriptor, hence the +4) */
      if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
         ptxd->cmd_stat &= ~GT_TXDESC_OWN;
         physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->cmd_stat);
      }

      tx_current = ptxd->next_ptr;

      /* Last descriptor or no more descriptors available ? */
      if (ptxd->cmd_stat & GT_TXDESC_L)
         break;

      if (!tx_current) {
         abort = TRUE;
         break;
      }

      /* Fetch the next descriptor */
      gt_eth_desc_read(d,tx_current,&ctxd);
      ptxd = &ctxd;
   }

   if ((tot_len != 0) && !abort) {
#if DEBUG_ETH_TX
      GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len);
      mem_dump(log_file,pkt,tot_len);
#endif
      /* send it on the wire */
      netio_send(port->nio,pkt,tot_len);

      /* Update MIB counters */
      port->tx_bytes += tot_len;
      port->tx_frames++;
   }

   /* Clear the OWN flag of the first descriptor */
   txd0.cmd_stat &= ~GT_TXDESC_OWN;
   physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);

   port->tx_current[queue] = tx_current;

   /* Notify the host about the transmitted packet */
   if (queue == 0)
      port->icr |= GT_ICR_TXBUFL;
   else
      port->icr |= GT_ICR_TXBUFH;

 done:
   if (abort) {
      /* TX underrun */
      port->icr |= GT_ICR_TXUDR;

      if (queue == 0)
         port->icr |= GT_ICR_TXERRL;
      else
         port->icr |= GT_ICR_TXERRH;
   } else {
      /* End of queue has been reached */
      if (!tx_current) {
         if (queue == 0)
            port->icr |= GT_ICR_TXENDL;
         else
            port->icr |= GT_ICR_TXENDH;
      }
   }

   /* Update the interrupt status */
   gt_eth_update_int_status(d,port);
   return(TRUE);
}

/* Handle TX ring of the specified port */
static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port)
{
   gt_eth_handle_txqueue(d,&d->eth_ports[port],0);   /* TX Low */
   gt_eth_handle_txqueue(d,&d->eth_ports[port],1);   /* TX High */
}

/* Handle all TX rings of all Ethernet ports */
static int gt_eth_handle_txqueues(struct gt_data *d)
{
   int i;

   for(i=0;i<GT_ETH_PORTS;i++)
      gt_eth_handle_port_txqueues(d,i);

   return(TRUE);
}

/* Reverse the bit order of a nibble */
static const int inv_nibble[16] = {
   0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
   0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF
};

/* Reverse the bit order of a 9-bit value */
static inline u_int gt_hash_inv_9bit(u_int val)
{
   u_int res;

   res = inv_nibble[val & 0x0F] << 5;
   res |= inv_nibble[(val & 0xF0) >> 4] << 1;
   res |= (val & 0x100) >> 8;
   return(res);
}
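
/*
 * Added illustration (not part of the original source): inv_nibble[]
 * reverses the bit order of a 4-bit value, e.g. inv_nibble[0x1] == 0x8
 * and inv_nibble[0x3] == 0xC. gt_hash_inv_9bit() extends this to nine
 * bits: for val = 0x001 (bit 0 set) the result is 0x100 (bit 8 set).
 */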

/*
 * Compute hash value for Ethernet address filtering.
 * Two modes are available (p.271 of the GT96100 doc).
 */
static u_int gt_eth_hash_value(n_eth_addr_t *addr,int mode)
{
   m_uint64_t tmp;
   u_int res;
   int i;

   /* Swap the nibbles */
   for(i=0,tmp=0;i<N_ETH_ALEN;i++) {
      tmp <<= 8;
      tmp |= (inv_nibble[addr->eth_addr_byte[i] & 0x0F]) << 4;
      tmp |= inv_nibble[(addr->eth_addr_byte[i] & 0xF0) >> 4];
   }

   if (mode == 0) {
      /* Fill bits 0:8 */
      res = (tmp & 0x00000003) | ((tmp & 0x00007f00) >> 6);
      res ^= (tmp & 0x00ff8000) >> 15;
      res ^= (tmp & 0x1ff000000ULL) >> 24;

      /* Fill bits 9:14 */
      res |= (tmp & 0xfc) << 7;
   } else {
      /* Fill bits 0:8 */
      res = gt_hash_inv_9bit((tmp & 0x00007fc0) >> 6);
      res ^= gt_hash_inv_9bit((tmp & 0x00ff8000) >> 15);
      res ^= gt_hash_inv_9bit((tmp & 0x1ff000000ULL) >> 24);

      /* Fill bits 9:14 */
      res |= (tmp & 0x3f) << 9;
   }

   return(res);
}
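
/*
 * Added illustration (not part of the original source): the loop above
 * builds a 48-bit value whose bytes are the MAC address bytes with
 * each nibble bit-reversed and the two nibbles swapped. For a single
 * byte 0x1F: the low nibble 0xF bit-reverses to 0xF and lands in the
 * high nibble, while the high nibble 0x1 bit-reverses to 0x8 and lands
 * in the low nibble, giving 0xF8.
 */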

/*
 * Walk through the Ethernet hash table.
 */
static int gt_eth_hash_lookup(struct gt_data *d,struct eth_port *port,
                              n_eth_addr_t *addr,m_uint64_t *entry)
{
   m_uint64_t eth_val;
   m_uint32_t hte_addr;
   u_int hash_val;
   int i;

   eth_val = (m_uint64_t)addr->eth_addr_byte[0] << 3;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[1] << 11;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[2] << 19;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[3] << 27;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[4] << 35;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[5] << 43;

   /* Compute hash value for Ethernet address filtering */
   hash_val = gt_eth_hash_value(addr,port->pcr & GT_PCR_HM);

   if (port->pcr & GT_PCR_HS) {
      /* 1/2K address filtering */
      hte_addr = port->ht_addr + ((hash_val & 0x7ff) << 3);
   } else {
      /* 8K address filtering */
      hte_addr = port->ht_addr + (hash_val << 3);
   }

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash Lookup for Ethernet address "
          "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x: addr=0x%x\n",
          addr->eth_addr_byte[0], addr->eth_addr_byte[1],
          addr->eth_addr_byte[2], addr->eth_addr_byte[3],
          addr->eth_addr_byte[4], addr->eth_addr_byte[5],
          hte_addr);
#endif

   for(i=0;i<GT_HTE_HOPNUM;i++,hte_addr+=8) {
      *entry = ((m_uint64_t)physmem_copy_u32_from_vm(d->vm,hte_addr)) << 32;
      *entry |= physmem_copy_u32_from_vm(d->vm,hte_addr+4);

      /* Empty entry ? */
      if (!(*entry & GT_HTE_VALID))
         return(GT_HTLOOKUP_MISS);

      /* Skip flag or different Ethernet address: jump to the next entry */
      if ((*entry & GT_HTE_SKIP) || ((*entry & GT_HTE_ADDR_MASK) != eth_val))
         continue;

      /* This entry holds the MAC address we are looking for */
      return(GT_HTLOOKUP_MATCH);
   }

   return(GT_HTLOOKUP_HOP_EXCEEDED);
}
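
/*
 * Added illustration (not part of the original source): each 8-byte
 * hash table entry combines the GT_HTE_* flags in its low bits with
 * the MAC address in bits 3-50, matching the eth_val built above. A
 * "receive" entry for address a0:a1:a2:a3:a4:a5 would look like:
 *
 *   entry = GT_HTE_VALID | GT_HTE_RD |
 *           ((m_uint64_t)a0 << 3)  | ((m_uint64_t)a1 << 11) |
 *           ((m_uint64_t)a2 << 19) | ((m_uint64_t)a3 << 27) |
 *           ((m_uint64_t)a4 << 35) | ((m_uint64_t)a5 << 43);
 */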

/*
 * Check if a packet (given its destination address) must be handled
 * at the RX path.
 *
 * Return values:
 *   - 0: Discard packet;
 *   - 1: Receive packet;
 *   - 2: Receive packet and set the "M" bit in the RX descriptor.
 *
 * The documentation is not clear about the M bit in the RX descriptor.
 * It is described as "Miss" or "Match" depending on the section.
 */
static inline int gt_eth_handle_rx_daddr(struct gt_data *d,
                                         struct eth_port *port,
                                         u_int hash_res,
                                         m_uint64_t hash_entry)
{
   /* Hop Number exceeded */
   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      return(1);

   /* Match and hash entry marked as "Receive" */
   if ((hash_res == GT_HTLOOKUP_MATCH) && (hash_entry & GT_HTE_RD))
      return(2);

   /* Miss, but the hash table default mode is to forward ? */
   if ((hash_res == GT_HTLOOKUP_MISS) && (port->pcr & GT_PCR_HDM))
      return(2);

   /* Promiscuous Mode */
   if (port->pcr & GT_PCR_PM)
      return(1);

   /* Drop the packet in all other cases */
   return(0);
}

/* Put a packet in the buffer of a descriptor */
static void gt_eth_rxdesc_put_pkt(struct gt_data *d,struct eth_desc *rxd,
                                  u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT;

   /* compute the data length to copy */
   cp_len = m_min(len,*pkt_len);

   /* copy packet data to the VM physical RAM */
   physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len);

   /* set the byte count in the descriptor */
   rxd->buf_size |= cp_len;

   *pkt += cp_len;
   *pkt_len -= cp_len;
}

/* Put a packet in the specified RX queue */
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue,
                                 u_char *pkt,ssize_t pkt_len)
{
   struct eth_port *port = &d->eth_ports[port_id];
   m_uint32_t rx_start,rx_current;
   struct eth_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   n_eth_dot1q_hdr_t *hdr;
   m_uint64_t hash_entry;
   int i,hash_res,addr_action;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);

   /* Copy the first RX descriptor */
   if (!(rx_start = rx_current = port->rx_start[queue]))
      goto dma_error;

   /* Analyze the Ethernet header */
   hdr = (n_eth_dot1q_hdr_t *)pkt;

   /* Hash table lookup for address filtering */
   hash_res = gt_eth_hash_lookup(d,port,&hdr->daddr,&hash_entry);

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash result: %d, hash_entry=0x%llx\n",hash_res,hash_entry);
#endif

   if (!(addr_action = gt_eth_handle_rx_daddr(d,port,hash_res,hash_entry)))
      return(FALSE);

   /* Load the first RX descriptor */
   gt_eth_desc_read(d,rx_start,&rxd0);

#if DEBUG_ETH_RX
   GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x "
          "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
          "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
          port_id,queue,rx_start,
          rxd0.buf_size,rxd0.cmd_stat,rxd0.next_ptr,rxd0.buf_ptr);
#endif

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* We must own the descriptor */
      if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
         goto dma_error;

      /* Put data into the descriptor buffer */
      gt_eth_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Clear the OWN bit */
      rxdc->cmd_stat &= ~GT_RXDESC_OWN;

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->cmd_stat |= GT_RXDESC_L;
         rxdc->buf_size += 4;   /* Add 4 bytes for CRC */
      }

      /* Update the descriptor in host memory (but not the 1st) */
      if (i != 0)
         gt_eth_desc_write(d,rx_current,rxdc);

      /* Get the address of the next descriptor */
      rx_current = rxdc->next_ptr;

      if (tot_len == 0)
         break;

      if (!rx_current)
         goto dma_error;

      /* Read the next descriptor from VM physical RAM */
      gt_eth_desc_read(d,rx_current,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the RX pointers */
   port->rx_start[queue] = port->rx_current[queue] = rx_current;

   /* Update the first RX descriptor */
   rxd0.cmd_stat |= GT_RXDESC_F;

   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      rxd0.cmd_stat |= GT_RXDESC_HE;

   if (addr_action == 2)
      rxd0.cmd_stat |= GT_RXDESC_M;

   if (ntohs(hdr->type) <= N_ETH_MTU)   /* 802.3 frame */
      rxd0.cmd_stat |= GT_RXDESC_FT;

   gt_eth_desc_write(d,rx_start,&rxd0);

   /* Update MIB counters */
   port->rx_bytes += pkt_len;
   port->rx_frames++;

   /* Indicate that we have a frame ready */
   port->icr |= (GT_ICR_RXBUFQ0 << queue) | GT_ICR_RXBUF;
   gt_eth_update_int_status(d,port);
   return(TRUE);

 dma_error:
   port->icr |= (GT_ICR_RXERRQ0 << queue) | GT_ICR_RXERR;
   gt_eth_update_int_status(d,port);
   return(FALSE);
}

/* Handle RX packet for an Ethernet port */
static int gt_eth_handle_rx_pkt(netio_desc_t *nio,
                                u_char *pkt,ssize_t pkt_len,
                                struct gt_data *d,void *arg)
{
   u_int queue,port_id = (int)arg;
   struct eth_port *port;

   port = &d->eth_ports[port_id];

   /* Check if RX DMA is active */
   if (!(port->sdcmr & GT_SDCMR_ERD))
      return(FALSE);

   queue = 0;   /* At this time, only put packets in queue 0 */
   gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len);
   return(TRUE);
}

/* Shutdown a GT system controller */
void dev_gt_shutdown(vm_instance_t *vm,struct gt_data *d)
{
   if (d != NULL) {
      /* Stop the TX ring scanner */
      ptask_remove(d->eth_tx_tid);

      /* Remove the device */
      dev_remove(vm,&d->dev);

      /* Remove the PCI device */
      pci_dev_remove(d->pci_dev);

      /* Free the structure itself */
      free(d);
   }
}

/* Create a new GT64010 controller */
int dev_gt64010_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64010: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64010_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64010,
                               0,0,irq,d,NULL,NULL,NULL);

      if (!d->pci_dev) {
         fprintf(stderr,"gt64010: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt64120_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt64120_read(cpu_gen_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT64120 controller */
int dev_gt64120_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64120: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64120_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64120,
                               0,0,irq,d,NULL,pci_gt64120_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt64120: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt96100_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt96100_read(cpu_gen_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT96100 controller */
int dev_gt96100_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,
                     u_int dma_irq,u_int eth_irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt96100: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->name = name;
   d->vm = vm;
   d->eth_irq = eth_irq;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt96100_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100,
                               0,0,dma_irq,d,NULL,pci_gt96100_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt96100: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Start the TX ring scanner */
   d->eth_tx_tid = ptask_add((ptask_callback)gt_eth_handle_txqueues,d,NULL);

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/* Bind a NIO to a GT96100 device */
int dev_gt96100_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   /* check that a NIO is not already bound */
   if (port->nio != NULL)
      return(-1);

   port->nio = nio;
   netio_rxl_add(nio,(netio_rx_handler_t)gt_eth_handle_rx_pkt,
                 d,(void *)port_id);
   return(0);
}
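
/*
 * Usage sketch (added illustration, not part of the original source):
 * a platform driver typically creates the controller and then binds
 * one NIO per Ethernet port. The names "paddr", "len", "dma_irq",
 * "eth_irq", "gt" and "nio0" are placeholders:
 *
 *   dev_gt96100_init(vm,"gt96100",paddr,len,dma_irq,eth_irq);
 *
 *   // look up the gt_data registered above (e.g. through the VM
 *   // object list), then attach the NIO to Ethernet port 0:
 *   dev_gt96100_set_nio(gt,0,nio0);
 *
 * dev_gt96100_unset_nio() below undoes this binding at shutdown time.
 */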

/* Unbind a NIO from a GT96100 device */
int dev_gt96100_unset_nio(struct gt_data *d,u_int port_id)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   if (port->nio != NULL) {
      netio_rxl_remove(port->nio);
      port->nio = NULL;
   }

   return(0);
}
