/[dynamips]/upstream/dynamips-0.2.7/dev_gt.c
Revision 10 - Sat Oct 6 16:29:14 2007 UTC by dpavlin (dynamips-0.2.7, text/plain, 57730 bytes)

/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 *
 * Galileo GT64010/GT64120A/GT96100A system controller.
 *
 * The DMA stuff is not complete, only "normal" transfers are working
 * (source and destination addresses incrementing).
 *
 * Also, these transfers are "instantaneous" from a CPU point-of-view: when
 * a channel is enabled, the transfer is immediately done. So, this is not
 * very realistic.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "utils.h"
#include "net.h"
#include "cpu.h"
#include "vm.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "net_io.h"
#include "ptask.h"
#include "dev_gt.h"

/* Debugging flags */
#define DEBUG_UNKNOWN   0
#define DEBUG_DMA       0
#define DEBUG_MII       0
#define DEBUG_ETH_TX    0
#define DEBUG_ETH_RX    0
#define DEBUG_ETH_HASH  0

/* PCI identification */
#define PCI_VENDOR_GALILEO           0x11ab   /* Galileo Technology */
#define PCI_PRODUCT_GALILEO_GT64010  0x0146   /* GT-64010 */
#define PCI_PRODUCT_GALILEO_GT64011  0x4146   /* GT-64011 */
#define PCI_PRODUCT_GALILEO_GT64120  0x4620   /* GT-64120 */
#define PCI_PRODUCT_GALILEO_GT96100  0x9653   /* GT-96100 */

/* === Global definitions === */

/* Interrupt High Cause Register */
#define GT_IHCR_ETH0_SUM  0x00000001
#define GT_IHCR_ETH1_SUM  0x00000002
#define GT_IHCR_SDMA_SUM  0x00000010

/* Serial Cause Register */
#define GT_SCR_ETH0_SUM  0x00000001
#define GT_SCR_ETH1_SUM  0x00000002
#define GT_SCR_SDMA_SUM  0x00000010

/* === DMA definitions === */
#define GT_DMA_CHANNELS  4

#define GT_DMA_FLYBY_ENABLE  0x00000001   /* FlyBy Enable */
#define GT_DMA_FLYBY_RDWR    0x00000002   /* SDRAM Read/Write (FlyBy) */
#define GT_DMA_SRC_DIR       0x0000000c   /* Source Direction */
#define GT_DMA_DST_DIR       0x00000030   /* Destination Direction */
#define GT_DMA_DATA_LIMIT    0x000001c0   /* Data Transfer Limit */
#define GT_DMA_CHAIN_MODE    0x00000200   /* Chained Mode */
#define GT_DMA_INT_MODE      0x00000400   /* Interrupt Mode */
#define GT_DMA_TRANS_MODE    0x00000800   /* Transfer Mode */
#define GT_DMA_CHAN_ENABLE   0x00001000   /* Channel Enable */
#define GT_DMA_FETCH_NEXT    0x00002000   /* Fetch Next Record */
#define GT_DMA_ACT_STATUS    0x00004000   /* DMA Activity Status */
#define GT_DMA_SDA           0x00008000   /* Source/Destination Alignment */
#define GT_DMA_MDREQ         0x00010000   /* Mask DMA Requests */
#define GT_DMA_CDE           0x00020000   /* Close Descriptor Enable */
#define GT_DMA_EOTE          0x00040000   /* End-of-Transfer (EOT) Enable */
#define GT_DMA_EOTIE         0x00080000   /* EOT Interrupt Enable */
#define GT_DMA_ABORT         0x00100000   /* Abort DMA Transfer */
#define GT_DMA_SLP           0x00600000   /* Override Source Address */
#define GT_DMA_DLP           0x01800000   /* Override Dest Address */
#define GT_DMA_RLP           0x06000000   /* Override Record Address */
#define GT_DMA_REQ_SRC       0x10000000   /* DMA Request Source */

/* Galileo DMA channel */
struct dma_channel {
   m_uint32_t byte_count;   /* transfer byte count */
   m_uint32_t src_addr;     /* source address */
   m_uint32_t dst_addr;     /* destination address */
   m_uint32_t cdptr;        /* current descriptor pointer */
   m_uint32_t nrptr;        /* next record pointer (chained mode) */
   m_uint32_t ctrl;         /* channel control register */
};

/* === Ethernet definitions === */
#define GT_ETH_PORTS     2
#define GT_MAX_PKT_SIZE  2048

/* SMI register */
#define GT_SMIR_DATA_MASK    0x0000FFFF
#define GT_SMIR_PHYAD_MASK   0x001F0000   /* PHY Device Address */
#define GT_SMIR_PHYAD_SHIFT  16
#define GT_SMIR_REGAD_MASK   0x03e00000   /* PHY Device Register Address */
#define GT_SMIR_REGAD_SHIFT  21
#define GT_SMIR_OPCODE_MASK  0x04000000   /* Opcode (0: write, 1: read) */
#define GT_SMIR_OPCODE_READ  0x04000000
#define GT_SMIR_RVALID_FLAG  0x08000000   /* Read Valid */
#define GT_SMIR_BUSY_FLAG    0x10000000   /* Busy: 1=op in progress */

/* PCR: Port Configuration Register */
#define GT_PCR_PM    0x00000001   /* Promiscuous mode */
#define GT_PCR_RBM   0x00000002   /* Reject broadcast mode */
#define GT_PCR_PBF   0x00000004   /* Pass bad frames */
#define GT_PCR_EN    0x00000080   /* Port Enabled/Disabled */
#define GT_PCR_LPBK  0x00000300   /* Loopback mode */
#define GT_PCR_FC    0x00000400   /* Force collision */
#define GT_PCR_HS    0x00001000   /* Hash size */
#define GT_PCR_HM    0x00002000   /* Hash mode */
#define GT_PCR_HDM   0x00004000   /* Hash default mode */
#define GT_PCR_HD    0x00008000   /* Duplex Mode */
#define GT_PCR_ISL   0x70000000   /* ISL enabled (0x06) */
#define GT_PCR_ACCS  0x80000000   /* Accelerate Slot Time */

/* PCXR: Port Configuration Extend Register */
#define GT_PCXR_IGMP          0x00000001   /* IGMP packet capture */
#define GT_PCXR_SPAN          0x00000002   /* BPDU packet capture */
#define GT_PCXR_PAR           0x00000004   /* Partition Enable */
#define GT_PCXR_PRIOTX        0x00000038   /* Priority weight for TX */
#define GT_PCXR_PRIORX        0x000000C0   /* Priority weight for RX */
#define GT_PCXR_PRIORX_OV     0x00000100   /* Prio RX override */
#define GT_PCXR_DPLX_EN       0x00000200   /* Autoneg for Duplex */
#define GT_PCXR_FCTL_EN       0x00000400   /* Autoneg for 802.3x */
#define GT_PCXR_FLP           0x00000800   /* Force Link Pass */
#define GT_PCXR_FCTL          0x00001000   /* Flow Control Mode */
#define GT_PCXR_MFL           0x0000C000   /* Maximum Frame Length */
#define GT_PCXR_MIB_CLR_MODE  0x00010000   /* MIB counters clear mode */
#define GT_PCXR_SPEED         0x00040000   /* Port Speed */
#define GT_PCXR_SPEED_EN      0x00080000   /* Autoneg for Speed */
#define GT_PCXR_RMII_EN       0x00100000   /* RMII Enable */
#define GT_PCXR_DSCP_EN       0x00200000   /* DSCP decoding enable */

/* PCMR: Port Command Register */
#define GT_PCMR_FJ  0x00008000   /* Force Jam / Flow Control */

/* PSR: Port Status Register */
#define GT_PSR_SPEED   0x00000001   /* Speed: 10/100 Mb/s (100=>1) */
#define GT_PSR_DUPLEX  0x00000002   /* Duplex (1: full) */
#define GT_PSR_FCTL    0x00000004   /* Flow Control Mode */
#define GT_PSR_LINK    0x00000008   /* Link Up/Down */
#define GT_PSR_PAUSE   0x00000010   /* Flow-control disabled state */
#define GT_PSR_TXLOW   0x00000020   /* TX Low priority status */
#define GT_PSR_TXHIGH  0x00000040   /* TX High priority status */
#define GT_PSR_TXINP   0x00000080   /* TX in Progress */

/* SDCR: SDMA Configuration Register */
#define GT_SDCR_RC    0x0000003c   /* Retransmit count */
#define GT_SDCR_BLMR  0x00000040   /* Big/Little Endian RX mode */
#define GT_SDCR_BLMT  0x00000080   /* Big/Little Endian TX mode */
#define GT_SDCR_POVR  0x00000100   /* PCI override */
#define GT_SDCR_RIFB  0x00000200   /* RX IRQ on frame boundary */
#define GT_SDCR_BSZ   0x00003000   /* Burst size */

/* SDCMR: SDMA Command Register */
#define GT_SDCMR_ERD   0x00000080   /* Enable RX DMA */
#define GT_SDCMR_AR    0x00008000   /* Abort Receive */
#define GT_SDCMR_STDH  0x00010000   /* Stop TX High */
#define GT_SDCMR_STDL  0x00020000   /* Stop TX Low */
#define GT_SDCMR_TXDH  0x00800000   /* Start TX High */
#define GT_SDCMR_TXDL  0x01000000   /* Start TX Low */
#define GT_SDCMR_AT    0x80000000   /* Abort Transmit */

/* ICR: Interrupt Cause Register */
#define GT_ICR_RXBUF   0x00000001   /* RX Buffer returned to host */
#define GT_ICR_TXBUFH  0x00000004   /* TX Buffer High */
#define GT_ICR_TXBUFL  0x00000008   /* TX Buffer Low */
#define GT_ICR_TXENDH  0x00000040   /* TX End High */
#define GT_ICR_TXENDL  0x00000080   /* TX End Low */
#define GT_ICR_RXERR   0x00000100   /* RX Error */
#define GT_ICR_TXERRH  0x00000400   /* TX Error High */
#define GT_ICR_TXERRL  0x00000800   /* TX Error Low */
#define GT_ICR_RXOVR   0x00001000   /* RX Overrun */
#define GT_ICR_TXUDR   0x00002000   /* TX Underrun */
#define GT_ICR_RXBUFQ0  0x00010000   /* RX Buffer in Prio Queue 0 */
#define GT_ICR_RXBUFQ1  0x00020000   /* RX Buffer in Prio Queue 1 */
#define GT_ICR_RXBUFQ2  0x00040000   /* RX Buffer in Prio Queue 2 */
#define GT_ICR_RXBUFQ3  0x00080000   /* RX Buffer in Prio Queue 3 */
/* note: in this implementation the RXERRQn bits reuse the RXBUFQn values */
#define GT_ICR_RXERRQ0  0x00010000   /* RX Error in Prio Queue 0 */
#define GT_ICR_RXERRQ1  0x00020000   /* RX Error in Prio Queue 1 */
#define GT_ICR_RXERRQ2  0x00040000   /* RX Error in Prio Queue 2 */
#define GT_ICR_RXERRQ3  0x00080000   /* RX Error in Prio Queue 3 */
#define GT_ICR_MII_STC  0x10000000   /* MII PHY Status Change */
#define GT_ICR_SMI_DONE 0x20000000   /* SMI Command Done */
#define GT_ICR_INT_SUM  0x80000000   /* Ethernet Interrupt Summary */
#define GT_ICR_MASK     0x7FFFFFFF

/* Ethernet hash entry */
#define GT_HTE_VALID      0x00000001   /* Valid entry */
#define GT_HTE_SKIP       0x00000002   /* Skip entry in a chain */
#define GT_HTE_RD         0x00000004   /* 0: Discard, 1: Receive */
#define GT_HTE_ADDR_MASK  0x7fffffffffff8ULL

#define GT_HTE_HOPNUM  12   /* Hash Table Hop Number */

enum {
   GT_HTLOOKUP_MISS,
   GT_HTLOOKUP_MATCH,
   GT_HTLOOKUP_HOP_EXCEEDED,
};

/* TX Descriptor */
#define GT_TXDESC_OWN  0x80000000   /* Ownership */
#define GT_TXDESC_AM   0x40000000   /* Auto-mode */
#define GT_TXDESC_EI   0x00800000   /* Enable Interrupt */
#define GT_TXDESC_GC   0x00400000   /* Generate CRC */
#define GT_TXDESC_P    0x00040000   /* Padding */
#define GT_TXDESC_F    0x00020000   /* First buffer of packet */
#define GT_TXDESC_L    0x00010000   /* Last buffer of packet */
#define GT_TXDESC_ES   0x00008000   /* Error Summary */
#define GT_TXDESC_RC   0x00003c00   /* Retransmit Count */
#define GT_TXDESC_COL  0x00000200   /* Collision */
#define GT_TXDESC_RL   0x00000100   /* Retransmit Limit Error */
#define GT_TXDESC_UR   0x00000040   /* Underrun Error */
#define GT_TXDESC_LC   0x00000020   /* Late Collision Error */

#define GT_TXDESC_BC_MASK   0xFFFF0000   /* Number of bytes to transmit */
#define GT_TXDESC_BC_SHIFT  16

/* RX Descriptor */
#define GT_RXDESC_OWN   0x80000000   /* Ownership */
#define GT_RXDESC_AM    0x40000000   /* Auto-mode */
#define GT_RXDESC_EI    0x00800000   /* Enable Interrupt */
#define GT_RXDESC_F     0x00020000   /* First buffer of packet */
#define GT_RXDESC_L     0x00010000   /* Last buffer of packet */
#define GT_RXDESC_ES    0x00008000   /* Error Summary */
#define GT_RXDESC_IGMP  0x00004000   /* IGMP packet detected */
#define GT_RXDESC_HE    0x00002000   /* Hash Table Expired */
#define GT_RXDESC_M     0x00001000   /* Missed Frame */
#define GT_RXDESC_FT    0x00000800   /* Frame Type (802.3/Ethernet) */
#define GT_RXDESC_SF    0x00000100   /* Short Frame Error */
#define GT_RXDESC_MFL   0x00000080   /* Maximum Frame Length Error */
#define GT_RXDESC_OR    0x00000040   /* Overrun Error */
#define GT_RXDESC_COL   0x00000010   /* Collision */
#define GT_RXDESC_CE    0x00000001   /* CRC Error */

#define GT_RXDESC_BC_MASK   0x0000FFFF   /* Byte count */
#define GT_RXDESC_BS_MASK   0xFFFF0000   /* Buffer size */
#define GT_RXDESC_BS_SHIFT  16

/* RX/TX descriptor */
struct eth_desc {
   m_uint32_t buf_size;
   m_uint32_t cmd_stat;
   m_uint32_t next_ptr;
   m_uint32_t buf_ptr;
};
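
/*
 * In-memory layout of an RX/TX descriptor, as used by gt_eth_desc_read()
 * and gt_eth_desc_write() below (four 32-bit words, byte offsets):
 *
 *    +0x00: buf_size
 *    +0x04: cmd_stat
 *    +0x08: next_ptr
 *    +0x0c: buf_ptr
 */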

/* Galileo Ethernet port */
struct eth_port {
   netio_desc_t *nio;

   /* First and Current RX descriptors (4 queues) */
   m_uint32_t rx_start[4],rx_current[4];

   /* Current TX descriptors (2 queues) */
   m_uint32_t tx_current[2];

   /* Port registers */
   m_uint32_t pcr,pcxr,pcmr,psr;

   /* SDMA registers */
   m_uint32_t sdcr,sdcmr;

   /* Interrupt registers */
   m_uint32_t icr,imr;

   /* Hash Table pointer */
   m_uint32_t ht_addr;

   /* Ethernet MIB counters */
   m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames;
};

/* Galileo GT64xxx/GT96xxx system controller */
struct gt_data {
   char *name;
   vm_obj_t vm_obj;
   struct vdevice dev;
   struct pci_device *pci_dev;
   vm_instance_t *vm;

   struct pci_bus *bus[2];
   struct dma_channel dma[GT_DMA_CHANNELS];
   m_uint32_t int_cause_reg;
   m_uint32_t int_mask_reg;

   /* Ethernet ports (GT-96100) */
   u_int eth_irq;
   ptask_id_t eth_tx_tid;
   struct eth_port eth_ports[GT_ETH_PORTS];
   m_uint32_t smi_reg;
   m_uint16_t mii_regs[32][32];
};

/* Log a GT message */
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)

/* Update the interrupt status */
static void gt_update_irq_status(struct gt_data *gt_data)
{
   if (gt_data->pci_dev) {
      if (gt_data->int_cause_reg & gt_data->int_mask_reg)
         pci_dev_trigger_irq(gt_data->vm,gt_data->pci_dev);
      else
         pci_dev_clear_irq(gt_data->vm,gt_data->pci_dev);
   }
}

/* Fetch a DMA record (chained mode) */
static void gt_dma_fetch_rec(vm_instance_t *vm,struct dma_channel *channel)
{
   m_uint32_t ptr;

#if DEBUG_DMA
   vm_log(vm,"GT_DMA","fetching record at address 0x%x\n",channel->nrptr);
#endif

   /* fetch the record from RAM */
   ptr = channel->nrptr;
   channel->byte_count = swap32(physmem_copy_u32_from_vm(vm,ptr));
   channel->src_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x04));
   channel->dst_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x08));
   channel->nrptr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x0c));

   /* clear the "fetch next record" bit */
   channel->ctrl &= ~GT_DMA_FETCH_NEXT;
}
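
/*
 * For reference, a DMA record in VM memory, as fetched above, is four
 * 32-bit words (each converted with swap32()):
 *
 *    +0x00: byte count
 *    +0x04: source address
 *    +0x08: destination address
 *    +0x0c: next record pointer
 */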

/* Handle the control register of a DMA channel */
static void gt_dma_handle_ctrl(struct gt_data *gt_data,int chan_id)
{
   struct dma_channel *channel = &gt_data->dma[chan_id];
   vm_instance_t *vm = gt_data->vm;
   int done;

   if (channel->ctrl & GT_DMA_FETCH_NEXT) {
      if (channel->nrptr == 0) {
         vm_log(vm,"GT_DMA","trying to load a NULL DMA record...\n");
         return;
      }

      gt_dma_fetch_rec(vm,channel);
   }

   if (channel->ctrl & GT_DMA_CHAN_ENABLE)
   {
      do {
         done = TRUE;

#if DEBUG_DMA
         vm_log(vm,"GT_DMA",
                "starting transfer from 0x%x to 0x%x (size=%u bytes)\n",
                channel->src_addr,channel->dst_addr,
                channel->byte_count & 0xFFFF);
#endif
         physmem_dma_transfer(vm,channel->src_addr,channel->dst_addr,
                              channel->byte_count & 0xFFFF);

         /* chained mode (ChainMod bit clear): fetch the next record */
         if (!(channel->ctrl & GT_DMA_CHAIN_MODE)) {
            if (channel->nrptr) {
               gt_dma_fetch_rec(vm,channel);
               done = FALSE;
            }
         }
      }while(!done);

#if DEBUG_DMA
      vm_log(vm,"GT_DMA","finished transfer.\n");
#endif
      /* Trigger the DMA interrupt */
      gt_data->int_cause_reg |= 1 << (4 + chan_id);
      gt_update_irq_status(gt_data);
   }
}

#define DMA_REG(ch,reg_name) \
   if (op_type == MTS_WRITE) \
      gt_data->dma[ch].reg_name = swap32(*data); \
   else \
      *data = swap32(gt_data->dma[ch].reg_name);
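
/*
 * For reference, "DMA_REG(0,src_addr);" expands to:
 *
 *    if (op_type == MTS_WRITE)
 *       gt_data->dma[0].src_addr = swap32(*data);
 *    else
 *       *data = swap32(gt_data->dma[0].src_addr);
 */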

/* Handle a DMA channel */
static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   switch(offset) {
      /* DMA Source Address */
      case 0x810: DMA_REG(0,src_addr); return(1);
      case 0x814: DMA_REG(1,src_addr); return(1);
      case 0x818: DMA_REG(2,src_addr); return(1);
      case 0x81c: DMA_REG(3,src_addr); return(1);

      /* DMA Destination Address */
      case 0x820: DMA_REG(0,dst_addr); return(1);
      case 0x824: DMA_REG(1,dst_addr); return(1);
      case 0x828: DMA_REG(2,dst_addr); return(1);
      case 0x82c: DMA_REG(3,dst_addr); return(1);

      /* DMA Next Record Pointer */
      case 0x830:
         gt_data->dma[0].cdptr = *data;
         DMA_REG(0,nrptr);
         return(1);

      case 0x834:
         gt_data->dma[1].cdptr = *data;
         DMA_REG(1,nrptr);
         return(1);

      case 0x838:
         gt_data->dma[2].cdptr = *data;
         DMA_REG(2,nrptr);
         return(1);

      case 0x83c:
         gt_data->dma[3].cdptr = *data;
         DMA_REG(3,nrptr);
         return(1);

      /* DMA Channel Control */
      case 0x840:
         DMA_REG(0,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,0);
         return(1);

      case 0x844:
         DMA_REG(1,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,1);
         return(1);

      case 0x848:
         DMA_REG(2,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,2);
         return(1);

      case 0x84c:
         DMA_REG(3,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,3);
         return(1);
   }

   return(0);
}

/*
 * dev_gt64010_access()
 */
void *dev_gt64010_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* ===== DRAM Settings (completely faked, 128 MB) ===== */
      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x080);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_cause_reg);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_mask_reg);
         else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Configuration ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64010","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64010","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/*
 * dev_gt64120_access()
 */
void *dev_gt64120_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_cause_reg);
         else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64120","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64120","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/* Trigger/clear the Ethernet interrupt if one or both ports have pending events */
static void gt_eth_set_int_status(struct gt_data *d)
{
   if ((d->eth_ports[0].icr & GT_ICR_INT_SUM) ||
       (d->eth_ports[1].icr & GT_ICR_INT_SUM))
      vm_set_irq(d->vm,d->eth_irq);
   else
      vm_clear_irq(d->vm,d->eth_irq);
}

/* Update the Ethernet port interrupt status */
static void gt_eth_update_int_status(struct gt_data *d,struct eth_port *port)
{
   if (port->icr & port->imr & GT_ICR_MASK) {
      port->icr |= GT_ICR_INT_SUM;
   } else {
      port->icr &= ~GT_ICR_INT_SUM;
   }

   gt_eth_set_int_status(d);
}

/* Read a MII register */
static m_uint32_t gt_mii_read(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint32_t res = 0;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

#if DEBUG_MII
   GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: reading.\n",port,reg);
#endif

   if ((port < GT_ETH_PORTS) && (reg < 32)) {
      res = d->mii_regs[port][reg];

      switch(reg) {
         case 0x00:
            res &= ~0x8200;   /* clear reset bit and autoneg restart */
            break;
         case 0x01:
#if 0
            if (d->ports[port].nio && bcm5600_mii_port_status(d,port))
               d->mii_output = 0x782C;
            else
               d->mii_output = 0;
#endif
            res = 0x782c;
            break;
         case 0x02:
            res = 0x40;
            break;
         case 0x03:
            res = 0x61d4;
            break;
         case 0x04:
            res = 0x1E1;
            break;
         case 0x05:
            res = 0x41E1;
            break;
         default:
            res = 0;
      }
   }

   /* Mark the data as ready */
   res |= GT_SMIR_RVALID_FLAG;

   return(res);
}

/* Write a MII register */
static void gt_mii_write(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint16_t isolation;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

   if ((port < GT_ETH_PORTS) && (reg < 32))
   {
#if DEBUG_MII
      GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: writing 0x%4.4x\n",
             port,reg,d->smi_reg & GT_SMIR_DATA_MASK);
#endif

      /* Check if PHY isolation status is changing */
      if (reg == 0) {
         isolation = (d->smi_reg ^ d->mii_regs[port][reg]) & 0x400;

         if (isolation) {
#if DEBUG_MII
            GT_LOG(d,"MII: port 0x%4.4x: generating IRQ\n",port);
#endif
            d->eth_ports[port].icr |= GT_ICR_MII_STC;
            gt_eth_update_int_status(d,&d->eth_ports[port]);
         }
      }

      d->mii_regs[port][reg] = d->smi_reg & GT_SMIR_DATA_MASK;
   }
}

/* Handle registers of Ethernet ports */
static int gt_eth_access(cpu_gen_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *d = dev->priv_data;
   struct eth_port *port;
   u_int port_id = 0;
   u_int queue;

   if ((offset < 0x80000) || (offset >= 0x90000))
      return(FALSE);

   if (op_type == MTS_WRITE)
      *data = swap32(*data);

   /* Determine the Ethernet port */
   if ((offset >= 0x84800) && (offset < 0x88800))
      port_id = 0;

   if ((offset >= 0x88800) && (offset < 0x8c800))
      port_id = 1;

   port = &d->eth_ports[port_id];

   switch(offset) {
      /* SMI register */
      case 0x80810:
         if (op_type == MTS_WRITE) {
            d->smi_reg = *data;

            if (!(d->smi_reg & GT_SMIR_OPCODE_READ))
               gt_mii_write(d);
         } else {
            *data = 0;

            if (d->smi_reg & GT_SMIR_OPCODE_READ)
               *data = gt_mii_read(d);
         }
         break;

      /* ICR: Interrupt Cause Register */
      case 0x84850:
      case 0x88850:
         if (op_type == MTS_READ) {
            *data = port->icr;
         } else {
            port->icr &= *data;
            gt_eth_update_int_status(d,port);
         }
         break;

      /* IMR: Interrupt Mask Register */
      case 0x84858:
      case 0x88858:
         if (op_type == MTS_READ) {
            *data = port->imr;
         } else {
            port->imr = *data;
            gt_eth_update_int_status(d,port);
         }
         break;

      /* PCR: Port Configuration Register */
      case 0x84800:
      case 0x88800:
         if (op_type == MTS_READ)
            *data = port->pcr;
         else
            port->pcr = *data;
         break;

      /* PCXR: Port Configuration Extend Register */
      case 0x84808:
      case 0x88808:
         if (op_type == MTS_READ) {
            *data = port->pcxr;
            *data |= GT_PCXR_SPEED;
         } else
            port->pcxr = *data;
         break;

      /* PCMR: Port Command Register */
      case 0x84810:
      case 0x88810:
         if (op_type == MTS_READ)
            *data = port->pcmr;
         else
            port->pcmr = *data;
         break;

      /* PSR: Port Status Register */
      case 0x84818:
      case 0x88818:
         if (op_type == MTS_READ)
            *data = 0x0F;
         break;

      /* First RX descriptor */
      case 0x84880:
      case 0x88880:
      case 0x84884:
      case 0x88884:
      case 0x84888:
      case 0x88888:
      case 0x8488C:
      case 0x8888C:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_start[queue];
         else
            port->rx_start[queue] = *data;
         break;

      /* Current RX descriptor */
      case 0x848A0:
      case 0x888A0:
      case 0x848A4:
      case 0x888A4:
      case 0x848A8:
      case 0x888A8:
      case 0x848AC:
      case 0x888AC:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_current[queue];
         else
            port->rx_current[queue] = *data;
         break;

      /* Current TX descriptor */
      case 0x848E0:
      case 0x888E0:
      case 0x848E4:
      case 0x888E4:
         queue = (offset >> 2) & 0x01;
         if (op_type == MTS_READ)
            *data = port->tx_current[queue];
         else
            port->tx_current[queue] = *data;
         break;

      /* Hash Table Pointer */
      case 0x84828:
      case 0x88828:
         if (op_type == MTS_READ)
            *data = port->ht_addr;
         else
            port->ht_addr = *data;
         break;

      /* SDCR: SDMA Configuration Register */
      case 0x84840:
      case 0x88840:
         if (op_type == MTS_READ)
            *data = port->sdcr;
         else
            port->sdcr = *data;
         break;

      /* SDCMR: SDMA Command Register */
      case 0x84848:
      case 0x88848:
         if (op_type == MTS_WRITE) {
            /* Start RX DMA */
            if (*data & GT_SDCMR_ERD) {
               port->sdcmr |= GT_SDCMR_ERD;
               port->sdcmr &= ~GT_SDCMR_AR;
            }

            /* Abort RX DMA */
            if (*data & GT_SDCMR_AR)
               port->sdcmr &= ~GT_SDCMR_ERD;

            /* Start TX High */
            if (*data & GT_SDCMR_TXDH) {
               port->sdcmr |= GT_SDCMR_TXDH;
               port->sdcmr &= ~GT_SDCMR_STDH;
            }

            /* Start TX Low */
            if (*data & GT_SDCMR_TXDL) {
               port->sdcmr |= GT_SDCMR_TXDL;
               port->sdcmr &= ~GT_SDCMR_STDL;
            }

            /* Stop TX High */
            if (*data & GT_SDCMR_STDH) {
               port->sdcmr &= ~GT_SDCMR_TXDH;
               port->sdcmr |= GT_SDCMR_STDH;
            }

            /* Stop TX Low */
            if (*data & GT_SDCMR_STDL) {
               port->sdcmr &= ~GT_SDCMR_TXDL;
               port->sdcmr |= GT_SDCMR_STDL;
            }
         } else {
            *data = port->sdcmr;
         }
         break;

      /* ===== MIB counters (cleared when read) ===== */
      case 0x85800:    /* RX bytes */
      case 0x89800:
         if (op_type == MTS_READ) {
            *data = port->rx_bytes;
            port->rx_bytes = 0;
         }
         break;

      case 0x85804:    /* TX bytes */
      case 0x89804:
         if (op_type == MTS_READ) {
            *data = port->tx_bytes;
            port->tx_bytes = 0;
         }
         break;

      case 0x85808:    /* RX frames */
      case 0x89808:
         if (op_type == MTS_READ) {
            *data = port->rx_frames;
            port->rx_frames = 0;
         }
         break;

      case 0x8580C:    /* TX frames */
      case 0x8980C:
         if (op_type == MTS_READ) {
            *data = port->tx_frames;
            port->tx_frames = 0;
         }
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100/ETH",
                    "read access to unknown register 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100/ETH",
                    "write access to unknown register 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   if (op_type == MTS_READ)
      *data = swap32(*data);

   return(TRUE);
}

/*
 * dev_gt96100_access()
 */
void *dev_gt96100_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* Watchdog configuration register */
      case 0x101a80:
         break;

      /* Watchdog value register */
      case 0x101a84:
         break;

      case 0x008:    /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:    /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:    /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:    /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:    /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:    /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:    /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:    /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:    /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:    /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:    /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:    /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:    /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:    /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:    /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Main Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = gt_data->int_cause_reg;

            /* TODO: signal Eth0/Eth1 */
            //*data |= (1 << 30) | (1 << 31) | 1;

            *data = swap32(*data);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt High Cause Register ===== */
      case 0xc98:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH1_SUM;

            *data = swap32(*data);
         }
         break;

      /* Serial Cause Register */
      case 0x103a00:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH1_SUM;

            gt_update_irq_status(gt_data);
            *data = swap32(*data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:    /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:    /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}

/* Read an Ethernet descriptor */
static void gt_eth_desc_read(struct gt_data *d,m_uint32_t addr,
                             struct eth_desc *desc)
{
   physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct eth_desc));

   /* byte-swapping */
   desc->cmd_stat = vmtoh32(desc->cmd_stat);
   desc->buf_size = vmtoh32(desc->buf_size);
   desc->next_ptr = vmtoh32(desc->next_ptr);
   desc->buf_ptr = vmtoh32(desc->buf_ptr);
}

/* Write an Ethernet descriptor */
static void gt_eth_desc_write(struct gt_data *d,m_uint32_t addr,
                              struct eth_desc *desc)
{
   struct eth_desc tmp;

   /* byte-swapping */
   tmp.cmd_stat = vmtoh32(desc->cmd_stat);
   tmp.buf_size = vmtoh32(desc->buf_size);
   tmp.next_ptr = vmtoh32(desc->next_ptr);
   tmp.buf_ptr = vmtoh32(desc->buf_ptr);

   physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct eth_desc));
}
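
/*
 * Note on byte order: gt_eth_desc_write() also uses vmtoh32() rather than
 * a host-to-VM counterpart; this works because the conversion is a plain
 * byte swap (or the identity), hence its own inverse.
 */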

/* Handle a TX queue (single packet) */
static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port,
                                 int queue)
{
   u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
   struct eth_desc txd0,ctxd,*ptxd;
   m_uint32_t tx_start,tx_current;
   m_uint32_t len,tot_len;
   int abort = FALSE;

   /* Check if this TX queue is active */
   if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL))
      return(FALSE);

   if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = tx_current = port->tx_current[queue];

   if (!tx_start)
      return(FALSE);

   ptxd = &txd0;
   gt_eth_desc_read(d,tx_start,ptxd);

   /* If we don't own the first descriptor, we cannot transmit */
   if (!(txd0.cmd_stat & GT_TXDESC_OWN))
      return(FALSE);

   /* Empty packet for now */
   pkt_ptr = pkt;
   tot_len = 0;

   for(;;) {
#if DEBUG_ETH_TX
      GT_LOG(d,"gt_eth_handle_txqueue: loop: "
             "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n",
             ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);
#endif

      if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) {
         GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n");
         abort = TRUE;
         break;
      }

      /* Copy packet data to the buffer */
      len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;

      /* Sanity check: do not overflow the local packet buffer */
      if ((tot_len + len) > sizeof(pkt)) {
         abort = TRUE;
         break;
      }

      physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
      pkt_ptr += len;
      tot_len += len;

      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
         ptxd->cmd_stat &= ~GT_TXDESC_OWN;
         /* cmd_stat is the second word of the descriptor (offset +4) */
         physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->cmd_stat);
      }

      tx_current = ptxd->next_ptr;

      /* Last descriptor or no more descriptors available ? */
      if (ptxd->cmd_stat & GT_TXDESC_L)
         break;

      if (!tx_current) {
         abort = TRUE;
         break;
      }

      /* Fetch the next descriptor */
      gt_eth_desc_read(d,tx_current,&ctxd);
      ptxd = &ctxd;
   }

   if ((tot_len != 0) && !abort) {
#if DEBUG_ETH_TX
      GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len);
      mem_dump(log_file,pkt,tot_len);
#endif
      /* send it on the wire */
      netio_send(port->nio,pkt,tot_len);

      /* Update MIB counters */
      port->tx_bytes += tot_len;
      port->tx_frames++;
   }

   /* Clear the OWN flag of the first descriptor */
   txd0.cmd_stat &= ~GT_TXDESC_OWN;
   physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);

   port->tx_current[queue] = tx_current;

   /* Notify the host about the transmitted packet */
   if (queue == 0)
      port->icr |= GT_ICR_TXBUFL;
   else
      port->icr |= GT_ICR_TXBUFH;

   if (abort) {
      /* TX underrun */
      port->icr |= GT_ICR_TXUDR;

      if (queue == 0)
         port->icr |= GT_ICR_TXERRL;
      else
         port->icr |= GT_ICR_TXERRH;
   } else {
      /* End of queue has been reached */
      if (!tx_current) {
         if (queue == 0)
            port->icr |= GT_ICR_TXENDL;
         else
            port->icr |= GT_ICR_TXENDH;
      }
   }

   /* Update the interrupt status */
   gt_eth_update_int_status(d,port);
   return(TRUE);
}

/* Handle the TX ring of the specified port */
static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port)
{
   gt_eth_handle_txqueue(d,&d->eth_ports[port],0);   /* TX Low */
   gt_eth_handle_txqueue(d,&d->eth_ports[port],1);   /* TX High */
}

/* Handle all TX rings of all Ethernet ports */
static int gt_eth_handle_txqueues(struct gt_data *d)
{
   int i;

   for(i=0;i<GT_ETH_PORTS;i++)
      gt_eth_handle_port_txqueues(d,i);

   return(TRUE);
}

/* Nibble bit-reversal table */
static const int inv_nibble[16] = {
   0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
   0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF
};
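
/* Example: inv_nibble[0x3] == 0xC (binary 0011 reversed is 1100). */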

/* Reverse the bit order of a 9-bit value */
static inline u_int gt_hash_inv_9bit(u_int val)
{
   u_int res;

   res = inv_nibble[val & 0x0F] << 5;
   res |= inv_nibble[(val & 0xF0) >> 4] << 1;
   res |= (val & 0x100) >> 8;
   return(res);
}

/*
 * Compute the hash value for Ethernet address filtering.
 * Two modes are available (p.271 of the GT96100 doc).
 */
static u_int gt_eth_hash_value(n_eth_addr_t *addr,int mode)
{
   m_uint64_t tmp;
   u_int res;
   int i;

   /* Reverse the bit order within each address byte
      (swap the nibbles, then bit-reverse each nibble) */
   for(i=0,tmp=0;i<N_ETH_ALEN;i++) {
      tmp <<= 8;
      tmp |= (inv_nibble[addr->eth_addr_byte[i] & 0x0F]) << 4;
      tmp |= inv_nibble[(addr->eth_addr_byte[i] & 0xF0) >> 4];
   }
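   /* tmp now holds the 48-bit address with every byte bit-reversed;
      the two modes below fold different slices of it together with XOR. */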

   if (mode == 0) {
      /* Hash mode 0: fill bits 0:8 */
      res = (tmp & 0x00000003) | ((tmp & 0x00007f00) >> 6);
      res ^= (tmp & 0x00ff8000) >> 15;
      res ^= (tmp & 0x1ff000000ULL) >> 24;

      /* Fill bits 9:14 */
      res |= (tmp & 0xfc) << 7;
   } else {
      /* Hash mode 1 (selected by any non-zero mode value): fill bits 0:8 */
      res = gt_hash_inv_9bit((tmp & 0x00007fc0) >> 6);
      res ^= gt_hash_inv_9bit((tmp & 0x00ff8000) >> 15);
      res ^= gt_hash_inv_9bit((tmp & 0x1ff000000ULL) >> 24);

      /* Fill bits 9:14 */
      res |= (tmp & 0x3f) << 9;
   }

   return(res);
}

/*
 * Walk through the Ethernet hash table.
 */
static int gt_eth_hash_lookup(struct gt_data *d,struct eth_port *port,
                              n_eth_addr_t *addr,m_uint64_t *entry)
{
   m_uint64_t eth_val;
   m_uint32_t hte_addr;
   u_int hash_val;
   int i;

   eth_val = (m_uint64_t)addr->eth_addr_byte[0] << 3;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[1] << 11;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[2] << 19;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[3] << 27;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[4] << 35;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[5] << 43;

   /* Compute the hash value for Ethernet address filtering */
   hash_val = gt_eth_hash_value(addr,port->pcr & GT_PCR_HM);

   if (port->pcr & GT_PCR_HS) {
      /* 1/2K address filtering */
      hte_addr = port->ht_addr + ((hash_val & 0x7ff) << 3);
   } else {
      /* 8K address filtering */
      hte_addr = port->ht_addr + (hash_val << 3);
   }

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash Lookup for Ethernet address "
          "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x: addr=0x%x\n",
          addr->eth_addr_byte[0], addr->eth_addr_byte[1],
          addr->eth_addr_byte[2], addr->eth_addr_byte[3],
          addr->eth_addr_byte[4], addr->eth_addr_byte[5],
          hte_addr);
#endif

   for(i=0;i<GT_HTE_HOPNUM;i++,hte_addr+=8) {
      *entry = ((m_uint64_t)physmem_copy_u32_from_vm(d->vm,hte_addr)) << 32;
      *entry |= physmem_copy_u32_from_vm(d->vm,hte_addr+4);

      /* Empty entry ? */
      if (!(*entry & GT_HTE_VALID))
         return(GT_HTLOOKUP_MISS);

      /* Skip flag or different Ethernet address: jump to the next entry */
      if ((*entry & GT_HTE_SKIP) || ((*entry & GT_HTE_ADDR_MASK) != eth_val))
         continue;

      /* This entry holds the matching MAC address */
      return(GT_HTLOOKUP_MATCH);
   }

   return(GT_HTLOOKUP_HOP_EXCEEDED);
}
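
/*
 * Hash table entry format, as used above: bit 0 = valid, bit 1 = skip,
 * bit 2 = receive/discard, bits 3:50 = MAC address (byte 0 in the
 * low-order bits, matching the eth_val construction above).
 */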

/*
 * Check if a packet (given its destination address) must be handled
 * at the RX path.
 *
 * Return values:
 *   - 0: Discard packet;
 *   - 1: Receive packet;
 *   - 2: Receive packet and set the "M" bit in the RX descriptor.
 *
 * The documentation is not clear about the M bit in the RX descriptor.
 * It is described as "Miss" or "Match" depending on the section.
 */
static inline int gt_eth_handle_rx_daddr(struct gt_data *d,
                                         struct eth_port *port,
                                         u_int hash_res,
                                         m_uint64_t hash_entry)
{
   /* Hop Number exceeded */
   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      return(1);

   /* Match and hash entry marked as "Receive" */
   if ((hash_res == GT_HTLOOKUP_MATCH) && (hash_entry & GT_HTE_RD))
      return(2);

   /* Miss, but the hash table default mode is to forward ? */
   if ((hash_res == GT_HTLOOKUP_MISS) && (port->pcr & GT_PCR_HDM))
      return(2);

   /* Promiscuous Mode */
   if (port->pcr & GT_PCR_PM)
      return(1);

   /* Drop the packet in all other cases */
   return(0);
}

/* Put a packet in the buffer of a descriptor */
static void gt_eth_rxdesc_put_pkt(struct gt_data *d,struct eth_desc *rxd,
                                  u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT;

   /* compute the data length to copy */
   cp_len = m_min(len,*pkt_len);

   /* copy packet data to the VM physical RAM */
   physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len);

   /* set the byte count in the descriptor */
   rxd->buf_size |= cp_len;

   *pkt += cp_len;
   *pkt_len -= cp_len;
}

/* Put a packet in the specified RX queue */
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue,
                                 u_char *pkt,ssize_t pkt_len)
{
   struct eth_port *port = &d->eth_ports[port_id];
   m_uint32_t rx_start,rx_current;
   struct eth_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   n_eth_dot1q_hdr_t *hdr;
   m_uint64_t hash_entry;
   int i,hash_res,addr_action;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);

   /* Copy the first RX descriptor */
   if (!(rx_start = rx_current = port->rx_start[queue]))
      goto dma_error;

   /* Analyze the Ethernet header */
   hdr = (n_eth_dot1q_hdr_t *)pkt;

   /* Hash table lookup for address filtering */
   hash_res = gt_eth_hash_lookup(d,port,&hdr->daddr,&hash_entry);

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash result: %d, hash_entry=0x%llx\n",hash_res,hash_entry);
#endif

   if (!(addr_action = gt_eth_handle_rx_daddr(d,port,hash_res,hash_entry)))
      return(FALSE);

   /* Load the first RX descriptor */
   gt_eth_desc_read(d,rx_start,&rxd0);

#if DEBUG_ETH_RX
   GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x "
          "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
          "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
          port_id,queue,rx_start,
          rxd0.buf_size,rxd0.cmd_stat,rxd0.next_ptr,rxd0.buf_ptr);
#endif

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* We must own the descriptor */
      if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
         goto dma_error;

      /* Put data into the descriptor buffer */
      gt_eth_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Clear the OWN bit */
      rxdc->cmd_stat &= ~GT_RXDESC_OWN;

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->cmd_stat |= GT_RXDESC_L;
         rxdc->buf_size += 4;   /* Add 4 bytes for CRC */
      }

      /* Update the descriptor in host memory (but not the 1st) */
      if (i != 0)
         gt_eth_desc_write(d,rx_current,rxdc);

      /* Get address of the next descriptor */
      rx_current = rxdc->next_ptr;

      if (tot_len == 0)
         break;

      if (!rx_current)
         goto dma_error;

      /* Read the next descriptor from VM physical RAM */
      gt_eth_desc_read(d,rx_current,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the RX pointers */
   port->rx_start[queue] = port->rx_current[queue] = rx_current;

   /* Update the first RX descriptor */
   rxd0.cmd_stat |= GT_RXDESC_F;

   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      rxd0.cmd_stat |= GT_RXDESC_HE;

   if (addr_action == 2)
      rxd0.cmd_stat |= GT_RXDESC_M;

   if (ntohs(hdr->type) <= N_ETH_MTU)   /* 802.3 frame */
      rxd0.cmd_stat |= GT_RXDESC_FT;

   gt_eth_desc_write(d,rx_start,&rxd0);

   /* Update MIB counters */
   port->rx_bytes += pkt_len;
   port->rx_frames++;

   /* Indicate that we have a frame ready */
   port->icr |= (GT_ICR_RXBUFQ0 << queue) | GT_ICR_RXBUF;
   gt_eth_update_int_status(d,port);
   return(TRUE);

 dma_error:
   port->icr |= (GT_ICR_RXERRQ0 << queue) | GT_ICR_RXERR;
   gt_eth_update_int_status(d,port);
   return(FALSE);
}

/* Handle RX packet for an Ethernet port */
static int gt_eth_handle_rx_pkt(netio_desc_t *nio,
                                u_char *pkt,ssize_t pkt_len,
                                struct gt_data *d,void *arg)
{
   u_int queue,port_id = (int)arg;
   struct eth_port *port;

   port = &d->eth_ports[port_id];

   /* Check if RX DMA is active */
   if (!(port->sdcmr & GT_SDCMR_ERD))
      return(FALSE);

   queue = 0;   /* At this time, only put the packet in queue 0 */
   gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len);
   return(TRUE);
}

/* Shutdown a GT system controller */
void dev_gt_shutdown(vm_instance_t *vm,struct gt_data *d)
{
   if (d != NULL) {
      /* Stop the TX ring scanner */
      ptask_remove(d->eth_tx_tid);

      /* Remove the device */
      dev_remove(vm,&d->dev);

      /* Remove the PCI device */
      pci_dev_remove(d->pci_dev);

      /* Free the structure itself */
      free(d);
   }
}

/* Create a new GT64010 controller */
int dev_gt64010_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64010: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64010_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64010,
                               0,0,irq,d,NULL,NULL,NULL);

      if (!d->pci_dev) {
         fprintf(stderr,"gt64010: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt64120_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt64120_read(cpu_gen_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT64120 controller */
int dev_gt64120_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64120: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64120_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64120,
                               0,0,irq,d,NULL,pci_gt64120_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt64120: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt96100_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt96100_read(cpu_gen_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT96100 controller */
int dev_gt96100_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,
                     u_int dma_irq,u_int eth_irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt96100: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->name = name;
   d->vm = vm;
   d->eth_irq = eth_irq;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt96100_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100,
                               0,0,dma_irq,d,NULL,pci_gt96100_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt96100: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Start the TX ring scanner */
   d->eth_tx_tid = ptask_add((ptask_callback)gt_eth_handle_txqueues,d,NULL);

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/* Bind a NIO to a GT96100 device */
int dev_gt96100_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   /* check that a NIO is not already bound */
   if (port->nio != NULL)
      return(-1);

   port->nio = nio;
   netio_rxl_add(nio,(netio_rx_handler_t)gt_eth_handle_rx_pkt,
                 d,(void *)port_id);
   return(0);
}
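
/*
 * Typical platform usage (a sketch only; the base address, length and IRQ
 * numbers below are illustrative, not taken from a real platform file):
 *
 *    dev_gt96100_init(vm,"gt96100",0x14000000ULL,0x1000000,2,3);
 *    ...retrieve the gt_data pointer "d" from the platform device data...
 *    dev_gt96100_set_nio(d,0,nio);   (bind a NIO to Ethernet port 0)
 */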

/* Unbind a NIO from a GT96100 device */
int dev_gt96100_unset_nio(struct gt_data *d,u_int port_id)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   if (port->nio != NULL) {
      netio_rxl_remove(port->nio);
      port->nio = NULL;
   }

   return(0);
}
