/[dynamips]/upstream/dynamips-0.2.7-RC2/dev_gt.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.7-RC2/dev_gt.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 8 - (show annotations)
Sat Oct 6 16:24:54 2007 UTC (12 years, 1 month ago) by dpavlin
File MIME type: text/plain
File size: 57458 byte(s)
dynamips-0.2.7-RC2

1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
4 *
5 * Galileo GT64010/GT64120A/GT96100A system controller.
6 *
7 * The DMA stuff is not complete, only "normal" transfers are working
8 * (source and destination addresses incrementing).
9 *
10 * Also, these transfers are "instantaneous" from a CPU point-of-view: when
11 * a channel is enabled, the transfer is immediately done. So, this is not
12 * very realistic.
13 */
14
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18
19 #include "utils.h"
20 #include "net.h"
21 #include "cpu.h"
22 #include "vm.h"
23 #include "dynamips.h"
24 #include "memory.h"
25 #include "device.h"
26 #include "net_io.h"
27 #include "ptask.h"
28 #include "dev_gt.h"
29
/* Debugging flags (set a flag to 1 to enable verbose logging for that area) */
#define DEBUG_UNKNOWN 0
#define DEBUG_DMA 0
#define DEBUG_MII 0
#define DEBUG_ETH_TX 0
#define DEBUG_ETH_RX 0
#define DEBUG_ETH_HASH 0

/* PCI identification */
#define PCI_VENDOR_GALILEO 0x11ab /* Galileo Technology */
#define PCI_PRODUCT_GALILEO_GT64010 0x0146 /* GT-64010 */
#define PCI_PRODUCT_GALILEO_GT64011 0x4146 /* GT-64011 */
#define PCI_PRODUCT_GALILEO_GT64120 0x4620 /* GT-64120 */
#define PCI_PRODUCT_GALILEO_GT96100 0x9653 /* GT-96100 */

/* === Global definitions === */

/* Interrupt High Cause Register */
#define GT_IHCR_ETH0_SUM 0x00000001 /* Ethernet port 0 summary */
#define GT_IHCR_ETH1_SUM 0x00000002 /* Ethernet port 1 summary */
#define GT_IHCR_SDMA_SUM 0x00000010 /* SDMA summary */

/* Serial Cause Register (same bit layout as the IHCR summary bits) */
#define GT_SCR_ETH0_SUM 0x00000001
#define GT_SCR_ETH1_SUM 0x00000002
#define GT_SCR_SDMA_SUM 0x00000010

/* === DMA definitions === */
#define GT_DMA_CHANNELS 4 /* number of IDMA channels */

/* DMA Channel Control register bits */
#define GT_DMA_FLYBY_ENABLE 0x00000001 /* FlyBy Enable */
#define GT_DMA_FLYBY_RDWR 0x00000002 /* SDRAM Read/Write (FlyBy) */
#define GT_DMA_SRC_DIR 0x0000000c /* Source Direction */
#define GT_DMA_DST_DIR 0x00000030 /* Destination Direction */
#define GT_DMA_DATA_LIMIT 0x000001c0 /* Data Transfer Limit */
#define GT_DMA_CHAIN_MODE 0x00000200 /* Chained Mode */
#define GT_DMA_INT_MODE 0x00000400 /* Interrupt Mode */
#define GT_DMA_TRANS_MODE 0x00000800 /* Transfer Mode */
#define GT_DMA_CHAN_ENABLE 0x00001000 /* Channel Enable */
#define GT_DMA_FETCH_NEXT 0x00002000 /* Fetch Next Record */
#define GT_DMA_ACT_STATUS 0x00004000 /* DMA Activity Status */
#define GT_DMA_SDA 0x00008000 /* Source/Destination Alignment */
#define GT_DMA_MDREQ 0x00010000 /* Mask DMA Requests */
#define GT_DMA_CDE 0x00020000 /* Close Descriptor Enable */
#define GT_DMA_EOTE 0x00040000 /* End-of-Transfer (EOT) Enable */
#define GT_DMA_EOTIE 0x00080000 /* EOT Interrupt Enable */
#define GT_DMA_ABORT 0x00100000 /* Abort DMA Transfer */
#define GT_DMA_SLP 0x00600000 /* Override Source Address */
#define GT_DMA_DLP 0x01800000 /* Override Dest Address */
#define GT_DMA_RLP 0x06000000 /* Override Record Address */
#define GT_DMA_REQ_SRC 0x10000000 /* DMA Request Source */
/* Galileo DMA channel state (mirrors the per-channel register set) */
struct dma_channel {
   m_uint32_t byte_count;  /* Byte Count register (only low 16 bits used for transfers) */
   m_uint32_t src_addr;    /* Source Address register */
   m_uint32_t dst_addr;    /* Destination Address register */
   m_uint32_t cdptr;       /* Current Descriptor Pointer */
   m_uint32_t nrptr;       /* Next Record Pointer (chained mode) */
   m_uint32_t ctrl;        /* Channel Control register (GT_DMA_* bits) */
};
91
/* === Ethernet definitions === */
#define GT_ETH_PORTS 2       /* Ethernet ports on the GT-96100 */
#define GT_MAX_PKT_SIZE 2048 /* maximum handled packet size, in bytes */

/* SMI register (MII management interface) */
#define GT_SMIR_DATA_MASK 0x0000FFFF
#define GT_SMIR_PHYAD_MASK 0x001F0000 /* PHY Device Address */
#define GT_SMIR_PHYAD_SHIFT 16
#define GT_SMIR_REGAD_MASK 0x03e00000 /* PHY Device Register Address */
#define GT_SMIR_REGAD_SHIFT 21
#define GT_SMIR_OPCODE_MASK 0x04000000 /* Opcode (0: write, 1: read) */
#define GT_SMIR_OPCODE_READ 0x04000000
#define GT_SMIR_RVALID_FLAG 0x08000000 /* Read Valid */
#define GT_SMIR_BUSY_FLAG 0x10000000 /* Busy: 1=op in progress */

/* PCR: Port Configuration Register */
#define GT_PCR_PM 0x00000001 /* Promiscuous mode */
#define GT_PCR_RBM 0x00000002 /* Reject broadcast mode */
#define GT_PCR_PBF 0x00000004 /* Pass bad frames */
#define GT_PCR_EN 0x00000080 /* Port Enabled/Disabled */
#define GT_PCR_LPBK 0x00000300 /* Loopback mode */
#define GT_PCR_FC 0x00000400 /* Force collision */
#define GT_PCR_HS 0x00001000 /* Hash size */
#define GT_PCR_HM 0x00002000 /* Hash mode */
#define GT_PCR_HDM 0x00004000 /* Hash default mode */
#define GT_PCR_HD 0x00008000 /* Duplex Mode */
#define GT_PCR_ISL 0x70000000 /* ISL enabled (0x06) */
#define GT_PCR_ACCS 0x80000000 /* Accelerate Slot Time */

/* PCXR: Port Configuration Extend Register */
#define GT_PCXR_IGMP 0x00000001 /* IGMP packet capture */
#define GT_PCXR_SPAN 0x00000002 /* BPDU packet capture */
#define GT_PCXR_PAR 0x00000004 /* Partition Enable */
#define GT_PCXR_PRIOTX 0x00000038 /* Priority weight for TX */
#define GT_PCXR_PRIORX 0x000000C0 /* Priority weight for RX */
#define GT_PCXR_PRIORX_OV 0x00000100 /* Prio RX override */
#define GT_PCXR_DPLX_EN 0x00000200 /* Autoneg for Duplex */
#define GT_PCXR_FCTL_EN 0x00000400 /* Autoneg for 802.3x */
#define GT_PCXR_FLP 0x00000800 /* Force Link Pass */
#define GT_PCXR_FCTL 0x00001000 /* Flow Control Mode */
#define GT_PCXR_MFL 0x0000C000 /* Maximum Frame Length */
#define GT_PCXR_MIB_CLR_MODE 0x00010000 /* MIB counters clear mode */
#define GT_PCXR_SPEED 0x00040000 /* Port Speed */
#define GT_PCXR_SPEED_EN 0x00080000 /* Autoneg for Speed */
#define GT_PCXR_RMII_EN 0x00100000 /* RMII Enable */
#define GT_PCXR_DSCP_EN 0x00200000 /* DSCP decoding enable */

/* PCMR: Port Command Register */
#define GT_PCMR_FJ 0x00008000 /* Force Jam / Flow Control */

/* PSR: Port Status Register */
#define GT_PSR_SPEED 0x00000001 /* Speed: 10/100 Mb/s (100=>1)*/
#define GT_PSR_DUPLEX 0x00000002 /* Duplex (1: full) */
#define GT_PSR_FCTL 0x00000004 /* Flow Control Mode */
#define GT_PSR_LINK 0x00000008 /* Link Up/Down */
#define GT_PSR_PAUSE 0x00000010 /* Flow-control disabled state */
#define GT_PSR_TXLOW 0x00000020 /* TX Low priority status */
#define GT_PSR_TXHIGH 0x00000040 /* TX High priority status */
#define GT_PSR_TXINP 0x00000080 /* TX in Progress */

/* SDCR: SDMA Configuration Register */
#define GT_SDCR_RC 0x0000003c /* Retransmit count */
#define GT_SDCR_BLMR 0x00000040 /* Big/Little Endian RX mode */
#define GT_SDCR_BLMT 0x00000080 /* Big/Little Endian TX mode */
#define GT_SDCR_POVR 0x00000100 /* PCI override */
#define GT_SDCR_RIFB 0x00000200 /* RX IRQ on frame boundary */
#define GT_SDCR_BSZ 0x00003000 /* Burst size */

/* SDCMR: SDMA Command Register */
#define GT_SDCMR_ERD 0x00000080 /* Enable RX DMA */
#define GT_SDCMR_AR 0x00008000 /* Abort Receive */
#define GT_SDCMR_STDH 0x00010000 /* Stop TX High */
#define GT_SDCMR_STDL 0x00020000 /* Stop TX Low */
#define GT_SDCMR_TXDH 0x00800000 /* Start TX High */
#define GT_SDCMR_TXDL 0x01000000 /* Start TX Low */
#define GT_SDCMR_AT 0x80000000 /* Abort Transmit */

/* ICR: Interrupt Cause Register */
#define GT_ICR_RXBUF 0x00000001 /* RX Buffer returned to host */
#define GT_ICR_TXBUFH 0x00000004 /* TX Buffer High */
#define GT_ICR_TXBUFL 0x00000008 /* TX Buffer Low */
#define GT_ICR_TXENDH 0x00000040 /* TX End High */
#define GT_ICR_TXENDL 0x00000080 /* TX End Low */
#define GT_ICR_RXERR 0x00000100 /* RX Error */
#define GT_ICR_TXERRH 0x00000400 /* TX Error High */
#define GT_ICR_TXERRL 0x00000800 /* TX Error Low */
#define GT_ICR_RXOVR 0x00001000 /* RX Overrun */
#define GT_ICR_TXUDR 0x00002000 /* TX Underrun */
#define GT_ICR_RXBUFQ0 0x00010000 /* RX Buffer in Prio Queue 0 */
#define GT_ICR_RXBUFQ1 0x00020000 /* RX Buffer in Prio Queue 1 */
#define GT_ICR_RXBUFQ2 0x00040000 /* RX Buffer in Prio Queue 2 */
#define GT_ICR_RXBUFQ3 0x00080000 /* RX Buffer in Prio Queue 3 */
/* NOTE(review): the RXERRQn constants below share the same bit values as
   the RXBUFQn constants above — confirm against the GT-96100 datasheet
   whether the error-queue bits should really alias the buffer-queue bits. */
#define GT_ICR_RXERRQ0 0x00010000 /* RX Error in Prio Queue 0 */
#define GT_ICR_RXERRQ1 0x00020000 /* RX Error in Prio Queue 1 */
#define GT_ICR_RXERRQ2 0x00040000 /* RX Error in Prio Queue 2 */
#define GT_ICR_RXERRQ3 0x00080000 /* RX Error in Prio Queue 3 */
#define GT_ICR_MII_STC 0x10000000 /* MII PHY Status Change */
#define GT_ICR_SMI_DONE 0x20000000 /* SMI Command Done */
#define GT_ICR_INT_SUM 0x80000000 /* Ethernet Interrupt Summary */
#define GT_ICR_MASK 0x7FFFFFFF    /* all cause bits except the summary bit */

/* Ethernet hash entry */
#define GT_HTE_VALID 0x00000001 /* Valid entry */
#define GT_HTE_SKIP 0x00000002 /* Skip entry in a chain */
#define GT_HTE_RD 0x00000004 /* 0: Discard, 1: Receive */
#define GT_HTE_ADDR_MASK 0x7fffffffffff8ULL

#define GT_HTE_HOPNUM 12 /* Hash Table Hop Number */

/* Result codes of a hash table lookup */
enum {
   GT_HTLOOKUP_MISS,
   GT_HTLOOKUP_MATCH,
   GT_HTLOOKUP_HOP_EXCEEDED,
};
206
/* TX Descriptor command/status bits */
#define GT_TXDESC_OWN 0x80000000 /* Ownership (1: owned by device) */
#define GT_TXDESC_AM 0x40000000 /* Auto-mode */
#define GT_TXDESC_EI 0x00800000 /* Enable Interrupt */
#define GT_TXDESC_GC 0x00400000 /* Generate CRC */
#define GT_TXDESC_P 0x00040000 /* Padding */
#define GT_TXDESC_F 0x00020000 /* First buffer of packet */
#define GT_TXDESC_L 0x00010000 /* Last buffer of packet */
#define GT_TXDESC_ES 0x00008000 /* Error Summary */
#define GT_TXDESC_RC 0x00003c00 /* Retransmit Count */
#define GT_TXDESC_COL 0x00000200 /* Collision */
#define GT_TXDESC_RL 0x00000100 /* Retransmit Limit Error */
#define GT_TXDESC_UR 0x00000040 /* Underrun Error */
#define GT_TXDESC_LC 0x00000020 /* Late Collision Error */

#define GT_TXDESC_BC_MASK 0xFFFF0000 /* Number of bytes to transmit */
#define GT_TXDESC_BC_SHIFT 16

/* RX Descriptor command/status bits */
#define GT_RXDESC_OWN 0x80000000 /* Ownership (1: owned by device) */
#define GT_RXDESC_AM 0x40000000 /* Auto-mode */
#define GT_RXDESC_EI 0x00800000 /* Enable Interrupt */
#define GT_RXDESC_F 0x00020000 /* First buffer of packet */
#define GT_RXDESC_L 0x00010000 /* Last buffer of packet */
#define GT_RXDESC_ES 0x00008000 /* Error Summary */
#define GT_RXDESC_IGMP 0x00004000 /* IGMP packet detected */
#define GT_RXDESC_HE 0x00002000 /* Hash Table Expired */
#define GT_RXDESC_M 0x00001000 /* Missed Frame */
#define GT_RXDESC_FT 0x00000800 /* Frame Type (802.3/Ethernet) */
#define GT_RXDESC_SF 0x00000100 /* Short Frame Error */
#define GT_RXDESC_MFL 0x00000080 /* Maximum Frame Length Error */
#define GT_RXDESC_OR 0x00000040 /* Overrun Error */
#define GT_RXDESC_COL 0x00000010 /* Collision */
#define GT_RXDESC_CE 0x00000001 /* CRC Error */

#define GT_RXDESC_BC_MASK 0x0000FFFF /* Byte count */
#define GT_RXDESC_BS_MASK 0xFFFF0000 /* Buffer size */
#define GT_RXDESC_BS_SHIFT 16
/* RX/TX descriptor as laid out in guest memory (4 x 32-bit words) */
struct eth_desc {
   m_uint32_t buf_size;  /* buffer size / byte count (GT_*DESC_B*_MASK) */
   m_uint32_t cmd_stat;  /* command/status bits (GT_TXDESC_* / GT_RXDESC_*) */
   m_uint32_t next_ptr;  /* physical address of the next descriptor */
   m_uint32_t buf_ptr;   /* physical address of the data buffer */
};
253
/* Galileo Ethernet port state */
struct eth_port {
   netio_desc_t *nio;  /* network I/O backend attached to this port */

   /* First and Current RX descriptors (4 priority queues) */
   m_uint32_t rx_start[4],rx_current[4];

   /* Current TX descriptors (2 priority queues: high/low) */
   m_uint32_t tx_current[2];

   /* Port registers */
   m_uint32_t pcr,pcxr,pcmr,psr;

   /* SDMA registers */
   m_uint32_t sdcr,sdcmr;

   /* Interrupt cause and mask registers */
   m_uint32_t icr,imr;

   /* Hash Table pointer (physical address in guest memory) */
   m_uint32_t ht_addr;

   /* Ethernet MIB counters (cleared when read, see gt_eth_access) */
   m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames;
};
279
/* Galileo GT64xxx/GT96xxx system controller instance */
struct gt_data {
   char *name;                 /* device name used for logging */
   vm_obj_t vm_obj;
   struct vdevice dev;
   struct pci_device *pci_dev; /* NULL if no PCI interrupt routing */
   vm_instance_t *vm;

   struct pci_bus *bus[2];     /* PCI bus 0 and (GT-64120/96100) bus 1 */
   struct dma_channel dma[GT_DMA_CHANNELS];
   m_uint32_t int_cause_reg;   /* main Interrupt Cause register */
   m_uint32_t int_mask_reg;    /* main Interrupt Mask register */

   /* Ethernet ports (GT-96100 only) */
   u_int eth_irq;
   ptask_id_t eth_tx_tid;
   struct eth_port eth_ports[GT_ETH_PORTS];
   m_uint32_t smi_reg;         /* SMI (MII management) register */
   m_uint16_t mii_regs[32][32]; /* emulated PHY registers [phy_addr][reg_addr] */
};

/* Log a GT message through the VM logging facility */
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
303
304 /* Update the interrupt status */
305 static void gt_update_irq_status(struct gt_data *gt_data)
306 {
307 if (gt_data->pci_dev) {
308 if (gt_data->int_cause_reg & gt_data->int_mask_reg)
309 pci_dev_trigger_irq(gt_data->vm,gt_data->pci_dev);
310 else
311 pci_dev_clear_irq(gt_data->vm,gt_data->pci_dev);
312 }
313 }
314
315 /* Fetch a DMA record (chained mode) */
316 static void gt_dma_fetch_rec(vm_instance_t *vm,struct dma_channel *channel)
317 {
318 m_uint32_t ptr;
319
320 #if DEBUG_DMA
321 vm_log(vm,"GT_DMA","fetching record at address 0x%x\n",channel->nrptr);
322 #endif
323
324 /* fetch the record from RAM */
325 ptr = channel->nrptr;
326 channel->byte_count = swap32(physmem_copy_u32_from_vm(vm,ptr));
327 channel->src_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x04));
328 channel->dst_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x08));
329 channel->nrptr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x0c));
330
331 /* clear the "fetch next record bit" */
332 channel->ctrl &= ~GT_DMA_FETCH_NEXT;
333 }
334
/* Handle a write to the control register of a DMA channel.
 *
 * Transfers are performed "instantaneously" (see file header): when the
 * channel is enabled, the whole transfer (and any descriptor chain) is
 * executed before returning, then the DMA completion interrupt is raised.
 */
static void gt_dma_handle_ctrl(struct gt_data *gt_data,int chan_id)
{
   struct dma_channel *channel = &gt_data->dma[chan_id];
   vm_instance_t *vm = gt_data->vm;
   int done;

   /* explicit "fetch next record" request from the guest */
   if (channel->ctrl & GT_DMA_FETCH_NEXT) {
      if (channel->nrptr == 0) {
         vm_log(vm,"GT_DMA","trying to load a NULL DMA record...\n");
         return;
      }

      gt_dma_fetch_rec(vm,channel);
   }

   if (channel->ctrl & GT_DMA_CHAN_ENABLE)
   {
      do {
         done = TRUE;

#if DEBUG_DMA
         vm_log(vm,"GT_DMA",
                "starting transfer from 0x%x to 0x%x (size=%u bytes)\n",
                channel->src_addr,channel->dst_addr,
                channel->byte_count & 0xFFFF);
#endif
         /* only the low 16 bits of byte_count are used for the transfer */
         physmem_dma_transfer(vm,channel->src_addr,channel->dst_addr,
                              channel->byte_count & 0xFFFF);

         /* chained mode */
         /* NOTE(review): the next record is followed only when
            GT_DMA_CHAIN_MODE is CLEAR, i.e. the bit set appears to mean
            "non-chained" — confirm against the GT-64010 datasheet. */
         if (!(channel->ctrl & GT_DMA_CHAIN_MODE)) {
            if (channel->nrptr) {
               gt_dma_fetch_rec(vm,channel);
               done = FALSE;
            }
         }
      }while(!done);

#if DEBUG_DMA
      vm_log(vm,"GT_DMA","finished transfer.\n");
#endif
      /* Trigger DMA interrupt: channel completion bits start at bit 4
         of the main interrupt cause register */
      gt_data->int_cause_reg |= 1 << (4 + chan_id);
      gt_update_irq_status(gt_data);
   }
}
382
/*
 * Access (read or write) register `reg_name' of DMA channel `ch'.
 * Relies on `gt_data', `op_type' and `data' being in scope at the
 * expansion site (see gt_dma_access). Values are byte-swapped to match
 * the guest's big-endian register layout.
 *
 * Wrapped in do { } while(0) so the macro behaves as a single statement
 * (the previous bare if/else form was a dangling-else hazard).
 */
#define DMA_REG(ch,reg_name) \
   do { \
      if (op_type == MTS_WRITE) \
         gt_data->dma[ch].reg_name = swap32(*data); \
      else \
         *data = swap32(gt_data->dma[ch].reg_name); \
   } while(0)
388
389 /* Handle a DMA channel */
390 static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev,
391 m_uint32_t offset,u_int op_size,u_int op_type,
392 m_uint64_t *data)
393 {
394 struct gt_data *gt_data = dev->priv_data;
395
396 switch(offset) {
397 /* DMA Source Address */
398 case 0x810: DMA_REG(0,src_addr); return(1);
399 case 0x814: DMA_REG(1,src_addr); return(1);
400 case 0x818: DMA_REG(2,src_addr); return(1);
401 case 0x81c: DMA_REG(3,src_addr); return(1);
402
403 /* DMA Destination Address */
404 case 0x820: DMA_REG(0,dst_addr); return(1);
405 case 0x824: DMA_REG(1,dst_addr); return(1);
406 case 0x828: DMA_REG(2,dst_addr); return(1);
407 case 0x82c: DMA_REG(3,dst_addr); return(1);
408
409 /* DMA Next Record Pointer */
410 case 0x830:
411 gt_data->dma[0].cdptr = *data;
412 DMA_REG(0,nrptr);
413 return(1);
414
415 case 0x834:
416 gt_data->dma[1].cdptr = *data;
417 DMA_REG(1,nrptr);
418 return(1);
419
420 case 0x838:
421 gt_data->dma[2].cdptr = *data;
422 DMA_REG(2,nrptr);
423 return(1);
424
425 case 0x83c:
426 gt_data->dma[3].cdptr = *data;
427 DMA_REG(3,nrptr);
428 return(1);
429
430 /* DMA Channel Control */
431 case 0x840:
432 DMA_REG(0,ctrl);
433 if (op_type == MTS_WRITE)
434 gt_dma_handle_ctrl(gt_data,0);
435 return(1);
436
437 case 0x844:
438 DMA_REG(1,ctrl);
439 if (op_type == MTS_WRITE)
440 gt_dma_handle_ctrl(gt_data,1);
441 return(1);
442
443 case 0x848:
444 DMA_REG(2,ctrl);
445 if (op_type == MTS_WRITE)
446 gt_dma_handle_ctrl(gt_data,2);
447 return(1);
448
449 case 0x84c:
450 DMA_REG(3,ctrl);
451 if (op_type == MTS_WRITE)
452 gt_dma_handle_ctrl(gt_data,3);
453 return(1);
454 }
455
456 return(0);
457 }
458
/*
 * dev_gt64010_access()
 *
 * Memory-mapped register handler for the GT-64010. DMA registers are
 * decoded first; the remaining cases cover (faked) DRAM configuration,
 * the main interrupt cause/mask registers and the PCI configuration
 * address/data ports. Register values are byte-swapped (guest registers
 * are little-endian from the big-endian MIPS view).
 */
void *dev_gt64010_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   /* DMA channel registers take precedence */
   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* ===== DRAM Settings (completely faked, 128 Mb) ===== */
      case 0x008: /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010: /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018: /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x080);
         break;
      case 0x020: /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400: /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404: /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408: /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c: /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410: /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414: /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418: /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c: /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08: /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c: /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00: /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register =====
         Writes clear cause bits (write-0-to-clear via AND). */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_cause_reg);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_mask_reg);
         else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Configuration ===== */
      case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA: /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64010","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64010","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}
580
/*
 * dev_gt64120_access()
 *
 * Memory-mapped register handler for the GT-64120. Same structure as
 * dev_gt64010_access() but with a second PCI bus (registers 0xcf0/0xcf4)
 * and a different ras32_low value.
 */
void *dev_gt64120_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   /* DMA channel registers take precedence */
   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* ===== DRAM settings (faked) ===== */
      case 0x008: /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010: /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018: /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020: /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400: /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404: /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408: /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c: /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410: /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414: /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418: /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c: /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08: /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c: /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00: /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register =====
         Writes clear cause bits (write-0-to-clear via AND). */
      case 0xc18:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_cause_reg);
         else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA: /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64120","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT64120","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   return NULL;
}
710
711 /* Update the Ethernet port interrupt status */
712 static void gt_eth_update_int_status(struct gt_data *d,struct eth_port *port)
713 {
714 if (port->icr & port->imr & GT_ICR_MASK) {
715 port->icr |= GT_ICR_INT_SUM;
716 vm_set_irq(d->vm,d->eth_irq);
717 } else {
718 port->icr &= ~GT_ICR_INT_SUM;
719 vm_clear_irq(d->vm,d->eth_irq);
720 }
721 }
722
723 /* Read a MII register */
724 static m_uint32_t gt_mii_read(struct gt_data *d)
725 {
726 m_uint8_t port,reg;
727 m_uint32_t res = 0;
728
729 port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
730 reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;
731
732 #if DEBUG_MII
733 GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: reading.\n",port,reg);
734 #endif
735
736 if ((port < GT_ETH_PORTS) && (reg < 32)) {
737 res = d->mii_regs[port][reg];
738
739 switch(reg) {
740 case 0x00:
741 res &= ~0x8200; /* clear reset bit and autoneg restart */
742 break;
743 case 0x01:
744 #if 0
745 if (d->ports[port].nio && bcm5600_mii_port_status(d,port))
746 d->mii_output = 0x782C;
747 else
748 d->mii_output = 0;
749 #endif
750 res = 0x782c;
751 break;
752 case 0x02:
753 res = 0x40;
754 break;
755 case 0x03:
756 res = 0x61d4;
757 break;
758 case 0x04:
759 res = 0x1E1;
760 break;
761 case 0x05:
762 res = 0x41E1;
763 break;
764 default:
765 res = 0;
766 }
767 }
768
769 /* Mark the data as ready */
770 res |= GT_SMIR_RVALID_FLAG;
771
772 return(res);
773 }
774
775 /* Write a MII register */
776 static void gt_mii_write(struct gt_data *d)
777 {
778 m_uint8_t port,reg;
779 m_uint16_t isolation;
780
781 port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
782 reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;
783
784 if ((port < GT_ETH_PORTS) && (reg < 32))
785 {
786 #if DEBUG_MII
787 GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: writing 0x%4.4x\n",
788 port,reg,d->smi_reg & GT_SMIR_DATA_MASK);
789 #endif
790
791 /* Check if PHY isolation status is changing */
792 if (reg == 0) {
793 isolation = (d->smi_reg ^ d->mii_regs[port][reg]) & 0x400;
794
795 if (isolation) {
796 #if DEBUG_MII
797 GT_LOG(d,"MII: port 0x%4.4x: generating IRQ\n",port);
798 #endif
799 d->eth_ports[port].icr |= GT_ICR_MII_STC;
800 gt_eth_update_int_status(d,&d->eth_ports[port]);
801 }
802 }
803
804 d->mii_regs[port][reg] = d->smi_reg & GT_SMIR_DATA_MASK;
805 }
806 }
807
/* Handle registers of the GT-96100 Ethernet ports.
 *
 * Returns TRUE if the offset falls inside the Ethernet register window
 * (0x80000-0x8ffff), FALSE otherwise. Data is byte-swapped on entry for
 * writes and on exit for reads, so the switch body works on host-order
 * values. Note: op_size is not used here.
 */
static int gt_eth_access(cpu_gen_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *d = dev->priv_data;
   struct eth_port *port;
   u_int port_id = 0;
   u_int queue;

   if ((offset < 0x80000) || (offset >= 0x90000))
      return(FALSE);

   if (op_type == MTS_WRITE)
      *data = swap32(*data);

   /* Determine the Ethernet port from the register window
      (port 0: 0x84800-0x887ff, port 1: 0x88800-0x8c7ff) */
   if ((offset >= 0x84800) && (offset < 0x88800))
      port_id = 0;

   if ((offset >= 0x88800) && (offset < 0x8c800))
      port_id = 1;

   port = &d->eth_ports[port_id];

   switch(offset) {
      /* SMI register: a write with the read opcode latches the request,
         the following read of the register returns the MII data */
      case 0x80810:
         if (op_type == MTS_WRITE) {
            d->smi_reg = *data;

            if (!(d->smi_reg & GT_SMIR_OPCODE_READ))
               gt_mii_write(d);
         } else {
            *data = 0;

            if (d->smi_reg & GT_SMIR_OPCODE_READ)
               *data = gt_mii_read(d);
         }
         break;

      /* ICR: Interrupt Cause Register (write-0-to-clear via AND) */
      case 0x84850:
      case 0x88850:
         if (op_type == MTS_READ) {
            *data = port->icr;
         } else {
            port->icr &= *data;
            gt_eth_update_int_status(d,port);
         }
         break;

      /* IMR: Interrupt Mask Register */
      case 0x84858:
      case 0x88858:
         if (op_type == MTS_READ) {
            *data = port->imr;
         } else {
            port->imr = *data;
            gt_eth_update_int_status(d,port);
         }
         break;

      /* PCR: Port Configuration Register */
      case 0x84800:
      case 0x88800:
         if (op_type == MTS_READ)
            *data = port->pcr;
         else
            port->pcr = *data;
         break;

      /* PCXR: Port Configuration Extend Register
         (the speed bit is forced on reads: 100 Mb/s) */
      case 0x84808:
      case 0x88808:
         if (op_type == MTS_READ) {
            *data = port->pcxr;
            *data |= GT_PCXR_SPEED;
         } else
            port->pcxr = *data;
         break;

      /* PCMR: Port Command Register */
      case 0x84810:
      case 0x88810:
         if (op_type == MTS_READ)
            *data = port->pcmr;
         else
            port->pcmr = *data;
         break;

      /* Port Status Register: always reports link up, full duplex,
         flow control, 100 Mb/s (0x0F) */
      case 0x84818:
      case 0x88818:
         if (op_type == MTS_READ)
            *data = 0x0F;
         break;

      /* First RX descriptor (one register per priority queue 0-3;
         the queue index is bits 3:2 of the offset) */
      case 0x84880:
      case 0x88880:
      case 0x84884:
      case 0x88884:
      case 0x84888:
      case 0x88888:
      case 0x8488C:
      case 0x8888C:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_start[queue];
         else
            port->rx_start[queue] = *data;
         break;

      /* Current RX descriptor (one register per priority queue 0-3) */
      case 0x848A0:
      case 0x888A0:
      case 0x848A4:
      case 0x888A4:
      case 0x848A8:
      case 0x888A8:
      case 0x848AC:
      case 0x888AC:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_current[queue];
         else
            port->rx_current[queue] = *data;
         break;

      /* Current TX descriptor (queue 0: high, queue 1: low) */
      case 0x848E0:
      case 0x888E0:
      case 0x848E4:
      case 0x888E4:
         queue = (offset >> 2) & 0x01;
         if (op_type == MTS_READ)
            *data = port->tx_current[queue];
         else
            port->tx_current[queue] = *data;
         break;

      /* Hash Table Pointer */
      case 0x84828:
      case 0x88828:
         if (op_type == MTS_READ)
            *data = port->ht_addr;
         else
            port->ht_addr = *data;
         break;

      /* SDCR: SDMA Configuration Register */
      case 0x84840:
      case 0x88840:
         if (op_type == MTS_READ)
            *data = port->sdcr;
         else
            port->sdcr = *data;
         break;

      /* SDCMR: SDMA Command Register. Start/stop bits are mutually
         exclusive per direction, so each command also clears its
         opposite. */
      case 0x84848:
      case 0x88848:
         if (op_type == MTS_WRITE) {
            /* Start RX DMA */
            if (*data & GT_SDCMR_ERD) {
               port->sdcmr |= GT_SDCMR_ERD;
               port->sdcmr &= ~GT_SDCMR_AR;
            }

            /* Abort RX DMA */
            if (*data & GT_SDCMR_AR)
               port->sdcmr &= ~GT_SDCMR_ERD;

            /* Start TX High */
            if (*data & GT_SDCMR_TXDH) {
               port->sdcmr |= GT_SDCMR_TXDH;
               port->sdcmr &= ~GT_SDCMR_STDH;
            }

            /* Start TX Low */
            if (*data & GT_SDCMR_TXDL) {
               port->sdcmr |= GT_SDCMR_TXDL;
               port->sdcmr &= ~GT_SDCMR_STDL;
            }

            /* Stop TX High */
            if (*data & GT_SDCMR_STDH) {
               port->sdcmr &= ~GT_SDCMR_TXDH;
               port->sdcmr |= GT_SDCMR_STDH;
            }

            /* Stop TX Low */
            if (*data & GT_SDCMR_STDL) {
               port->sdcmr &= ~GT_SDCMR_TXDL;
               port->sdcmr |= GT_SDCMR_STDL;
            }
         } else {
            *data = port->sdcmr;
         }
         break;

      /* MIB counters: cleared when read */
      case 0x85800:
      case 0x89800:
         if (op_type == MTS_READ) {
            *data = port->rx_bytes;
            port->rx_bytes = 0;
         }
         break;

      case 0x85804:
      case 0x89804:
         if (op_type == MTS_READ) {
            *data = port->tx_bytes;
            port->tx_bytes = 0;
         }
         break;

      case 0x85808:
      case 0x89808:
         if (op_type == MTS_READ) {
            *data = port->rx_frames;
            port->rx_frames = 0;
         }
         break;

      case 0x8580C:
      case 0x8980C:
         if (op_type == MTS_READ) {
            *data = port->tx_frames;
            port->tx_frames = 0;
         }
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100/ETH",
                    "read access to unknown register 0x%x, pc=0x%llx\n",
                    offset,cpu_get_pc(cpu));
         } else {
            cpu_log(cpu,"GT96100/ETH",
                    "write access to unknown register 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
         }
#endif
   }

   if (op_type == MTS_READ)
      *data = swap32(*data);

   return(TRUE);
}
1061
1062 /*
1063 * dev_gt96100_access()
1064 */
1065 void *dev_gt96100_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
1066 u_int op_size,u_int op_type,m_uint64_t *data)
1067 {
1068 struct gt_data *gt_data = dev->priv_data;
1069
1070 if (op_type == MTS_READ)
1071 *data = 0;
1072
1073 if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
1074 return NULL;
1075
1076 if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
1077 return NULL;
1078
1079 switch(offset) {
1080 /* Watchdog configuration register */
1081 case 0x101a80:
1082 break;
1083
1084 /* Watchdog value register */
1085 case 0x101a84:
1086 break;
1087
1088 case 0x008: /* ras10_low */
1089 if (op_type == MTS_READ)
1090 *data = swap32(0x000);
1091 break;
1092 case 0x010: /* ras10_high */
1093 if (op_type == MTS_READ)
1094 *data = swap32(0x7F);
1095 break;
1096 case 0x018: /* ras32_low */
1097 if (op_type == MTS_READ)
1098 *data = swap32(0x100);
1099 break;
1100 case 0x020: /* ras32_high */
1101 if (op_type == MTS_READ)
1102 *data = swap32(0x7F);
1103 break;
1104 case 0x400: /* ras0_low */
1105 if (op_type == MTS_READ)
1106 *data = swap32(0x00);
1107 break;
1108 case 0x404: /* ras0_high */
1109 if (op_type == MTS_READ)
1110 *data = swap32(0xFF);
1111 break;
1112 case 0x408: /* ras1_low */
1113 if (op_type == MTS_READ)
1114 *data = swap32(0x7F);
1115 break;
1116 case 0x40c: /* ras1_high */
1117 if (op_type == MTS_READ)
1118 *data = swap32(0x00);
1119 break;
1120 case 0x410: /* ras2_low */
1121 if (op_type == MTS_READ)
1122 *data = swap32(0x00);
1123 break;
1124 case 0x414: /* ras2_high */
1125 if (op_type == MTS_READ)
1126 *data = swap32(0xFF);
1127 break;
1128 case 0x418: /* ras3_low */
1129 if (op_type == MTS_READ)
1130 *data = swap32(0x7F);
1131 break;
1132 case 0x41c: /* ras3_high */
1133 if (op_type == MTS_READ)
1134 *data = swap32(0x00);
1135 break;
1136 case 0xc08: /* pci0_cs10 */
1137 if (op_type == MTS_READ)
1138 *data = swap32(0xFFF);
1139 break;
1140 case 0xc0c: /* pci0_cs32 */
1141 if (op_type == MTS_READ)
1142 *data = swap32(0xFFF);
1143 break;
1144
1145 case 0xc00: /* pci_cmd */
1146 if (op_type == MTS_READ)
1147 *data = swap32(0x00008001);
1148 break;
1149
1150 /* ===== Interrupt Main Cause Register ===== */
1151 case 0xc18:
1152 if (op_type == MTS_READ) {
1153 *data = gt_data->int_cause_reg;
1154
1155 /* TODO: signal Eth0/Eth1 */
1156 //*data |= (1 << 30) | (1 << 31) | 1;
1157
1158 *data = swap32(*data);
1159 } else {
1160 gt_data->int_cause_reg &= swap32(*data);
1161 gt_update_irq_status(gt_data);
1162 }
1163 break;
1164
1165 /* ===== Interrupt Mask Register ===== */
1166 case 0xc1c:
1167 if (op_type == MTS_READ) {
1168 *data = swap32(gt_data->int_mask_reg);
1169 } else {
1170 gt_data->int_mask_reg = swap32(*data);
1171 gt_update_irq_status(gt_data);
1172 }
1173 break;
1174
1175 /* ===== Interrupt High Cause Register ===== */
1176 case 0xc98:
1177 if (op_type == MTS_READ) {
1178 *data = 0;
1179
1180 /* interrupt on ethernet port 0 ? */
1181 if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
1182 *data |= GT_IHCR_ETH0_SUM;
1183
1184 /* interrupt on ethernet port 1 ? */
1185 if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
1186 *data |= GT_IHCR_ETH1_SUM;
1187
1188 *data = swap32(*data);
1189 }
1190 break;
1191
1192 /* Serial Cause Register */
1193 case 0x103a00:
1194 if (op_type == MTS_READ) {
1195 *data = 0;
1196
1197 /* interrupt on ethernet port 0 ? */
1198 if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
1199 *data |= GT_SCR_ETH0_SUM;
1200
1201 /* interrupt on ethernet port 1 ? */
1202 if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
1203 *data |= GT_SCR_ETH1_SUM;
1204
1205 gt_update_irq_status(gt_data);
1206 *data = swap32(*data);
1207 }
1208 break;
1209
1210 /* ===== PCI Bus 1 ===== */
1211 case 0xcf0:
1212 pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
1213 break;
1214
1215 case 0xcf4:
1216 pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
1217 break;
1218
1219 /* ===== PCI Bus 0 ===== */
1220 case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
1221 pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
1222 break;
1223
1224 case PCI_BUS_DATA: /* pci data address (0xcfc) */
1225 pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
1226 break;
1227
1228 #if DEBUG_UNKNOWN
1229 default:
1230 if (op_type == MTS_READ) {
1231 cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
1232 offset,cpu_get_pc(cpu));
1233 } else {
1234 cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
1235 "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
1236 }
1237 #endif
1238 }
1239
1240 return NULL;
1241 }
1242
1243 /* Read an Ethernet descriptor */
1244 static void gt_eth_desc_read(struct gt_data *d,m_uint32_t addr,
1245 struct eth_desc *desc)
1246 {
1247 physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct eth_desc));
1248
1249 /* byte-swapping */
1250 desc->cmd_stat = vmtoh32(desc->cmd_stat);
1251 desc->buf_size = vmtoh32(desc->buf_size);
1252 desc->next_ptr = vmtoh32(desc->next_ptr);
1253 desc->buf_ptr = vmtoh32(desc->buf_ptr);
1254 }
1255
1256 /* Write an Ethernet descriptor */
1257 static void gt_eth_desc_write(struct gt_data *d,m_uint32_t addr,
1258 struct eth_desc *desc)
1259 {
1260 struct eth_desc tmp;
1261
1262 /* byte-swapping */
1263 tmp.cmd_stat = vmtoh32(desc->cmd_stat);
1264 tmp.buf_size = vmtoh32(desc->buf_size);
1265 tmp.next_ptr = vmtoh32(desc->next_ptr);
1266 tmp.buf_ptr = vmtoh32(desc->buf_ptr);
1267
1268 physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct eth_desc));
1269 }
1270
1271 /* Handle a TX queue (single packet) */
1272 static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port,
1273 int queue)
1274 {
1275 u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
1276 struct eth_desc txd0,ctxd,*ptxd;
1277 m_uint32_t tx_start,tx_current;
1278 m_uint32_t len,tot_len;
1279 int abort = FALSE;
1280
1281 /* Check if this TX queue is active */
1282 if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL))
1283 return(FALSE);
1284
1285 if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH))
1286 return(FALSE);
1287
1288 /* Copy the current txring descriptor */
1289 tx_start = tx_current = port->tx_current[queue];
1290
1291 if (!tx_start)
1292 goto done;
1293
1294 ptxd = &txd0;
1295 gt_eth_desc_read(d,tx_start,ptxd);
1296
1297 /* If we don't own the first descriptor, we cannot transmit */
1298 if (!(txd0.cmd_stat & GT_TXDESC_OWN))
1299 goto done;
1300
1301 /* Empty packet for now */
1302 pkt_ptr = pkt;
1303 tot_len = 0;
1304
1305 for(;;) {
1306 #if DEBUG_ETH_TX
1307 GT_LOG(d,"gt_eth_handle_txqueue: loop: "
1308 "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n",
1309 ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);
1310 #endif
1311
1312 if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) {
1313 GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n");
1314 abort = TRUE;
1315 break;
1316 }
1317
1318 /* Copy packet data to the buffer */
1319 len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;
1320
1321 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
1322 pkt_ptr += len;
1323 tot_len += len;
1324
1325 /* Clear the OWN bit if this is not the first descriptor */
1326 if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
1327 ptxd->cmd_stat &= ~GT_TXDESC_OWN;
1328 physmem_copy_u32_to_vm(d->vm,tx_current,ptxd->cmd_stat);
1329 }
1330
1331 tx_current = ptxd->next_ptr;
1332
1333 /* Last descriptor or no more desc available ? */
1334 if (ptxd->cmd_stat & GT_TXDESC_L)
1335 break;
1336
1337 if (!tx_current) {
1338 abort = TRUE;
1339 break;
1340 }
1341
1342 /* Fetch the next descriptor */
1343 gt_eth_desc_read(d,tx_current,&ctxd);
1344 ptxd = &ctxd;
1345 }
1346
1347 if ((tot_len != 0) && !abort) {
1348 #if DEBUG_ETH_TX
1349 GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len);
1350 mem_dump(log_file,pkt,tot_len);
1351 #endif
1352 /* send it on wire */
1353 netio_send(port->nio,pkt,tot_len);
1354
1355 /* Update MIB counters */
1356 port->tx_bytes += tot_len;
1357 port->tx_frames++;
1358 }
1359
1360 /* Clear the OWN flag of the first descriptor */
1361 txd0.cmd_stat &= ~GT_TXDESC_OWN;
1362 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);
1363
1364 port->tx_current[queue] = tx_current;
1365
1366 /* Notify host about transmitted packet */
1367 if (queue == 0)
1368 port->icr |= GT_ICR_TXBUFL;
1369 else
1370 port->icr |= GT_ICR_TXBUFH;
1371
1372 done:
1373 if (abort) {
1374 /* TX underrun */
1375 port->icr |= GT_ICR_TXUDR;
1376
1377 if (queue == 0)
1378 port->icr |= GT_ICR_TXERRL;
1379 else
1380 port->icr |= GT_ICR_TXERRH;
1381 } else {
1382 /* End of queue has been reached */
1383 if (!tx_current) {
1384 if (queue == 0)
1385 port->icr |= GT_ICR_TXENDL;
1386 else
1387 port->icr |= GT_ICR_TXENDH;
1388 }
1389 }
1390
1391 /* Update the interrupt status */
1392 gt_eth_update_int_status(d,port);
1393 return(TRUE);
1394 }
1395
1396 /* Handle TX ring of the specified port */
1397 static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port)
1398 {
1399 gt_eth_handle_txqueue(d,&d->eth_ports[port],0); /* TX Low */
1400 gt_eth_handle_txqueue(d,&d->eth_ports[port],1); /* TX High */
1401 }
1402
1403 /* Handle all TX rings of all Ethernet ports */
1404 static int gt_eth_handle_txqueues(struct gt_data *d)
1405 {
1406 int i;
1407
1408 for(i=0;i<GT_ETH_PORTS;i++)
1409 gt_eth_handle_port_txqueues(d,i);
1410
1411 return(TRUE);
1412 }
1413
/* 4-bit reversal table: inv_nibble[v] is v with its 4 bits mirrored */
static const int inv_nibble[16] = {
   0x0, 0x8, 0x4, 0xC,
   0x2, 0xA, 0x6, 0xE,
   0x1, 0x9, 0x5, 0xD,
   0x3, 0xB, 0x7, 0xF
};

/* Mirror the 9 low-order bits of "val" (bit 0 <-> bit 8, etc.) */
static inline unsigned int gt_hash_inv_9bit(unsigned int val)
{
   unsigned int res;

   res  = (val & 0x100) >> 8;                    /* bit 8   -> bit 0   */
   res |= inv_nibble[(val & 0xF0) >> 4] << 1;    /* bits 4-7 -> 1-4    */
   res |= inv_nibble[val & 0x0F] << 5;           /* bits 0-3 -> 5-8    */
   return(res);
}
1430
1431 /*
1432 * Compute hash value for Ethernet address filtering.
1433 * Two modes are available (p.271 of the GT96100 doc).
1434 */
1435 static u_int gt_eth_hash_value(n_eth_addr_t *addr,int mode)
1436 {
1437 m_uint64_t tmp;
1438 u_int res;
1439 int i;
1440
1441 /* Swap the nibbles */
1442 for(i=0,tmp=0;i<N_ETH_ALEN;i++) {
1443 tmp <<= 8;
1444 tmp |= (inv_nibble[addr->eth_addr_byte[i] & 0x0F]) << 4;
1445 tmp |= inv_nibble[(addr->eth_addr_byte[i] & 0xF0) >> 4];
1446 }
1447
1448 if (mode == 0) {
1449 /* Fill bits 0:8 */
1450 res = (tmp & 0x00000003) | ((tmp & 0x00007f00) >> 6);
1451 res ^= (tmp & 0x00ff8000) >> 15;
1452 res ^= (tmp & 0x1ff000000ULL) >> 24;
1453
1454 /* Fill bits 9:14 */
1455 res |= (tmp & 0xfc) << 7;
1456 } else {
1457 /* Fill bits 0:8 */
1458 res = gt_hash_inv_9bit((tmp & 0x00007fc0) >> 6);
1459 res ^= gt_hash_inv_9bit((tmp & 0x00ff8000) >> 15);
1460 res ^= gt_hash_inv_9bit((tmp & 0x1ff000000ULL) >> 24);
1461
1462 /* Fill bits 9:14 */
1463 res |= (tmp & 0x3f) << 9;
1464 }
1465
1466 return(res);
1467 }
1468
1469 /*
1470 * Walk through the Ethernet hash table.
1471 */
1472 static int gt_eth_hash_lookup(struct gt_data *d,struct eth_port *port,
1473 n_eth_addr_t *addr,m_uint64_t *entry)
1474 {
1475 m_uint64_t eth_val;
1476 m_uint32_t hte_addr;
1477 u_int hash_val;
1478 int i;
1479
1480 eth_val = (m_uint64_t)addr->eth_addr_byte[0] << 3;
1481 eth_val |= (m_uint64_t)addr->eth_addr_byte[1] << 11;
1482 eth_val |= (m_uint64_t)addr->eth_addr_byte[2] << 19;
1483 eth_val |= (m_uint64_t)addr->eth_addr_byte[3] << 27;
1484 eth_val |= (m_uint64_t)addr->eth_addr_byte[4] << 35;
1485 eth_val |= (m_uint64_t)addr->eth_addr_byte[5] << 43;
1486
1487 /* Compute hash value for Ethernet address filtering */
1488 hash_val = gt_eth_hash_value(addr,port->pcr & GT_PCR_HM);
1489
1490 if (port->pcr & GT_PCR_HS) {
1491 /* 1/2K address filtering */
1492 hte_addr = port->ht_addr + ((hash_val & 0x7ff) << 3);
1493 } else {
1494 /* 8K address filtering */
1495 hte_addr = port->ht_addr + (hash_val << 3);
1496 }
1497
1498 #if DEBUG_ETH_HASH
1499 GT_LOG(d,"Hash Lookup for Ethernet address "
1500 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x: addr=0x%x\n",
1501 addr->eth_addr_byte[0], addr->eth_addr_byte[1],
1502 addr->eth_addr_byte[2], addr->eth_addr_byte[3],
1503 addr->eth_addr_byte[4], addr->eth_addr_byte[5],
1504 hte_addr);
1505 #endif
1506
1507 for(i=0;i<GT_HTE_HOPNUM;i++,hte_addr+=8) {
1508 *entry = ((m_uint64_t)physmem_copy_u32_from_vm(d->vm,hte_addr)) << 32;
1509 *entry |= physmem_copy_u32_from_vm(d->vm,hte_addr+4);
1510
1511 /* Empty entry ? */
1512 if (!(*entry & GT_HTE_VALID))
1513 return(GT_HTLOOKUP_MISS);
1514
1515 /* Skip flag or different Ethernet address: jump to next entry */
1516 if ((*entry & GT_HTE_SKIP) || ((*entry & GT_HTE_ADDR_MASK) != eth_val))
1517 continue;
1518
1519 /* We have the good MAC address in this entry */
1520 return(GT_HTLOOKUP_MATCH);
1521 }
1522
1523 return(GT_HTLOOKUP_HOP_EXCEEDED);
1524 }
1525
1526 /*
1527 * Check if a packet (given its destination address) must be handled
1528 * at RX path.
1529 *
1530 * Return values:
1531 * - 0: Discard packet ;
1532 * - 1: Receive packet ;
1533 * - 2: Receive packet and set "M" bit in RX descriptor.
1534 *
1535 * The documentation is not clear about the M bit in RX descriptor.
1536 * It is described as "Miss" or "Match" depending on the section.
1537 */
1538 static inline int gt_eth_handle_rx_daddr(struct gt_data *d,
1539 struct eth_port *port,
1540 u_int hash_res,
1541 m_uint64_t hash_entry)
1542 {
1543 /* Hop Number exceeded */
1544 if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
1545 return(1);
1546
1547 /* Match and hash entry marked as "Receive" */
1548 if ((hash_res == GT_HTLOOKUP_MATCH) && (hash_entry & GT_HTE_RD))
1549 return(2);
1550
1551 /* Miss but hash table default mode to forward ? */
1552 if ((hash_res == GT_HTLOOKUP_MISS) && (port->pcr & GT_PCR_HDM))
1553 return(2);
1554
1555 /* Promiscous Mode */
1556 if (port->pcr & GT_PCR_PM)
1557 return(1);
1558
1559 /* Drop packet for other cases */
1560 return(0);
1561 }
1562
1563 /* Put a packet in buffer of a descriptor */
1564 static void gt_eth_rxdesc_put_pkt(struct gt_data *d,struct eth_desc *rxd,
1565 u_char **pkt,ssize_t *pkt_len)
1566 {
1567 ssize_t len,cp_len;
1568
1569 len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT;
1570
1571 /* compute the data length to copy */
1572 cp_len = m_min(len,*pkt_len);
1573
1574 /* copy packet data to the VM physical RAM */
1575 physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len);
1576
1577 /* set the byte count in descriptor */
1578 rxd->buf_size |= cp_len;
1579
1580 *pkt += cp_len;
1581 *pkt_len -= cp_len;
1582 }
1583
/*
 * Put a packet in the specified RX queue.
 *
 * Runs the destination-address filter, then spreads the packet over the
 * RX descriptor chain starting at port->rx_start[queue].  The first
 * descriptor's status word is written back last (after F/M/HE/FT flags
 * are known); intermediate descriptors are written back as the chain is
 * walked.  Any ownership/chain problem jumps to "dma_error", which
 * raises the RX error bits in the ICR.
 *
 * Returns TRUE if the packet was delivered, FALSE if it was filtered
 * out or a DMA error occurred.
 */
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue,
                                 u_char *pkt,ssize_t pkt_len)
{
   struct eth_port *port = &d->eth_ports[port_id];
   m_uint32_t rx_start,rx_current;
   struct eth_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   n_eth_dot1q_hdr_t *hdr;
   m_uint64_t hash_entry;
   int i,hash_res,addr_action;

   /* Truncate the packet if it is too big */
   /* NOTE(review): tot_len was initialized from pkt_len BEFORE this
      truncation, so the copy loop below still uses the untruncated
      length — confirm whether that is intended. */
   pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);

   /* Copy the first RX descriptor (a null ring pointer is a DMA error) */
   if (!(rx_start = rx_current = port->rx_start[queue]))
      goto dma_error;

   /* Analyze the Ethernet header */
   hdr = (n_eth_dot1q_hdr_t *)pkt;

   /* Hash table lookup for address filtering */
   hash_res = gt_eth_hash_lookup(d,port,&hdr->daddr,&hash_entry);

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash result: %d, hash_entry=0x%llx\n",hash_res,hash_entry);
#endif

   /* addr_action == 0 means the filter rejected the packet */
   if (!(addr_action = gt_eth_handle_rx_daddr(d,port,hash_res,hash_entry)))
      return(FALSE);

   /* Load the first RX descriptor */
   gt_eth_desc_read(d,rx_start,&rxd0);

#if DEBUG_ETH_RX
   GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x "
          "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
          "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
          port_id,queue,rx_start,
          rxd0.buf_size,rxd0.cmd_stat,rxd0.next_ptr,rxd0.buf_ptr);
#endif

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* We must own the descriptor */
      if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
         goto dma_error;

      /* Put data into the descriptor buffer */
      gt_eth_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Clear the OWN bit */
      rxdc->cmd_stat &= ~GT_RXDESC_OWN;

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->cmd_stat |= GT_RXDESC_L;
         rxdc->buf_size += 4;    /* Add 4 bytes for CRC */
      }

      /* Update the descriptor in host memory (but not the 1st: its
         final flags are only known after the loop) */
      if (i != 0)
         gt_eth_desc_write(d,rx_current,rxdc);

      /* Get address of the next descriptor */
      rx_current = rxdc->next_ptr;

      if (tot_len == 0)
         break;

      /* End of chain before the whole packet was stored */
      if (!rx_current)
         goto dma_error;

      /* Read the next descriptor from VM physical RAM */
      gt_eth_desc_read(d,rx_current,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the RX pointers */
   port->rx_start[queue] = port->rx_current[queue] = rx_current;

   /* Update the first RX descriptor: it carries the frame-level flags */
   rxd0.cmd_stat |= GT_RXDESC_F;

   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      rxd0.cmd_stat |= GT_RXDESC_HE;

   if (addr_action == 2)
      rxd0.cmd_stat |= GT_RXDESC_M;

   if (ntohs(hdr->type) <= N_ETH_MTU)   /* 802.3 frame */
      rxd0.cmd_stat |= GT_RXDESC_FT;

   gt_eth_desc_write(d,rx_start,&rxd0);

   /* Update MIB counters */
   port->rx_bytes += pkt_len;
   port->rx_frames++;

   /* Indicate that we have a frame ready */
   port->icr |= (GT_ICR_RXBUFQ0 << queue) | GT_ICR_RXBUF;
   gt_eth_update_int_status(d,port);
   return(TRUE);

 dma_error:
   port->icr |= (GT_ICR_RXERRQ0 << queue) | GT_ICR_RXERR;
   gt_eth_update_int_status(d,port);
   return(FALSE);
}
1695
1696 /* Handle RX packet for an Ethernet port */
1697 static int gt_eth_handle_rx_pkt(netio_desc_t *nio,
1698 u_char *pkt,ssize_t pkt_len,
1699 struct gt_data *d,void *arg)
1700 {
1701 u_int queue,port_id = (int)arg;
1702 struct eth_port *port;
1703
1704 port = &d->eth_ports[port_id];
1705
1706 /* Check if RX DMA is active */
1707 if (!(port->sdcmr & GT_SDCMR_ERD))
1708 return(FALSE);
1709
1710 queue = 0; /* At this time, only put packet in queue 0 */
1711 gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len);
1712 return(TRUE);
1713 }
1714
1715 /* Shutdown a GT system controller */
1716 void dev_gt_shutdown(vm_instance_t *vm,struct gt_data *d)
1717 {
1718 if (d != NULL) {
1719 /* Stop the TX ring scanner */
1720 ptask_remove(d->eth_tx_tid);
1721
1722 /* Remove the device */
1723 dev_remove(vm,&d->dev);
1724
1725 /* Remove the PCI device */
1726 pci_dev_remove(d->pci_dev);
1727
1728 /* Free the structure itself */
1729 free(d);
1730 }
1731 }
1732
1733 /* Create a new GT64010 controller */
1734 int dev_gt64010_init(vm_instance_t *vm,char *name,
1735 m_uint64_t paddr,m_uint32_t len,u_int irq)
1736 {
1737 struct gt_data *d;
1738
1739 if (!(d = malloc(sizeof(*d)))) {
1740 fprintf(stderr,"gt64010: unable to create device data.\n");
1741 return(-1);
1742 }
1743
1744 memset(d,0,sizeof(*d));
1745 d->vm = vm;
1746 d->bus[0] = vm->pci_bus[0];
1747
1748 vm_object_init(&d->vm_obj);
1749 d->vm_obj.name = name;
1750 d->vm_obj.data = d;
1751 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
1752
1753 dev_init(&d->dev);
1754 d->dev.name = name;
1755 d->dev.priv_data = d;
1756 d->dev.phys_addr = paddr;
1757 d->dev.phys_len = len;
1758 d->dev.handler = dev_gt64010_access;
1759
1760 /* Add the controller as a PCI device */
1761 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
1762 d->pci_dev = pci_dev_add(d->bus[0],name,
1763 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64010,
1764 0,0,irq,d,NULL,NULL,NULL);
1765
1766 if (!d->pci_dev) {
1767 fprintf(stderr,"gt64010: unable to create PCI device.\n");
1768 return(-1);
1769 }
1770 }
1771
1772 /* Map this device to the VM */
1773 vm_bind_device(vm,&d->dev);
1774 vm_object_add(vm,&d->vm_obj);
1775 return(0);
1776 }
1777
1778 /*
1779 * pci_gt64120_read()
1780 *
1781 * Read a PCI register.
1782 */
1783 static m_uint32_t pci_gt64120_read(cpu_gen_t *cpu,struct pci_device *dev,
1784 int reg)
1785 {
1786 switch (reg) {
1787 case 0x08:
1788 return(0x03008005);
1789 default:
1790 return(0);
1791 }
1792 }
1793
1794 /* Create a new GT64120 controller */
1795 int dev_gt64120_init(vm_instance_t *vm,char *name,
1796 m_uint64_t paddr,m_uint32_t len,u_int irq)
1797 {
1798 struct gt_data *d;
1799
1800 if (!(d = malloc(sizeof(*d)))) {
1801 fprintf(stderr,"gt64120: unable to create device data.\n");
1802 return(-1);
1803 }
1804
1805 memset(d,0,sizeof(*d));
1806 d->vm = vm;
1807 d->bus[0] = vm->pci_bus[0];
1808 d->bus[1] = vm->pci_bus[1];
1809
1810 vm_object_init(&d->vm_obj);
1811 d->vm_obj.name = name;
1812 d->vm_obj.data = d;
1813 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
1814
1815 dev_init(&d->dev);
1816 d->dev.name = name;
1817 d->dev.priv_data = d;
1818 d->dev.phys_addr = paddr;
1819 d->dev.phys_len = len;
1820 d->dev.handler = dev_gt64120_access;
1821
1822 /* Add the controller as a PCI device */
1823 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
1824 d->pci_dev = pci_dev_add(d->bus[0],name,
1825 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64120,
1826 0,0,irq,d,NULL,pci_gt64120_read,NULL);
1827 if (!d->pci_dev) {
1828 fprintf(stderr,"gt64120: unable to create PCI device.\n");
1829 return(-1);
1830 }
1831 }
1832
1833 /* Map this device to the VM */
1834 vm_bind_device(vm,&d->dev);
1835 vm_object_add(vm,&d->vm_obj);
1836 return(0);
1837 }
1838
1839 /*
1840 * pci_gt96100_read()
1841 *
1842 * Read a PCI register.
1843 */
1844 static m_uint32_t pci_gt96100_read(cpu_gen_t *cpu,struct pci_device *dev,
1845 int reg)
1846 {
1847 switch (reg) {
1848 case 0x08:
1849 return(0x03008005);
1850 default:
1851 return(0);
1852 }
1853 }
1854
1855 /* Create a new GT96100 controller */
1856 int dev_gt96100_init(vm_instance_t *vm,char *name,
1857 m_uint64_t paddr,m_uint32_t len,
1858 u_int dma_irq,u_int eth_irq)
1859 {
1860 struct gt_data *d;
1861
1862 if (!(d = malloc(sizeof(*d)))) {
1863 fprintf(stderr,"gt96100: unable to create device data.\n");
1864 return(-1);
1865 }
1866
1867 memset(d,0,sizeof(*d));
1868 d->name = name;
1869 d->vm = vm;
1870 d->eth_irq = eth_irq;
1871 d->bus[0] = vm->pci_bus[0];
1872 d->bus[1] = vm->pci_bus[1];
1873
1874 vm_object_init(&d->vm_obj);
1875 d->vm_obj.name = name;
1876 d->vm_obj.data = d;
1877 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
1878
1879 dev_init(&d->dev);
1880 d->dev.name = name;
1881 d->dev.priv_data = d;
1882 d->dev.phys_addr = paddr;
1883 d->dev.phys_len = len;
1884 d->dev.handler = dev_gt96100_access;
1885
1886 /* Add the controller as a PCI device */
1887 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
1888 d->pci_dev = pci_dev_add(d->bus[0],name,
1889 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100,
1890 0,0,dma_irq,d,NULL,pci_gt96100_read,NULL);
1891 if (!d->pci_dev) {
1892 fprintf(stderr,"gt96100: unable to create PCI device.\n");
1893 return(-1);
1894 }
1895 }
1896
1897 /* Start the TX ring scanner */
1898 d->eth_tx_tid = ptask_add((ptask_callback)gt_eth_handle_txqueues,d,NULL);
1899
1900 /* Map this device to the VM */
1901 vm_bind_device(vm,&d->dev);
1902 vm_object_add(vm,&d->vm_obj);
1903 return(0);
1904 }
1905
1906 /* Bind a NIO to GT96100 device */
1907 int dev_gt96100_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio)
1908 {
1909 struct eth_port *port;
1910
1911 if (port_id >= GT_ETH_PORTS)
1912 return(-1);
1913
1914 port = &d->eth_ports[port_id];
1915
1916 /* check that a NIO is not already bound */
1917 if (port->nio != NULL)
1918 return(-1);
1919
1920 port->nio = nio;
1921 netio_rxl_add(nio,(netio_rx_handler_t)gt_eth_handle_rx_pkt,
1922 d,(void *)port_id);
1923 return(0);
1924 }
1925
1926 /* Unbind a NIO from a GT96100 device */
1927 int dev_gt96100_unset_nio(struct gt_data *d,u_int port_id)
1928 {
1929 struct eth_port *port;
1930
1931 if (port_id >= GT_ETH_PORTS)
1932 return(-1);
1933
1934 port = &d->eth_ports[port_id];
1935
1936 if (port->nio != NULL) {
1937 netio_rxl_remove(port->nio);
1938 port->nio = NULL;
1939 }
1940
1941 return(0);
1942 }

  ViewVC Help
Powered by ViewVC 1.1.26