/[dynamips]/upstream/dynamips-0.2.6-RC3/dev_gt.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!

Revision 4 - Sat Oct 6 16:06:49 2007 UTC by dpavlin
dynamips-0.2.6-RC3

/*
 * Cisco 7200 (Predator) simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 *
 * Galileo GT64010/GT64120A/GT96100A system controller.
 *
 * The DMA stuff is not complete, only "normal" transfers are working
 * (source and destination addresses incrementing).
 *
 * Also, these transfers are "instantaneous" from a CPU point-of-view: when
 * a channel is enabled, the transfer is immediately done. So, this is not
 * very realistic.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "utils.h"
#include "net.h"
#include "mips64.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "net_io.h"
#include "ptask.h"
#include "dev_gt.h"

/* Debugging flags */
#define DEBUG_UNKNOWN   0
#define DEBUG_DMA       0
#define DEBUG_MII       0
#define DEBUG_ETH_TX    0
#define DEBUG_ETH_RX    0
#define DEBUG_ETH_HASH  0

/* PCI identification */
#define PCI_VENDOR_GALILEO           0x11ab  /* Galileo Technology */
#define PCI_PRODUCT_GALILEO_GT64010  0x0146  /* GT-64010 */
#define PCI_PRODUCT_GALILEO_GT64011  0x4146  /* GT-64011 */
#define PCI_PRODUCT_GALILEO_GT64120  0x4620  /* GT-64120 */
#define PCI_PRODUCT_GALILEO_GT96100  0x9653  /* GT-96100 */

/* === Global definitions === */

/* Interrupt High Cause Register */
#define GT_IHCR_ETH0_SUM  0x00000001
#define GT_IHCR_ETH1_SUM  0x00000002
#define GT_IHCR_SDMA_SUM  0x00000010

/* Serial Cause Register */
#define GT_SCR_ETH0_SUM   0x00000001
#define GT_SCR_ETH1_SUM   0x00000002
#define GT_SCR_SDMA_SUM   0x00000010

/* === DMA definitions === */
#define GT_DMA_CHANNELS   4

#define GT_DMA_FLYBY_ENABLE  0x00000001  /* FlyBy Enable */
#define GT_DMA_FLYBY_RDWR    0x00000002  /* SDRAM Read/Write (FlyBy) */
#define GT_DMA_SRC_DIR       0x0000000c  /* Source Direction */
#define GT_DMA_DST_DIR       0x00000030  /* Destination Direction */
#define GT_DMA_DATA_LIMIT    0x000001c0  /* Data Transfer Limit */
#define GT_DMA_CHAIN_MODE    0x00000200  /* Chained Mode */
#define GT_DMA_INT_MODE      0x00000400  /* Interrupt Mode */
#define GT_DMA_TRANS_MODE    0x00000800  /* Transfer Mode */
#define GT_DMA_CHAN_ENABLE   0x00001000  /* Channel Enable */
#define GT_DMA_FETCH_NEXT    0x00002000  /* Fetch Next Record */
#define GT_DMA_ACT_STATUS    0x00004000  /* DMA Activity Status */
#define GT_DMA_SDA           0x00008000  /* Source/Destination Alignment */
#define GT_DMA_MDREQ         0x00010000  /* Mask DMA Requests */
#define GT_DMA_CDE           0x00020000  /* Close Descriptor Enable */
#define GT_DMA_EOTE          0x00040000  /* End-of-Transfer (EOT) Enable */
#define GT_DMA_EOTIE         0x00080000  /* EOT Interrupt Enable */
#define GT_DMA_ABORT         0x00100000  /* Abort DMA Transfer */
#define GT_DMA_SLP           0x00600000  /* Override Source Address */
#define GT_DMA_DLP           0x01800000  /* Override Dest Address */
#define GT_DMA_RLP           0x06000000  /* Override Record Address */
#define GT_DMA_REQ_SRC       0x10000000  /* DMA Request Source */

/* Galileo DMA channel */
struct dma_channel {
   m_uint32_t byte_count;
   m_uint32_t src_addr;
   m_uint32_t dst_addr;
   m_uint32_t cdptr;
   m_uint32_t nrptr;
   m_uint32_t ctrl;
};
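
/*
 * gt_dma_access() below maps these fields onto the per-channel registers:
 * src_addr at offsets 0x810-0x81c, dst_addr at 0x820-0x82c, nrptr at
 * 0x830-0x83c (a write there also updates cdptr) and ctrl at 0x840-0x84c.
 */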

/* === Ethernet definitions === */
#define GT_ETH_PORTS      2
#define GT_MAX_PKT_SIZE   2048

/* SMI register */
#define GT_SMIR_DATA_MASK    0x0000FFFF
#define GT_SMIR_PHYAD_MASK   0x001F0000  /* PHY Device Address */
#define GT_SMIR_PHYAD_SHIFT  16
#define GT_SMIR_REGAD_MASK   0x03e00000  /* PHY Device Register Address */
#define GT_SMIR_REGAD_SHIFT  21
#define GT_SMIR_OPCODE_MASK  0x04000000  /* Opcode (0: write, 1: read) */
#define GT_SMIR_OPCODE_READ  0x04000000
#define GT_SMIR_RVALID_FLAG  0x08000000  /* Read Valid */
#define GT_SMIR_BUSY_FLAG    0x10000000  /* Busy: 1=op in progress */

/* PCR: Port Configuration Register */
#define GT_PCR_PM    0x00000001  /* Promiscuous mode */
#define GT_PCR_RBM   0x00000002  /* Reject broadcast mode */
#define GT_PCR_PBF   0x00000004  /* Pass bad frames */
#define GT_PCR_EN    0x00000080  /* Port Enabled/Disabled */
#define GT_PCR_LPBK  0x00000300  /* Loopback mode */
#define GT_PCR_FC    0x00000400  /* Force collision */
#define GT_PCR_HS    0x00001000  /* Hash size */
#define GT_PCR_HM    0x00002000  /* Hash mode */
#define GT_PCR_HDM   0x00004000  /* Hash default mode */
#define GT_PCR_HD    0x00008000  /* Duplex Mode */
#define GT_PCR_ISL   0x70000000  /* ISL enabled (0x06) */
#define GT_PCR_ACCS  0x80000000  /* Accelerate Slot Time */

/* PCXR: Port Configuration Extend Register */
#define GT_PCXR_IGMP          0x00000001  /* IGMP packet capture */
#define GT_PCXR_SPAN          0x00000002  /* BPDU packet capture */
#define GT_PCXR_PAR           0x00000004  /* Partition Enable */
#define GT_PCXR_PRIOTX        0x00000038  /* Priority weight for TX */
#define GT_PCXR_PRIORX        0x000000C0  /* Priority weight for RX */
#define GT_PCXR_PRIORX_OV     0x00000100  /* Prio RX override */
#define GT_PCXR_DPLX_EN       0x00000200  /* Autoneg for Duplex */
#define GT_PCXR_FCTL_EN       0x00000400  /* Autoneg for 802.3x */
#define GT_PCXR_FLP           0x00000800  /* Force Link Pass */
#define GT_PCXR_FCTL          0x00001000  /* Flow Control Mode */
#define GT_PCXR_MFL           0x0000C000  /* Maximum Frame Length */
#define GT_PCXR_MIB_CLR_MODE  0x00010000  /* MIB counters clear mode */
#define GT_PCXR_SPEED         0x00040000  /* Port Speed */
#define GT_PCXR_SPEED_EN      0x00080000  /* Autoneg for Speed */
#define GT_PCXR_RMII_EN       0x00100000  /* RMII Enable */
#define GT_PCXR_DSCP_EN       0x00200000  /* DSCP decoding enable */

/* PCMR: Port Command Register */
#define GT_PCMR_FJ   0x00008000  /* Force Jam / Flow Control */

/* PSR: Port Status Register */
#define GT_PSR_SPEED   0x00000001  /* Speed: 10/100 Mb/s (100=>1) */
#define GT_PSR_DUPLEX  0x00000002  /* Duplex (1: full) */
#define GT_PSR_FCTL    0x00000004  /* Flow Control Mode */
#define GT_PSR_LINK    0x00000008  /* Link Up/Down */
#define GT_PSR_PAUSE   0x00000010  /* Flow-control disabled state */
#define GT_PSR_TXLOW   0x00000020  /* TX Low priority status */
#define GT_PSR_TXHIGH  0x00000040  /* TX High priority status */
#define GT_PSR_TXINP   0x00000080  /* TX in Progress */

/* SDCR: SDMA Configuration Register */
#define GT_SDCR_RC    0x0000003c  /* Retransmit count */
#define GT_SDCR_BLMR  0x00000040  /* Big/Little Endian RX mode */
#define GT_SDCR_BLMT  0x00000080  /* Big/Little Endian TX mode */
#define GT_SDCR_POVR  0x00000100  /* PCI override */
#define GT_SDCR_RIFB  0x00000200  /* RX IRQ on frame boundary */
#define GT_SDCR_BSZ   0x00003000  /* Burst size */

/* SDCMR: SDMA Command Register */
#define GT_SDCMR_ERD   0x00000080  /* Enable RX DMA */
#define GT_SDCMR_AR    0x00008000  /* Abort Receive */
#define GT_SDCMR_STDH  0x00010000  /* Stop TX High */
#define GT_SDCMR_STDL  0x00020000  /* Stop TX Low */
#define GT_SDCMR_TXDH  0x00800000  /* Start TX High */
#define GT_SDCMR_TXDL  0x01000000  /* Start TX Low */
#define GT_SDCMR_AT    0x80000000  /* Abort Transmit */

/* ICR: Interrupt Cause Register */
#define GT_ICR_RXBUF    0x00000001  /* RX Buffer returned to host */
#define GT_ICR_TXBUFH   0x00000004  /* TX Buffer High */
#define GT_ICR_TXBUFL   0x00000008  /* TX Buffer Low */
#define GT_ICR_TXENDH   0x00000040  /* TX End High */
#define GT_ICR_TXENDL   0x00000080  /* TX End Low */
#define GT_ICR_RXERR    0x00000100  /* RX Error */
#define GT_ICR_TXERRH   0x00000400  /* TX Error High */
#define GT_ICR_TXERRL   0x00000800  /* TX Error Low */
#define GT_ICR_RXOVR    0x00001000  /* RX Overrun */
#define GT_ICR_TXUDR    0x00002000  /* TX Underrun */
#define GT_ICR_RXBUFQ0  0x00010000  /* RX Buffer in Prio Queue 0 */
#define GT_ICR_RXBUFQ1  0x00020000  /* RX Buffer in Prio Queue 1 */
#define GT_ICR_RXBUFQ2  0x00040000  /* RX Buffer in Prio Queue 2 */
#define GT_ICR_RXBUFQ3  0x00080000  /* RX Buffer in Prio Queue 3 */
#define GT_ICR_RXERRQ0  0x00100000  /* RX Error in Prio Queue 0 */
#define GT_ICR_RXERRQ1  0x00200000  /* RX Error in Prio Queue 1 */
#define GT_ICR_RXERRQ2  0x00400000  /* RX Error in Prio Queue 2 */
#define GT_ICR_RXERRQ3  0x00800000  /* RX Error in Prio Queue 3 */
#define GT_ICR_MII_STC   0x10000000  /* MII PHY Status Change */
#define GT_ICR_SMI_DONE  0x20000000  /* SMI Command Done */
#define GT_ICR_INT_SUM   0x80000000  /* Ethernet Interrupt Summary */
#define GT_ICR_MASK      0x7FFFFFFF

/* Ethernet hash entry */
#define GT_HTE_VALID      0x00000001  /* Valid entry */
#define GT_HTE_SKIP       0x00000002  /* Skip entry in a chain */
#define GT_HTE_RD         0x00000004  /* 0: Discard, 1: Receive */
#define GT_HTE_ADDR_MASK  0x7fffffffffff8ULL

#define GT_HTE_HOPNUM     12  /* Hash Table Hop Number */

enum {
   GT_HTLOOKUP_MISS,
   GT_HTLOOKUP_MATCH,
   GT_HTLOOKUP_HOP_EXCEEDED,
};

/* TX Descriptor */
#define GT_TXDESC_OWN  0x80000000  /* Ownership */
#define GT_TXDESC_AM   0x40000000  /* Auto-mode */
#define GT_TXDESC_EI   0x00800000  /* Enable Interrupt */
#define GT_TXDESC_GC   0x00400000  /* Generate CRC */
#define GT_TXDESC_P    0x00040000  /* Padding */
#define GT_TXDESC_F    0x00020000  /* First buffer of packet */
#define GT_TXDESC_L    0x00010000  /* Last buffer of packet */
#define GT_TXDESC_ES   0x00008000  /* Error Summary */
#define GT_TXDESC_RC   0x00003c00  /* Retransmit Count */
#define GT_TXDESC_COL  0x00000200  /* Collision */
#define GT_TXDESC_RL   0x00000100  /* Retransmit Limit Error */
#define GT_TXDESC_UR   0x00000040  /* Underrun Error */
#define GT_TXDESC_LC   0x00000020  /* Late Collision Error */

#define GT_TXDESC_BC_MASK   0xFFFF0000  /* Number of bytes to transmit */
#define GT_TXDESC_BC_SHIFT  16

/* RX Descriptor */
#define GT_RXDESC_OWN   0x80000000  /* Ownership */
#define GT_RXDESC_AM    0x40000000  /* Auto-mode */
#define GT_RXDESC_EI    0x00800000  /* Enable Interrupt */
#define GT_RXDESC_F     0x00020000  /* First buffer of packet */
#define GT_RXDESC_L     0x00010000  /* Last buffer of packet */
#define GT_RXDESC_ES    0x00008000  /* Error Summary */
#define GT_RXDESC_IGMP  0x00004000  /* IGMP packet detected */
#define GT_RXDESC_HE    0x00002000  /* Hash Table Expired */
#define GT_RXDESC_M     0x00001000  /* Missed Frame */
#define GT_RXDESC_FT    0x00000800  /* Frame Type (802.3/Ethernet) */
#define GT_RXDESC_SF    0x00000100  /* Short Frame Error */
#define GT_RXDESC_MFL   0x00000080  /* Maximum Frame Length Error */
#define GT_RXDESC_OR    0x00000040  /* Overrun Error */
#define GT_RXDESC_COL   0x00000010  /* Collision */
#define GT_RXDESC_CE    0x00000001  /* CRC Error */

#define GT_RXDESC_BC_MASK   0x0000FFFF  /* Byte count */
#define GT_RXDESC_BS_MASK   0xFFFF0000  /* Buffer size */
#define GT_RXDESC_BS_SHIFT  16

/* RX/TX descriptor */
struct eth_desc {
   m_uint32_t buf_size;
   m_uint32_t cmd_stat;
   m_uint32_t next_ptr;
   m_uint32_t buf_ptr;
};
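
/*
 * A descriptor occupies four consecutive 32-bit words in VM physical RAM;
 * gt_eth_desc_read()/gt_eth_desc_write() below convert each word with
 * vmtoh32() when copying between guest memory and this structure.
 */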

/* Galileo Ethernet port */
struct eth_port {
   netio_desc_t *nio;

   /* First and Current RX descriptors (4 queues) */
   m_uint32_t rx_start[4],rx_current[4];

   /* Current TX descriptors (2 queues) */
   m_uint32_t tx_current[2];

   /* Port registers */
   m_uint32_t pcr,pcxr,pcmr,psr;

   /* SDMA registers */
   m_uint32_t sdcr,sdcmr;

   /* Interrupt registers */
   m_uint32_t icr,imr;

   /* Hash Table pointer */
   m_uint32_t ht_addr;

   /* Ethernet MIB counters */
   m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames;
};

/* Galileo GT64xxx/GT96xxx system controller */
struct gt_data {
   char *name;
   vm_obj_t vm_obj;
   struct vdevice dev;
   struct pci_device *pci_dev;
   vm_instance_t *vm;

   struct pci_bus *bus[2];
   struct dma_channel dma[GT_DMA_CHANNELS];
   m_uint32_t int_cause_reg;
   m_uint32_t int_mask_reg;

   /* Ethernet ports (GT-96100) */
   u_int eth_irq;
   ptask_id_t eth_tx_tid;
   struct eth_port eth_ports[GT_ETH_PORTS];
   m_uint32_t smi_reg;
   m_uint16_t mii_regs[32][32];
};

/* Log a GT message */
#define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)

/* Update the interrupt status */
static void gt_update_irq_status(struct gt_data *gt_data)
{
   if (gt_data->pci_dev) {
      if (gt_data->int_cause_reg & gt_data->int_mask_reg)
         pci_dev_trigger_irq(gt_data->vm,gt_data->pci_dev);
      else
         pci_dev_clear_irq(gt_data->vm,gt_data->pci_dev);
   }
}

/* Fetch a DMA record (chained mode) */
static void gt_dma_fetch_rec(vm_instance_t *vm,struct dma_channel *channel)
{
   m_uint32_t ptr;

#if DEBUG_DMA
   vm_log(vm,"GT_DMA","fetching record at address 0x%x\n",channel->nrptr);
#endif

   /* fetch the record from RAM */
   ptr = channel->nrptr;
   channel->byte_count = swap32(physmem_copy_u32_from_vm(vm,ptr));
   channel->src_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x04));
   channel->dst_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x08));
   channel->nrptr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x0c));

   /* clear the "fetch next record" bit */
   channel->ctrl &= ~GT_DMA_FETCH_NEXT;
}

/* Handle control register of a DMA channel */
static void gt_dma_handle_ctrl(struct gt_data *gt_data,int chan_id)
{
   struct dma_channel *channel = &gt_data->dma[chan_id];
   vm_instance_t *vm = gt_data->vm;
   int done;

   if (channel->ctrl & GT_DMA_FETCH_NEXT) {
      if (channel->nrptr == 0) {
         vm_log(vm,"GT_DMA","trying to load a NULL DMA record...\n");
         return;
      }

      gt_dma_fetch_rec(vm,channel);
   }

   if (channel->ctrl & GT_DMA_CHAN_ENABLE)
   {
      do {
         done = TRUE;

#if DEBUG_DMA
         vm_log(vm,"GT_DMA",
                "starting transfer from 0x%x to 0x%x (size=%u bytes)\n",
                channel->src_addr,channel->dst_addr,
                channel->byte_count & 0xFFFF);
#endif
         physmem_dma_transfer(vm,channel->src_addr,channel->dst_addr,
                              channel->byte_count & 0xFFFF);

         /* chained mode: fetch the next record if the ChainMod bit is cleared */
         if (!(channel->ctrl & GT_DMA_CHAIN_MODE)) {
            if (channel->nrptr) {
               gt_dma_fetch_rec(vm,channel);
               done = FALSE;
            }
         }
      }while(!done);

#if DEBUG_DMA
      vm_log(vm,"GT_DMA","finished transfer.\n");
#endif
      /* Trigger DMA interrupt */
      gt_data->int_cause_reg |= 1 << (4 + chan_id);
      gt_update_irq_status(gt_data);
   }
}

#define DMA_REG(ch,reg_name) \
   if (op_type == MTS_WRITE) \
      gt_data->dma[ch].reg_name = swap32(*data); \
   else \
      *data = swap32(gt_data->dma[ch].reg_name);
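
/*
 * For instance, DMA_REG(0,src_addr) expands to:
 *
 *    if (op_type == MTS_WRITE)
 *       gt_data->dma[0].src_addr = swap32(*data);
 *    else
 *       *data = swap32(gt_data->dma[0].src_addr);
 */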

/* Handle a DMA channel */
static int gt_dma_access(cpu_mips_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   switch(offset) {
      /* DMA Source Address */
      case 0x810: DMA_REG(0,src_addr); return(1);
      case 0x814: DMA_REG(1,src_addr); return(1);
      case 0x818: DMA_REG(2,src_addr); return(1);
      case 0x81c: DMA_REG(3,src_addr); return(1);

      /* DMA Destination Address */
      case 0x820: DMA_REG(0,dst_addr); return(1);
      case 0x824: DMA_REG(1,dst_addr); return(1);
      case 0x828: DMA_REG(2,dst_addr); return(1);
      case 0x82c: DMA_REG(3,dst_addr); return(1);

      /* DMA Next Record Pointer */
      case 0x830:
         gt_data->dma[0].cdptr = *data;
         DMA_REG(0,nrptr);
         return(1);

      case 0x834:
         gt_data->dma[1].cdptr = *data;
         DMA_REG(1,nrptr);
         return(1);

      case 0x838:
         gt_data->dma[2].cdptr = *data;
         DMA_REG(2,nrptr);
         return(1);

      case 0x83c:
         gt_data->dma[3].cdptr = *data;
         DMA_REG(3,nrptr);
         return(1);

      /* DMA Channel Control */
      case 0x840:
         DMA_REG(0,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,0);
         return(1);

      case 0x844:
         DMA_REG(1,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,1);
         return(1);

      case 0x848:
         DMA_REG(2,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,2);
         return(1);

      case 0x84c:
         DMA_REG(3,ctrl);
         if (op_type == MTS_WRITE)
            gt_dma_handle_ctrl(gt_data,3);
         return(1);
   }

   return(0);
}

/*
 * dev_gt64010_access()
 */
void *dev_gt64010_access(cpu_mips_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* ===== DRAM Settings (completely faked, 128 Mb) ===== */
      case 0x008:   /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:   /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:   /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x080);
         break;
      case 0x020:   /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:   /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:   /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:   /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:   /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:   /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:   /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:   /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:   /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:   /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:   /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:   /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_cause_reg);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_mask_reg);
         else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Configuration ===== */
      case PCI_BUS_ADDR:   /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:   /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64010","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu->pc);
         } else {
            cpu_log(cpu,"GT64010","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu->pc);
         }
#endif
   }

   return NULL;
}

/*
 * dev_gt64120_access()
 */
void *dev_gt64120_access(cpu_mips_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      case 0x008:   /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:   /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:   /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:   /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:   /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:   /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:   /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:   /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:   /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:   /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:   /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:   /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:   /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:   /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:   /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ)
            *data = swap32(gt_data->int_cause_reg);
         else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:   /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:   /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT64120","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu->pc);
         } else {
            cpu_log(cpu,"GT64120","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu->pc);
         }
#endif
   }

   return NULL;
}

/* Update the Ethernet port interrupt status */
static void gt_eth_update_int_status(struct gt_data *d,struct eth_port *port)
{
   if (port->icr & GT_ICR_MASK)
      port->icr |= GT_ICR_INT_SUM;

   if (port->icr & port->imr & GT_ICR_MASK)
      vm_set_irq(d->vm,d->eth_irq);
}

/* Read a MII register */
static m_uint32_t gt_mii_read(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint32_t res = 0;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

#if DEBUG_MII
   GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: reading.\n",port,reg);
#endif

   if ((port < GT_ETH_PORTS) && (reg < 32)) {
      res = d->mii_regs[port][reg];

      switch(reg) {
         case 0x00:
            res &= ~0x8200;   /* clear reset bit and autoneg restart */
            break;
         case 0x01:
#if 0
            if (d->ports[port].nio && bcm5600_mii_port_status(d,port))
               d->mii_output = 0x782C;
            else
               d->mii_output = 0;
#endif
            res = 0x782c;
            break;
         case 0x02:
            res = 0x40;
            break;
         case 0x03:
            res = 0x61d4;
            break;
         case 0x04:
            res = 0x1E1;
            break;
         case 0x05:
            res = 0x41E1;
            break;
         default:
            res = 0;
      }
   }

   /* Mark the data as ready */
   res |= GT_SMIR_RVALID_FLAG;

   return(res);
}

/* Write a MII register */
static void gt_mii_write(struct gt_data *d)
{
   m_uint8_t port,reg;
   m_uint16_t isolation;

   port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
   reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;

   if ((port < GT_ETH_PORTS) && (reg < 32))
   {
#if DEBUG_MII
      GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: writing 0x%4.4x\n",
             port,reg,d->smi_reg & GT_SMIR_DATA_MASK);
#endif

      /* Check if PHY isolation status is changing */
      if (reg == 0) {
         isolation = (d->smi_reg ^ d->mii_regs[port][reg]) & 0x400;

         if (isolation) {
#if DEBUG_MII
            GT_LOG(d,"MII: port 0x%4.4x: generating IRQ\n",port);
#endif
            d->eth_ports[port].icr |= GT_ICR_MII_STC;
            gt_eth_update_int_status(d,&d->eth_ports[port]);
         }
      }

      d->mii_regs[port][reg] = d->smi_reg & GT_SMIR_DATA_MASK;
   }
}

/* Handle registers of Ethernet ports */
static int gt_eth_access(cpu_mips_t *cpu,struct vdevice *dev,
                         m_uint32_t offset,u_int op_size,u_int op_type,
                         m_uint64_t *data)
{
   struct gt_data *d = dev->priv_data;
   struct eth_port *port;
   u_int port_id = 0;
   u_int queue;

   if ((offset < 0x80000) || (offset >= 0x90000))
      return(FALSE);

   if (op_type == MTS_WRITE)
      *data = swap32(*data);

   /* Determine the Ethernet port */
   if ((offset >= 0x84800) && (offset < 0x88800))
      port_id = 0;

   if ((offset >= 0x88800) && (offset < 0x8c800))
      port_id = 1;

   port = &d->eth_ports[port_id];

   switch(offset) {
      /* SMI register */
      case 0x80810:
         if (op_type == MTS_WRITE) {
            d->smi_reg = *data;

            if (!(d->smi_reg & GT_SMIR_OPCODE_READ))
               gt_mii_write(d);
         } else {
            *data = 0;

            if (d->smi_reg & GT_SMIR_OPCODE_READ)
               *data = gt_mii_read(d);
         }
         break;

      /* ICR: Interrupt Cause Register */
      case 0x84850:
      case 0x88850:
         if (op_type == MTS_READ)
            *data = port->icr;
         else
            port->icr &= *data;
         break;

      /* IMR: Interrupt Mask Register */
      case 0x84858:
      case 0x88858:
         if (op_type == MTS_READ)
            *data = port->imr;
         else
            port->imr = *data;
         break;

      /* PCR: Port Configuration Register */
      case 0x84800:
      case 0x88800:
         if (op_type == MTS_READ)
            *data = port->pcr;
         else
            port->pcr = *data;
         break;

      /* PCXR: Port Configuration Extend Register */
      case 0x84808:
      case 0x88808:
         if (op_type == MTS_READ) {
            *data = port->pcxr;
            *data |= GT_PCXR_SPEED;
         } else
            port->pcxr = *data;
         break;

      /* PCMR: Port Command Register */
      case 0x84810:
      case 0x88810:
         if (op_type == MTS_READ)
            *data = port->pcmr;
         else
            port->pcmr = *data;
         break;

      /* Port Status Register */
      case 0x84818:
      case 0x88818:
         if (op_type == MTS_READ)
            *data = 0x0F;
         break;

      /* First RX descriptor */
      case 0x84880:
      case 0x88880:
      case 0x84884:
      case 0x88884:
      case 0x84888:
      case 0x88888:
      case 0x8488C:
      case 0x8888C:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_start[queue];
         else
            port->rx_start[queue] = *data;
         break;

      /* Current RX descriptor */
      case 0x848A0:
      case 0x888A0:
      case 0x848A4:
      case 0x888A4:
      case 0x848A8:
      case 0x888A8:
      case 0x848AC:
      case 0x888AC:
         queue = (offset >> 2) & 0x03;
         if (op_type == MTS_READ)
            *data = port->rx_current[queue];
         else
            port->rx_current[queue] = *data;
         break;

      /* Current TX descriptor */
      case 0x848E0:
      case 0x888E0:
      case 0x848E4:
      case 0x888E4:
         queue = (offset >> 2) & 0x01;
         if (op_type == MTS_READ)
            *data = port->tx_current[queue];
         else
            port->tx_current[queue] = *data;
         break;

      /* Hash Table Pointer */
      case 0x84828:
      case 0x88828:
         if (op_type == MTS_READ)
            *data = port->ht_addr;
         else
            port->ht_addr = *data;
         break;

      /* SDCR: SDMA Configuration Register */
      case 0x84840:
      case 0x88840:
         if (op_type == MTS_READ)
            *data = port->sdcr;
         else
            port->sdcr = *data;
         break;

      /* SDCMR: SDMA Command Register */
      case 0x84848:
      case 0x88848:
         if (op_type == MTS_WRITE) {
            /* Start RX DMA */
            if (*data & GT_SDCMR_ERD) {
               port->sdcmr |= GT_SDCMR_ERD;
               port->sdcmr &= ~GT_SDCMR_AR;
            }

            /* Abort RX DMA */
            if (*data & GT_SDCMR_AR)
               port->sdcmr &= ~GT_SDCMR_ERD;

            /* Start TX High */
            if (*data & GT_SDCMR_TXDH) {
               port->sdcmr |= GT_SDCMR_TXDH;
               port->sdcmr &= ~GT_SDCMR_STDH;
            }

            /* Start TX Low */
            if (*data & GT_SDCMR_TXDL) {
               port->sdcmr |= GT_SDCMR_TXDL;
               port->sdcmr &= ~GT_SDCMR_STDL;
            }

            /* Stop TX High */
            if (*data & GT_SDCMR_STDH) {
               port->sdcmr &= ~GT_SDCMR_TXDH;
               port->sdcmr |= GT_SDCMR_STDH;
            }

            /* Stop TX Low */
            if (*data & GT_SDCMR_STDL) {
               port->sdcmr &= ~GT_SDCMR_TXDL;
               port->sdcmr |= GT_SDCMR_STDL;
            }
         } else {
            *data = port->sdcmr;
         }
         break;

      case 0x85800:
      case 0x89800:
         if (op_type == MTS_READ) {
            *data = port->rx_bytes;
            port->rx_bytes = 0;
         }
         break;

      case 0x85804:
      case 0x89804:
         if (op_type == MTS_READ) {
            *data = port->tx_bytes;
            port->tx_bytes = 0;
         }
         break;

      case 0x85808:
      case 0x89808:
         if (op_type == MTS_READ) {
            *data = port->rx_frames;
            port->rx_frames = 0;
         }
         break;

      case 0x8580C:
      case 0x8980C:
         if (op_type == MTS_READ) {
            *data = port->tx_frames;
            port->tx_frames = 0;
         }
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100/ETH",
                    "read access to unknown register 0x%x, pc=0x%llx\n",
                    offset,cpu->pc);
         } else {
            cpu_log(cpu,"GT96100/ETH",
                    "write access to unknown register 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu->pc);
         }
#endif
   }

   if (op_type == MTS_READ)
      *data = swap32(*data);

   return(TRUE);
}

/*
 * dev_gt96100_access()
 */
void *dev_gt96100_access(cpu_mips_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct gt_data *gt_data = dev->priv_data;

   if (op_type == MTS_READ)
      *data = 0;

   if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
      return NULL;

   switch(offset) {
      /* Watchdog configuration register */
      case 0x101a80:
         break;

      /* Watchdog value register */
      case 0x101a84:
         break;

      case 0x008:   /* ras10_low */
         if (op_type == MTS_READ)
            *data = swap32(0x000);
         break;
      case 0x010:   /* ras10_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x018:   /* ras32_low */
         if (op_type == MTS_READ)
            *data = swap32(0x100);
         break;
      case 0x020:   /* ras32_high */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x400:   /* ras0_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x404:   /* ras0_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x408:   /* ras1_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x40c:   /* ras1_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x410:   /* ras2_low */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0x414:   /* ras2_high */
         if (op_type == MTS_READ)
            *data = swap32(0xFF);
         break;
      case 0x418:   /* ras3_low */
         if (op_type == MTS_READ)
            *data = swap32(0x7F);
         break;
      case 0x41c:   /* ras3_high */
         if (op_type == MTS_READ)
            *data = swap32(0x00);
         break;
      case 0xc08:   /* pci0_cs10 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;
      case 0xc0c:   /* pci0_cs32 */
         if (op_type == MTS_READ)
            *data = swap32(0xFFF);
         break;

      case 0xc00:   /* pci_cmd */
         if (op_type == MTS_READ)
            *data = swap32(0x00008001);
         break;

      /* ===== Interrupt Main Cause Register ===== */
      case 0xc18:
         if (op_type == MTS_READ) {
            *data = gt_data->int_cause_reg;

            /* TODO: signal Eth0/Eth1 */
            //*data |= (1 << 30) | (1 << 31) | 1;

            *data = swap32(*data);
         } else {
            gt_data->int_cause_reg &= swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt Mask Register ===== */
      case 0xc1c:
         if (op_type == MTS_READ) {
            *data = swap32(gt_data->int_mask_reg);
         } else {
            gt_data->int_mask_reg = swap32(*data);
            gt_update_irq_status(gt_data);
         }
         break;

      /* ===== Interrupt High Cause Register ===== */
      case 0xc98:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_IHCR_ETH1_SUM;

            *data = swap32(*data);
         }
         break;

      /* Serial Cause Register */
      case 0x103a00:
         if (op_type == MTS_READ) {
            *data = 0;

            /* interrupt on ethernet port 0 ? */
            if (gt_data->eth_ports[0].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH0_SUM;

            /* interrupt on ethernet port 1 ? */
            if (gt_data->eth_ports[1].icr & GT_ICR_INT_SUM)
               *data |= GT_SCR_ETH1_SUM;

            *data = swap32(*data);
         }
         break;

      /* ===== PCI Bus 1 ===== */
      case 0xcf0:
         pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      case 0xcf4:
         pci_dev_data_handler(cpu,gt_data->bus[1],op_type,TRUE,data);
         break;

      /* ===== PCI Bus 0 ===== */
      case PCI_BUS_ADDR:   /* pci configuration address (0xcf8) */
         pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

      case PCI_BUS_DATA:   /* pci data address (0xcfc) */
         pci_dev_data_handler(cpu,gt_data->bus[0],op_type,TRUE,data);
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
                    offset,cpu->pc);
         } else {
            cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
                    "pc=0x%llx\n",offset,*data,cpu->pc);
         }
#endif
   }

   return NULL;
}

/* Read an Ethernet descriptor */
static void gt_eth_desc_read(struct gt_data *d,m_uint32_t addr,
                             struct eth_desc *desc)
{
   physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct eth_desc));

   /* byte-swapping */
   desc->cmd_stat = vmtoh32(desc->cmd_stat);
   desc->buf_size = vmtoh32(desc->buf_size);
   desc->next_ptr = vmtoh32(desc->next_ptr);
   desc->buf_ptr = vmtoh32(desc->buf_ptr);
}

/* Write an Ethernet descriptor */
static void gt_eth_desc_write(struct gt_data *d,m_uint32_t addr,
                              struct eth_desc *desc)
{
   struct eth_desc tmp;

   /* byte-swapping */
   tmp.cmd_stat = vmtoh32(desc->cmd_stat);
   tmp.buf_size = vmtoh32(desc->buf_size);
   tmp.next_ptr = vmtoh32(desc->next_ptr);
   tmp.buf_ptr = vmtoh32(desc->buf_ptr);

   physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct eth_desc));
}

/* Handle a TX queue (single packet) */
static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port,
                                 int queue)
{
   u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
   struct eth_desc txd0,ctxd,*ptxd;
   m_uint32_t tx_start,tx_current;
   m_uint32_t len,tot_len;
   int abort = FALSE;

   /* Check if this TX queue is active */
   if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL))
      return(FALSE);

   if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = tx_current = port->tx_current[queue];

   if (!tx_start)
      goto done;

   ptxd = &txd0;
   gt_eth_desc_read(d,tx_start,ptxd);

   /* If we don't own the first descriptor, we cannot transmit */
   if (!(txd0.cmd_stat & GT_TXDESC_OWN))
      goto done;

   /* Empty packet for now */
   pkt_ptr = pkt;
   tot_len = 0;

   for(;;) {
#if DEBUG_ETH_TX
      GT_LOG(d,"gt_eth_handle_txqueue: loop: "
             "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n",
             ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);
#endif

      if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) {
         GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n");
         abort = TRUE;
         break;
      }

      /* Copy packet data to the buffer */
      len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;

      physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
      pkt_ptr += len;
      tot_len += len;

      /* Clear the OWN bit if this is not the first descriptor
         (cmd_stat is the second word of the descriptor, hence the +4) */
      if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
         ptxd->cmd_stat &= ~GT_TXDESC_OWN;
         physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->cmd_stat);
      }

      tx_current = ptxd->next_ptr;

      /* Last descriptor or no more desc available ? */
      if (ptxd->cmd_stat & GT_TXDESC_L)
         break;

      if (!tx_current) {
         abort = TRUE;
         break;
      }

      /* Fetch the next descriptor */
      gt_eth_desc_read(d,tx_current,&ctxd);
      ptxd = &ctxd;
   }

   if ((tot_len != 0) && !abort) {
#if DEBUG_ETH_TX
      GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len);
      mem_dump(log_file,pkt,tot_len);
#endif
      /* send it on wire */
      netio_send(port->nio,pkt,tot_len);

      /* Update MIB counters */
      port->tx_bytes += tot_len;
      port->tx_frames++;
   }

   /* Clear the OWN flag of the first descriptor */
   txd0.cmd_stat &= ~GT_TXDESC_OWN;
   physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);

   port->tx_current[queue] = tx_current;

   /* Notify host about transmitted packet */
   if (queue == 0)
      port->icr |= GT_ICR_TXBUFL;
   else
      port->icr |= GT_ICR_TXBUFH;

 done:
   if (abort) {
      /* TX underrun */
      port->icr |= GT_ICR_TXUDR;

      if (queue == 0)
         port->icr |= GT_ICR_TXERRL;
      else
         port->icr |= GT_ICR_TXERRH;
   } else {
      /* End of queue has been reached */
      if (!tx_current) {
         if (queue == 0)
            port->icr |= GT_ICR_TXENDL;
         else
            port->icr |= GT_ICR_TXENDH;
      }
   }

   /* Update the interrupt status */
   gt_eth_update_int_status(d,port);
   return(TRUE);
}

/* Handle TX ring of the specified port */
static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port)
{
   gt_eth_handle_txqueue(d,&d->eth_ports[port],0);   /* TX Low */
   gt_eth_handle_txqueue(d,&d->eth_ports[port],1);   /* TX High */
}

/* Handle all TX rings of all Ethernet ports */
static int gt_eth_handle_txqueues(struct gt_data *d)
{
   int i;

   for(i=0;i<GT_ETH_PORTS;i++)
      gt_eth_handle_port_txqueues(d,i);

   return(TRUE);
}

/* Reverse the bits of a nibble */
static const int inv_nibble[16] = {
   0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
   0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF
};
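
/* Example: inv_nibble[0x1] = 0x8 (0001b -> 1000b) and
   inv_nibble[0x3] = 0xC (0011b -> 1100b). */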

/* Reverse the bits of a 9-bit value */
static inline u_int gt_hash_inv_9bit(u_int val)
{
   u_int res;

   res = inv_nibble[val & 0x0F] << 5;
   res |= inv_nibble[(val & 0xF0) >> 4] << 1;
   res |= (val & 0x100) >> 8;
   return(res);
}

/*
 * Compute hash value for Ethernet address filtering.
 * Two modes are available (p.271 of the GT96100 doc).
 */
static u_int gt_eth_hash_value(n_eth_addr_t *addr,int mode)
{
   m_uint64_t tmp;
   u_int res;
   int i;

   /* Bit-reverse each address byte (swap the nibbles and reverse
      the bits of each nibble) */
   for(i=0,tmp=0;i<N_ETH_ALEN;i++) {
      tmp <<= 8;
      tmp |= (inv_nibble[addr->eth_addr_byte[i] & 0x0F]) << 4;
      tmp |= inv_nibble[(addr->eth_addr_byte[i] & 0xF0) >> 4];
   }
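
   /* At this point each address byte has been bit-reversed in tmp:
      for example, a byte of 0x12 (00010010b) becomes 0x48 (01001000b). */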

   if (mode == 0) {
      /* Fill bits 0:8 */
      res = (tmp & 0x00000003) | ((tmp & 0x00007f00) >> 6);
      res ^= (tmp & 0x00ff8000) >> 15;
      res ^= (tmp & 0x1ff000000ULL) >> 24;

      /* Fill bits 9:14 */
      res |= (tmp & 0xfc) << 7;
   } else {
      /* Fill bits 0:8 */
      res = gt_hash_inv_9bit((tmp & 0x00007fc0) >> 6);
      res ^= gt_hash_inv_9bit((tmp & 0x00ff8000) >> 15);
      res ^= gt_hash_inv_9bit((tmp & 0x1ff000000ULL) >> 24);

      /* Fill bits 9:14 */
      res |= (tmp & 0x3f) << 9;
   }

   return(res);
}

/*
 * Walk through the Ethernet hash table.
 */
static int gt_eth_hash_lookup(struct gt_data *d,struct eth_port *port,
                              n_eth_addr_t *addr,m_uint64_t *entry)
{
   m_uint64_t eth_val;
   m_uint32_t hte_addr;
   u_int hash_val;
   int i;

   eth_val = (m_uint64_t)addr->eth_addr_byte[0] << 3;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[1] << 11;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[2] << 19;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[3] << 27;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[4] << 35;
   eth_val |= (m_uint64_t)addr->eth_addr_byte[5] << 43;

   /* Compute hash value for Ethernet address filtering */
   hash_val = gt_eth_hash_value(addr,port->pcr & GT_PCR_HM);

   if (port->pcr & GT_PCR_HS) {
      /* 1/2K address filtering */
      hte_addr = port->ht_addr + ((hash_val & 0x7ff) << 3);
   } else {
      /* 8K address filtering */
      hte_addr = port->ht_addr + (hash_val << 3);
   }

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash Lookup for Ethernet address "
          "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x: addr=0x%x\n",
          addr->eth_addr_byte[0], addr->eth_addr_byte[1],
          addr->eth_addr_byte[2], addr->eth_addr_byte[3],
          addr->eth_addr_byte[4], addr->eth_addr_byte[5],
          hte_addr);
#endif

   for(i=0;i<GT_HTE_HOPNUM;i++,hte_addr+=8) {
      *entry = ((m_uint64_t)physmem_copy_u32_from_vm(d->vm,hte_addr)) << 32;
      *entry |= physmem_copy_u32_from_vm(d->vm,hte_addr+4);

      /* Empty entry ? */
      if (!(*entry & GT_HTE_VALID))
         return(GT_HTLOOKUP_MISS);

      /* Skip flag set or different Ethernet address: jump to next entry */
      if ((*entry & GT_HTE_SKIP) || ((*entry & GT_HTE_ADDR_MASK) != eth_val))
         continue;

      /* This entry holds the MAC address we are looking for */
      return(GT_HTLOOKUP_MATCH);
   }

   return(GT_HTLOOKUP_HOP_EXCEEDED);
}

/*
 * Check if a packet (given its destination address) must be handled
 * by the RX path.
 *
 * Return values:
 *   - 0: Discard the packet;
 *   - 1: Receive the packet;
 *   - 2: Receive the packet and set the "M" bit in the RX descriptor.
 *
 * The documentation is not clear about the M bit in the RX descriptor:
 * it is described as "Miss" or "Match" depending on the section.
 */
static inline int gt_eth_handle_rx_daddr(struct gt_data *d,
                                         struct eth_port *port,
                                         u_int hash_res,
                                         m_uint64_t hash_entry)
{
   /* Hop Number exceeded */
   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      return(1);

   /* Match, and the hash entry is marked as "Receive" */
   if ((hash_res == GT_HTLOOKUP_MATCH) && (hash_entry & GT_HTE_RD))
      return(2);

   /* Miss, but the hash table default mode is to forward */
   if ((hash_res == GT_HTLOOKUP_MISS) && (port->pcr & GT_PCR_HDM))
      return(2);

   /* Promiscuous Mode */
   if (port->pcr & GT_PCR_PM)
      return(1);

   /* Drop the packet in all other cases */
   return(0);
}

/* Put a packet in the buffer of a descriptor */
static void gt_eth_rxdesc_put_pkt(struct gt_data *d,struct eth_desc *rxd,
                                  u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT;

   /* compute the data length to copy */
   cp_len = m_min(len,*pkt_len);

   /* copy packet data to the VM physical RAM */
   physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len);

   /* set the byte count in descriptor */
   rxd->buf_size |= cp_len;

   *pkt += cp_len;
   *pkt_len -= cp_len;
}

/* Put a packet in the specified RX queue */
static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue,
                                 u_char *pkt,ssize_t pkt_len)
{
   struct eth_port *port = &d->eth_ports[port_id];
   m_uint32_t rx_start,rx_current;
   struct eth_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   n_eth_dot1q_hdr_t *hdr;
   m_uint64_t hash_entry;
   int i,hash_res,addr_action;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);

   /* Copy the first RX descriptor */
   if (!(rx_start = rx_current = port->rx_start[queue]))
      goto dma_error;

   /* Analyze the Ethernet header */
   hdr = (n_eth_dot1q_hdr_t *)pkt;

   /* Hash table lookup for address filtering */
   hash_res = gt_eth_hash_lookup(d,port,&hdr->daddr,&hash_entry);

#if DEBUG_ETH_HASH
   GT_LOG(d,"Hash result: %d, hash_entry=0x%llx\n",hash_res,hash_entry);
#endif

   if (!(addr_action = gt_eth_handle_rx_daddr(d,port,hash_res,hash_entry)))
      return(FALSE);

   /* Load the first RX descriptor */
   gt_eth_desc_read(d,rx_start,&rxd0);

#if DEBUG_ETH_RX
   GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x "
          "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
          "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
          port_id,queue,rx_start,
          rxd0.buf_size,rxd0.cmd_stat,rxd0.next_ptr,rxd0.buf_ptr);
#endif

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* We must own the descriptor */
      if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
         goto dma_error;

      /* Put data into the descriptor buffer */
      gt_eth_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Clear the OWN bit */
      rxdc->cmd_stat &= ~GT_RXDESC_OWN;

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->cmd_stat |= GT_RXDESC_L;
         rxdc->buf_size += 4;   /* Add 4 bytes for CRC */
      }

      /* Update the descriptor in host memory (but not the 1st) */
      if (i != 0)
         gt_eth_desc_write(d,rx_current,rxdc);

      /* Get address of the next descriptor */
      rx_current = rxdc->next_ptr;

      if (tot_len == 0)
         break;

      if (!rx_current)
         goto dma_error;

      /* Read the next descriptor from VM physical RAM */
      gt_eth_desc_read(d,rx_current,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the RX pointers */
   port->rx_start[queue] = port->rx_current[queue] = rx_current;

   /* Update the first RX descriptor */
   rxd0.cmd_stat |= GT_RXDESC_F;

   if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
      rxd0.cmd_stat |= GT_RXDESC_HE;

   if (addr_action == 2)
      rxd0.cmd_stat |= GT_RXDESC_M;

   if (ntohs(hdr->type) <= N_ETH_MTU)   /* 802.3 frame */
      rxd0.cmd_stat |= GT_RXDESC_FT;

   gt_eth_desc_write(d,rx_start,&rxd0);

   /* Update MIB counters */
   port->rx_bytes += pkt_len;
   port->rx_frames++;

   /* Indicate that we have a frame ready */
   port->icr |= (GT_ICR_RXBUFQ0 << queue) | GT_ICR_RXBUF;
   gt_eth_update_int_status(d,port);
   return(TRUE);

 dma_error:
   port->icr |= (GT_ICR_RXERRQ0 << queue) | GT_ICR_RXERR;
   gt_eth_update_int_status(d,port);
   return(FALSE);
}

/* Handle RX packet for an Ethernet port */
static int gt_eth_handle_rx_pkt(netio_desc_t *nio,
                                u_char *pkt,ssize_t pkt_len,
                                struct gt_data *d,void *arg)
{
   u_int queue,port_id = (int)arg;
   struct eth_port *port;

   port = &d->eth_ports[port_id];

   /* Check if RX DMA is active */
   if (!(port->sdcmr & GT_SDCMR_ERD))
      return(FALSE);

   queue = 0;   /* At this time, only put packet in queue 0 */
   gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len);
   return(TRUE);
}

/* Shutdown a GT system controller */
void dev_gt_shutdown(vm_instance_t *vm,struct gt_data *d)
{
   if (d != NULL) {
      /* Stop the TX ring scanner */
      ptask_remove(d->eth_tx_tid);

      /* Remove the device */
      dev_remove(vm,&d->dev);

      /* Remove the PCI device */
      pci_dev_remove(d->pci_dev);

      /* Free the structure itself */
      free(d);
   }
}

/* Create a new GT64010 controller */
int dev_gt64010_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64010: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64010_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64010,
                               0,0,irq,d,NULL,NULL,NULL);

      if (!d->pci_dev) {
         fprintf(stderr,"gt64010: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt64120_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt64120_read(cpu_mips_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT64120 controller */
int dev_gt64120_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,u_int irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt64120: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->vm = vm;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt64120_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64120,
                               0,0,irq,d,NULL,pci_gt64120_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt64120: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/*
 * pci_gt96100_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_gt96100_read(cpu_mips_t *cpu,struct pci_device *dev,
                                   int reg)
{
   switch (reg) {
      case 0x08:
         return(0x03008005);
      default:
         return(0);
   }
}

/* Create a new GT96100 controller */
int dev_gt96100_init(vm_instance_t *vm,char *name,
                     m_uint64_t paddr,m_uint32_t len,
                     u_int dma_irq,u_int eth_irq)
{
   struct gt_data *d;

   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"gt96100: unable to create device data.\n");
      return(-1);
   }

   memset(d,0,sizeof(*d));
   d->name = name;
   d->vm = vm;
   d->eth_irq = eth_irq;
   d->bus[0] = vm->pci_bus[0];
   d->bus[1] = vm->pci_bus[1];

   vm_object_init(&d->vm_obj);
   d->vm_obj.name = name;
   d->vm_obj.data = d;
   d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;

   dev_init(&d->dev);
   d->dev.name = name;
   d->dev.priv_data = d;
   d->dev.phys_addr = paddr;
   d->dev.phys_len = len;
   d->dev.handler = dev_gt96100_access;

   /* Add the controller as a PCI device */
   if (!pci_dev_lookup(d->bus[0],0,0,0)) {
      d->pci_dev = pci_dev_add(d->bus[0],name,
                               PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100,
                               0,0,dma_irq,d,NULL,pci_gt96100_read,NULL);
      if (!d->pci_dev) {
         fprintf(stderr,"gt96100: unable to create PCI device.\n");
         return(-1);
      }
   }

   /* Start the TX ring scanner */
   d->eth_tx_tid = ptask_add((ptask_callback)gt_eth_handle_txqueues,d,NULL);

   /* Map this device to the VM */
   vm_bind_device(vm,&d->dev);
   vm_object_add(vm,&d->vm_obj);
   return(0);
}

/* Bind a NIO to a GT96100 device */
int dev_gt96100_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   /* check that a NIO is not already bound */
   if (port->nio != NULL)
      return(-1);

   port->nio = nio;
   netio_rxl_add(nio,(netio_rx_handler_t)gt_eth_handle_rx_pkt,
                 d,(void *)port_id);
   return(0);
}

/* Unbind a NIO from a GT96100 device */
int dev_gt96100_unset_nio(struct gt_data *d,u_int port_id)
{
   struct eth_port *port;

   if (port_id >= GT_ETH_PORTS)
      return(-1);

   port = &d->eth_ports[port_id];

   if (port->nio != NULL) {
      netio_rxl_remove(port->nio);
      port->nio = NULL;
   }

   return(0);
}
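
/*
 * Typical call sequence (an illustrative sketch, not code from this file;
 * paddr/len/IRQ values are placeholders, and "d" stands for the
 * controller's struct gt_data, which platform code must obtain itself):
 *
 *    dev_gt96100_init(vm,"gt96100",paddr,len,dma_irq,eth_irq);
 *    dev_gt96100_set_nio(d,0,nio);      // attach a NIO to Ethernet port 0
 *    ...
 *    dev_gt96100_unset_nio(d,0);        // detach it before shutdown
 */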