/[dynamips]/upstream/dynamips-0.2.6-RC5/dev_am79c971.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /upstream/dynamips-0.2.6-RC5/dev_am79c971.c



Revision 5
Sat Oct 6 16:08:03 2007 UTC by dpavlin
Original Path: upstream/dynamips-0.2.6-RC4/dev_am79c971.c
File MIME type: text/plain
File size: 30082 byte(s)
dynamips-0.2.6-RC4

1 /*
2 * Cisco C7200 (Predator) AMD Am79c971 Module.
3 * Copyright (C) 2006 Christophe Fillot. All rights reserved.
4 *
5 * AMD Am79c971 FastEthernet chip emulation.
6 */
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <time.h>
14 #include <errno.h>
15 #include <assert.h>
16
17 #include "utils.h"
18 #include "mips64.h"
19 #include "dynamips.h"
20 #include "memory.h"
21 #include "device.h"
22 #include "net.h"
23 #include "net_io.h"
24 #include "ptask.h"
25 #include "dev_am79c971.h"
26
27 /* Debugging flags */
28 #define DEBUG_CSR_REGS 0
29 #define DEBUG_BCR_REGS 0
30 #define DEBUG_PCI_REGS 0
31 #define DEBUG_ACCESS 0
32 #define DEBUG_TRANSMIT 0
33 #define DEBUG_RECEIVE 0
34 #define DEBUG_UNKNOWN 0
35
36 /* AMD Am79c971 PCI vendor/product codes */
37 #define AM79C971_PCI_VENDOR_ID 0x1022
38 #define AM79C971_PCI_PRODUCT_ID 0x2000
39
40 /* Maximum packet size */
41 #define AM79C971_MAX_PKT_SIZE 2048
42
43 /* Send up to 16 packets in a TX ring scan pass */
44 #define AM79C971_TXRING_PASS_COUNT 16
45
46 /* CSR0: Controller Status and Control Register */
47 #define AM79C971_CSR0_ERR 0x00008000 /* Error (BABL,CERR,MISS,MERR) */
48 #define AM79C971_CSR0_BABL 0x00004000 /* Transmitter Timeout Error */
49 #define AM79C971_CSR0_CERR 0x00002000 /* Collision Error */
50 #define AM79C971_CSR0_MISS 0x00001000 /* Missed Frame */
51 #define AM79C971_CSR0_MERR 0x00000800 /* Memory Error */
52 #define AM79C971_CSR0_RINT 0x00000400 /* Receive Interrupt */
53 #define AM79C971_CSR0_TINT 0x00000200 /* Transmit Interrupt */
54 #define AM79C971_CSR0_IDON 0x00000100 /* Initialization Done */
55 #define AM79C971_CSR0_INTR 0x00000080 /* Interrupt Flag */
56 #define AM79C971_CSR0_IENA 0x00000040 /* Interrupt Enable */
57 #define AM79C971_CSR0_RXON 0x00000020 /* Receive On */
58 #define AM79C971_CSR0_TXON 0x00000010 /* Transmit On */
59 #define AM79C971_CSR0_TDMD 0x00000008 /* Transmit Demand */
60 #define AM79C971_CSR0_STOP 0x00000004 /* Stop */
61 #define AM79C971_CSR0_STRT 0x00000002 /* Start */
62 #define AM79C971_CSR0_INIT 0x00000001 /* Initialization */
63
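/*
 * Summary of how CSR0 is handled by am79c971_rdp_access() below: the event
 * bits BABL/CERR/MISS/MERR/RINT/TINT/IDON are cleared by writing 1, STOP
 * resets the ring positions and overrides INIT/STRT, INIT triggers the fetch
 * of the initialization block (which sets IDON), and STRT clears STOP and
 * enables RXON/TXON according to the DRX/DTX bits of CSR15.
 */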
64 /* CSR3: Interrupt Masks and Deferral Control */
65 #define AM79C971_CSR3_BABLM 0x00004000 /* Transmitter Timeout Int. Mask */
66 #define AM79C971_CSR3_CERRM 0x00002000 /* Collision Error Int. Mask */
67 #define AM79C971_CSR3_MISSM 0x00001000 /* Missed Frame Interrupt Mask */
68 #define AM79C971_CSR3_MERRM 0x00000800 /* Memory Error Interrupt Mask */
69 #define AM79C971_CSR3_RINTM 0x00000400 /* Receive Interrupt Mask */
70 #define AM79C971_CSR3_TINTM 0x00000200 /* Transmit Interrupt Mask */
71 #define AM79C971_CSR3_IDONM 0x00000100 /* Initialization Done Mask */
72 #define AM79C971_CSR3_BSWP 0x00000004 /* Byte Swap */
73 #define AM79C971_CSR3_IM_MASK 0x00007F00 /* Interrupt Masks for CSR3 */
74
75 /* CSR5: Extended Control and Interrupt 1 */
76 #define AM79C971_CSR5_TOKINTD 0x00008000 /* Transmit OK Interrupt Disable */
77 #define AM79C971_CSR5_SPND 0x00000001 /* Suspend */
78
79 /* CSR15: Mode */
80 #define AM79C971_CSR15_PROM 0x00008000 /* Promiscuous Mode */
81 #define AM79C971_CSR15_DRCVBC 0x00004000 /* Disable Receive Broadcast */
82 #define AM79C971_CSR15_DRCVPA 0x00002000 /* Disable Receive PHY address */
83 #define AM79C971_CSR15_DTX 0x00000002 /* Disable Transmit */
84 #define AM79C971_CSR15_DRX 0x00000001 /* Disable Receive */
85
86 /* AMD 79C971 Initialization block length */
87 #define AM79C971_INIT_BLOCK_LEN 0x1c
88
89 /* RX descriptors */
90 #define AM79C971_RMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
91 #define AM79C971_RMD1_ERR 0x40000000 /* Error */
92 #define AM79C971_RMD1_FRAM 0x20000000 /* Framing Error */
93 #define AM79C971_RMD1_OFLO 0x10000000 /* Overflow Error */
94 #define AM79C971_RMD1_CRC 0x08000000 /* Invalid CRC */
95 #define AM79C971_RMD1_BUFF 0x04000000 /* Buffer Error (chaining) */
96 #define AM79C971_RMD1_STP 0x02000000 /* Start of Packet */
97 #define AM79C971_RMD1_ENP 0x01000000 /* End of Packet */
98 #define AM79C971_RMD1_BPE 0x00800000 /* Bus Parity Error */
99 #define AM79C971_RMD1_PAM 0x00400000 /* Physical Address Match */
100 #define AM79C971_RMD1_LAFM 0x00200000 /* Logical Addr. Filter Match */
101 #define AM79C971_RMD1_BAM 0x00100000 /* Broadcast Address Match */
102 #define AM79C971_RMD1_LEN 0x00000FFF /* Buffer Length */
103
104 #define AM79C971_RMD2_LEN 0x00000FFF /* Received byte count */
105
106 /* TX descriptors */
107 #define AM79C971_TMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
108 #define AM79C971_TMD1_ERR 0x40000000 /* Error */
109 #define AM79C971_TMD1_ADD_FCS 0x20000000 /* FCS generation */
110 #define AM79C971_TMD1_STP 0x02000000 /* Start of Packet */
111 #define AM79C971_TMD1_ENP 0x01000000 /* End of Packet */
112 #define AM79C971_TMD1_LEN 0x00000FFF /* Buffer Length */
113
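/*
 * The 12-bit length fields above (AM79C971_RMD1_LEN / AM79C971_TMD1_LEN)
 * hold the buffer byte count in two's complement form, as on the real chip.
 * rxdesc_put_pkt() and am79c971_handle_txring_single() recover the positive
 * count with:  len = ~((mdX & LEN_MASK) - 1) & LEN_MASK
 * Worked example: a 1518-byte buffer is stored as (-1518) & 0xFFF = 0xA12,
 * and ~(0xA12 - 1) & 0xFFF = 0x5EE = 1518.
 */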
114 /* RX Descriptor */
115 struct rx_desc {
116 m_uint32_t rmd[4];
117 };
118
119 /* TX Descriptor */
120 struct tx_desc {
121 m_uint32_t tmd[4];
122 };
123
124 /* AMD 79C971 Data */
125 struct am79c971_data {
126 char *name;
127
128 /* Interface type (10baseT or 100baseTX) */
129 int type;
130
131 /* Current RAP (Register Address Pointer) value */
132 m_uint8_t rap;
133
134 /* CSR and BCR registers */
135 m_uint32_t csr[256],bcr[256];
136
137 /* RX/TX rings start addresses */
138 m_uint32_t rx_start,tx_start;
139
140 /* RX/TX number of descriptors (log2) */
141 m_uint32_t rx_l2len,tx_l2len;
142
143 /* RX/TX number of descriptors */
144 m_uint32_t rx_len,tx_len;
145
146 /* RX/TX ring positions */
147 m_uint32_t rx_pos,tx_pos;
148
149 /* MII registers */
150 m_uint16_t mii_regs[32][32];
151
152 /* Physical (MAC) address */
153 n_eth_addr_t mac_addr;
154
155 /* Device information */
156 struct vdevice *dev;
157
158 /* PCI device information */
159 struct pci_device *pci_dev;
160
161 /* Virtual machine */
162 vm_instance_t *vm;
163
164 /* NetIO descriptor */
165 netio_desc_t *nio;
166
167 /* TX ring scanner task id */
168 ptask_id_t tx_tid;
169 };
170
171 /* Log an am79c971 message */
172 #define AM79C971_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
173
174
175 static m_uint16_t mii_reg_values[32] = {
176 0x1000, 0x782D, 0x2000, 0x5C01, 0x01E1, 0x0000, 0x0000, 0x0000,
177 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
178 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8060,
179 0x8020, 0x0820, 0x0000, 0x3800, 0xA3B9, 0x0000, 0x0000, 0x0000,
180 };
181
182 /* Read a MII register */
183 static m_uint16_t mii_reg_read(struct am79c971_data *d,u_int phy,u_int reg)
184 {
185 if ((phy >= 32) || (reg >= 32))
186 return(0);
187
188 return(d->mii_regs[phy][reg]);
189 }
190
191 /* Write a MII register */
192 static void mii_reg_write(struct am79c971_data *d,u_int phy,u_int reg,
193 m_uint16_t value)
194 {
195 if ((phy < 32) && (reg < 32))
196 d->mii_regs[phy][reg] = value;
197 }
198
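/*
 * The guest reaches this PHY register file through the MII management
 * registers: BCR33 supplies the PHY address (bits 9:5) and register number
 * (bits 4:0), and BCR34 is the data port (see am79c971_bdp_access() below).
 * mii_reg_values[] seeds PHY 0 with defaults; in particular the status
 * register (reg 1 = 0x782D) reads back as 10/100-capable with link up and
 * auto-negotiation complete, per the standard MII BMSR bit layout.
 */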
199 /* Check if a packet must be delivered to the emulated chip */
200 static inline int am79c971_handle_mac_addr(struct am79c971_data *d,
201 m_uint8_t *pkt)
202 {
203 n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt;
204
205 /* Systematically accept frames if we are running in promiscuous mode */
206 if (d->csr[15] & AM79C971_CSR15_PROM)
207 return(TRUE);
208
209 /* Systematically accept all multicast frames */
210 if (eth_addr_is_mcast(&hdr->daddr))
211 return(TRUE);
212
213 /* Accept frames addressed directly to us, discard others */
214 if (!memcmp(&d->mac_addr,&hdr->daddr,N_ETH_ALEN))
215 return(TRUE);
216
217 return(FALSE);
218 }
219
220 /* Update the Interrupt Flag bit of csr0 */
221 static void am79c971_update_intr_flag(struct am79c971_data *d)
222 {
223 m_uint32_t mask;
224
225 mask = d->csr[3] & AM79C971_CSR3_IM_MASK;
226
227 if (d->csr[0] & mask)
228 d->csr[0] |= AM79C971_CSR0_INTR;
229 }
230
231 /* Trigger an interrupt */
232 static int am79c971_trigger_irq(struct am79c971_data *d)
233 {
234 if (d->csr[0] & (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA)) {
235 pci_dev_trigger_irq(d->vm,d->pci_dev);
236 return(TRUE);
237 }
238
239 return(FALSE);
240 }
241
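/*
 * Interrupt model: event handlers set the relevant event bit in CSR0, call
 * am79c971_update_intr_flag() to refresh the INTR summary bit against the
 * CSR3 interrupt masks, then call am79c971_trigger_irq(), which asserts the
 * PCI interrupt line through pci_dev_trigger_irq().
 */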
242 /* Update RX/TX ON bits of csr0 */
243 static void am79c971_update_rx_tx_on_bits(struct am79c971_data *d)
244 {
245 /*
246 * Set RX ON if DRX in csr15 is cleared, and set TX ON if DTX
247 * in csr15 is cleared. The START bit must be set.
248 */
249 d->csr[0] &= ~(AM79C971_CSR0_RXON|AM79C971_CSR0_TXON);
250
251 if (d->csr[0] & AM79C971_CSR0_STRT) {
252 if (!(d->csr[15] & AM79C971_CSR15_DRX))
253 d->csr[0] |= AM79C971_CSR0_RXON;
254
255 if (!(d->csr[15] & AM79C971_CSR15_DTX))
256 d->csr[0] |= AM79C971_CSR0_TXON;
257 }
258 }
259
260 /* Update RX/TX descriptor lengths */
261 static void am79c971_update_rx_tx_len(struct am79c971_data *d)
262 {
263 d->rx_len = 1 << d->rx_l2len;
264 d->tx_len = 1 << d->tx_l2len;
265
266 /* Normalize ring sizes */
267 if (d->rx_len > 512) d->rx_len = 512;
268 if (d->tx_len > 512) d->tx_len = 512;
269 }
270
271 /* Fetch the initialization block from memory */
272 static int am79c971_fetch_init_block(struct am79c971_data *d)
273 {
274 m_uint32_t ib[AM79C971_INIT_BLOCK_LEN];
275 m_uint32_t ib_addr,ib_tmp;
276
277 /* The init block address is contained in csr1 (low) and csr2 (high) */
278 ib_addr = (d->csr[2] << 16) | d->csr[1];
279
280 if (!ib_addr) {
281 AM79C971_LOG(d,"trying to fetch init block at address 0...\n");
282 return(-1);
283 }
284
285 AM79C971_LOG(d,"fetching init block at address 0x%8.8x\n",ib_addr);
286 physmem_copy_from_vm(d->vm,ib,ib_addr,sizeof(ib));
287
288 /* Extract RX/TX ring addresses */
289 d->rx_start = vmtoh32(ib[5]);
290 d->tx_start = vmtoh32(ib[6]);
291
292 /* Set csr15 from mode field */
293 ib_tmp = vmtoh32(ib[0]);
294 d->csr[15] = ib_tmp & 0xffff;
295
296 /* Extract RX/TX ring sizes */
297 d->rx_l2len = (ib_tmp >> 20) & 0x0F;
298 d->tx_l2len = (ib_tmp >> 28) & 0x0F;
299 am79c971_update_rx_tx_len(d);
300
301 AM79C971_LOG(d,"rx_ring = 0x%8.8x (%u), tx_ring = 0x%8.8x (%u)\n",
302 d->rx_start,d->rx_len,d->tx_start,d->tx_len);
303
304 /* Get the physical MAC address */
305 ib_tmp = vmtoh32(ib[1]);
306 d->csr[12] = ib_tmp & 0xFFFF;
307 d->csr[13] = ib_tmp >> 16;
308
309 d->mac_addr.eth_addr_byte[3] = (ib_tmp >> 24) & 0xFF;
310 d->mac_addr.eth_addr_byte[2] = (ib_tmp >> 16) & 0xFF;
311 d->mac_addr.eth_addr_byte[1] = (ib_tmp >> 8) & 0xFF;
312 d->mac_addr.eth_addr_byte[0] = ib_tmp & 0xFF;
313
314 ib_tmp = vmtoh32(ib[2]);
315 d->csr[14] = ib_tmp & 0xFFFF;
316 d->mac_addr.eth_addr_byte[5] = (ib_tmp >> 8) & 0xFF;
317 d->mac_addr.eth_addr_byte[4] = ib_tmp & 0xFF;
318
319 /*
320 * Mark the initialization as done in csr0.
321 */
322 d->csr[0] |= AM79C971_CSR0_IDON;
323
324 /* Update RX/TX ON bits of csr0 since csr15 has been modified */
325 am79c971_update_rx_tx_on_bits(d);
326 AM79C971_LOG(d,"CSR0 = 0x%4.4x\n",d->csr[0]);
327
328 am79c971_update_intr_flag(d);
329
330 if (am79c971_trigger_irq(d))
331 AM79C971_LOG(d,"triggering IDON interrupt\n");
332
333 return(0);
334 }
335
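/*
 * Layout of the initialization block as read above: ib[0] holds MODE in its
 * low 16 bits (copied to CSR15) plus the log2 ring sizes in bits 23:20 (RX)
 * and 31:28 (TX); ib[1]/ib[2] hold the 6-byte MAC address (also mirrored
 * into CSR12-CSR14); ib[5] and ib[6] are the RX and TX descriptor ring base
 * addresses. ib[3]/ib[4] (the logical address filter) are ignored by this
 * emulation. The local ib[] buffer is dimensioned in 32-bit words using
 * AM79C971_INIT_BLOCK_LEN (0x1c), so more guest memory is copied than the
 * seven dwords actually used.
 */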
336 /* RDP (Register Data Port) access */
337 static void am79c971_rdp_access(cpu_mips_t *cpu,struct am79c971_data *d,
338 u_int op_type,m_uint64_t *data)
339 {
340 m_uint32_t mask;
341
342 #if DEBUG_CSR_REGS
343 if (op_type == MTS_READ) {
344 cpu_log(cpu,d->name,"read access to CSR %d\n",d->rap);
345 } else {
346 cpu_log(cpu,d->name,"write access to CSR %d, value=0x%x\n",d->rap,*data);
347 }
348 #endif
349
350 switch(d->rap) {
351 case 0: /* CSR0: Controller Status and Control Register */
352 if (op_type == MTS_READ) {
353 //AM79C971_LOG(d,"reading CSR0 (val=0x%4.4x)\n",d->csr[0]);
354 *data = d->csr[0];
355 } else {
356 /*
357 * The STOP bit clears other bits.
358 * It has precedence over INIT and START bits.
359 */
360 if (*data & AM79C971_CSR0_STOP) {
361 //AM79C971_LOG(d,"stopping interface!\n");
362 d->csr[0] = AM79C971_CSR0_STOP;
363 d->tx_pos = d->rx_pos = 0;
364 break;
365 }
366
367 /* These bits are cleared when set to 1 */
368 mask = AM79C971_CSR0_BABL | AM79C971_CSR0_CERR;
369 mask |= AM79C971_CSR0_MISS | AM79C971_CSR0_MERR;
370 mask |= AM79C971_CSR0_RINT | AM79C971_CSR0_TINT;
371 mask |= AM79C971_CSR0_IDON;
372 d->csr[0] &= ~(*data & mask);
373
374 /* Save the Interrupt Enable bit */
375 d->csr[0] |= *data & AM79C971_CSR0_IENA;
376
377 /* If INIT bit is set, fetch the initialization block */
378 if (*data & AM79C971_CSR0_INIT) {
379 d->csr[0] |= AM79C971_CSR0_INIT;
380 d->csr[0] &= ~AM79C971_CSR0_STOP;
381 am79c971_fetch_init_block(d);
382 }
383
384 /* If STRT bit is set, clear the stop bit */
385 if (*data & AM79C971_CSR0_STRT) {
386 //AM79C971_LOG(d,"enabling interface!\n");
387 d->csr[0] |= AM79C971_CSR0_STRT;
388 d->csr[0] &= ~AM79C971_CSR0_STOP;
389 am79c971_update_rx_tx_on_bits(d);
390 }
391 }
392 break;
393
394 case 6: /* CSR6: RX/TX Descriptor Table Length */
395 if (op_type == MTS_WRITE) {
396 d->rx_l2len = (*data >> 8) & 0x0F;
397 d->tx_l2len = (*data >> 12) & 0x0F;
398 am79c971_update_rx_tx_len(d);
399 } else {
400 *data = (d->tx_l2len << 12) | (d->rx_l2len << 8);
401 }
402 break;
403
404 case 15: /* CSR15: Mode */
405 if (op_type == MTS_WRITE) {
406 d->csr[15] = *data;
407 am79c971_update_rx_tx_on_bits(d);
408 } else {
409 *data = d->csr[15];
410 }
411 break;
412
413 case 88:
414 if (op_type == MTS_READ) {
415 switch(d->type) {
416 case AM79C971_TYPE_100BASE_TX:
417 *data = 0x2623003;
418 break;
419 default:
420 *data = 0;
421 break;
422 }
423 }
424 break;
425
426 default:
427 if (op_type == MTS_READ) {
428 *data = d->csr[d->rap];
429 } else {
430 d->csr[d->rap] = *data;
431 }
432
433 #if DEBUG_UNKNOWN
434 if (op_type == MTS_READ) {
435 cpu_log(cpu,d->name,"read access to unknown CSR %d\n",d->rap);
436 } else {
437 cpu_log(cpu,d->name,"write access to unknown CSR %d, value=0x%x\n",
438 d->rap,*data);
439 }
440 #endif
441 }
442 }
443
444 /* BDP (BCR Data Port) access */
445 static void am79c971_bdp_access(cpu_mips_t *cpu,struct am79c971_data *d,
446 u_int op_type,m_uint64_t *data)
447 {
448 u_int mii_phy,mii_reg;
449
450 #if DEBUG_BCR_REGS
451 if (op_type == MTS_READ) {
452 cpu_log(cpu,d->name,"read access to BCR %d\n",d->rap);
453 } else {
454 cpu_log(cpu,d->name,"write access to BCR %d, value=0x%x\n",d->rap,*data);
455 }
456 #endif
457
458 switch(d->rap) {
459 case 9:
460 if (op_type == MTS_READ)
461 *data = 1;
462 break;
463
464 case 34: /* BCR34: MII Management Data Register */
465 mii_phy = (d->bcr[33] >> 5) & 0x1F;
466 mii_reg = (d->bcr[33] >> 0) & 0x1F;
467
468 if (op_type == MTS_READ)
469 *data = mii_reg_read(d,mii_phy,mii_reg);
470 //else
471 //mii_reg_write(d,mii_phy,mii_reg,*data);
472 break;
473
474 default:
475 if (op_type == MTS_READ) {
476 *data = d->bcr[d->rap];
477 } else {
478 d->bcr[d->rap] = *data;
479 }
480
481 #if DEBUG_UNKNOWN
482 if (op_type == MTS_READ) {
483 cpu_log(cpu,d->name,"read access to unknown BCR %d\n",d->rap);
484 } else {
485 cpu_log(cpu,d->name,"write access to unknown BCR %d, value=0x%x\n",
486 d->rap,*data);
487 }
488 #endif
489 }
490 }
491
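/*
 * Register window decoded by dev_am79c971_access() below: offset 0x14 is the
 * RAP (it selects which CSR/BCR number the data ports address), offset 0x10
 * is the RDP (CSR data) and offset 0x1c is the BDP (BCR data). A guest read
 * of CSR0 therefore looks roughly like this (sketch only; "base", "writel"
 * and "readl" are hypothetical driver helpers):
 *
 *    writel(base + 0x14, 0);          select CSR0 through the RAP
 *    status = readl(base + 0x10);     read CSR0 through the RDP
 */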
492 /*
493 * dev_am79c971_access()
494 */
495 void *dev_am79c971_access(cpu_mips_t *cpu,struct vdevice *dev,
496 m_uint32_t offset,u_int op_size,u_int op_type,
497 m_uint64_t *data)
498 {
499 struct am79c971_data *d = dev->priv_data;
500
501 if (op_type == MTS_READ)
502 *data = 0;
503
504 #if DEBUG_ACCESS
505 if (op_type == MTS_READ) {
506 cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
507 offset,cpu->pc,op_size);
508 } else {
509 cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
510 "val=0x%llx, size=%u\n",offset,cpu->pc,*data,op_size);
511 }
512 #endif
513
514 switch(offset) {
515 case 0x14: /* RAP (Register Address Pointer) */
516 if (op_type == MTS_WRITE) {
517 d->rap = *data & 0xFF;
518 } else {
519 *data = d->rap;
520 }
521 break;
522
523 case 0x10: /* RDP (Register Data Port) */
524 am79c971_rdp_access(cpu,d,op_type,data);
525 break;
526
527 case 0x1c: /* BDP (BCR Data Port) */
528 am79c971_bdp_access(cpu,d,op_type,data);
529 break;
530 }
531
532 return NULL;
533 }
534
535 /* Read a RX descriptor */
536 static int rxdesc_read(struct am79c971_data *d,m_uint32_t rxd_addr,
537 struct rx_desc *rxd)
538 {
539 m_uint32_t buf[4];
540 m_uint8_t sw_style;
541
542 /* Get the software style */
543 sw_style = d->bcr[20];
544
545 /* Read the descriptor from VM physical RAM */
546 physmem_copy_from_vm(d->vm,&buf,rxd_addr,sizeof(struct rx_desc));
547
548 switch(sw_style) {
549 case 2:
550 rxd->rmd[0] = vmtoh32(buf[0]); /* rb addr */
551 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
552 rxd->rmd[2] = vmtoh32(buf[2]); /* rfrtag, mcnt, ... */
553 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
554 break;
555
556 case 3:
557 rxd->rmd[0] = vmtoh32(buf[2]); /* rb addr */
558 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
559 rxd->rmd[2] = vmtoh32(buf[0]); /* rfrtag, mcnt, ... */
560 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
561 break;
562
563 default:
564 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
565 return(-1);
566 }
567
568 return(0);
569 }
570
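/*
 * The "software style" in BCR20 selects the descriptor layout in guest
 * memory: style 2 keeps the natural dword order, while style 3 swaps the
 * first and third dwords, which is why rxdesc_read() above and txdesc_read()
 * below reorder buf[] before use. Any other style is rejected.
 */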
571 /* Set the address of the next RX descriptor */
572 static inline void rxdesc_set_next(struct am79c971_data *d)
573 {
574 d->rx_pos++;
575
576 if (d->rx_pos == d->rx_len)
577 d->rx_pos = 0;
578 }
579
580 /* Compute the address of the current RX descriptor */
581 static inline m_uint32_t rxdesc_get_current(struct am79c971_data *d)
582 {
583 return(d->rx_start + (d->rx_pos * sizeof(struct rx_desc)));
584 }
585
586 /* Put a packet in the buffer of a descriptor */
587 static void rxdesc_put_pkt(struct am79c971_data *d,struct rx_desc *rxd,
588 u_char **pkt,ssize_t *pkt_len)
589 {
590 ssize_t len,cp_len;
591
592 /* Compute the data length to copy */
593 len = ~((rxd->rmd[1] & AM79C971_RMD1_LEN) - 1);
594 len &= AM79C971_RMD1_LEN;
595 cp_len = m_min(len,*pkt_len);
596
597 /* Copy packet data to the VM physical RAM */
598 #if DEBUG_RECEIVE
599 AM79C971_LOG(d,"am79c971_handle_rxring: storing %u bytes at 0x%8.8x\n",
600 cp_len, rxd->rmd[0]);
601 #endif
602 physmem_copy_to_vm(d->vm,*pkt,rxd->rmd[0],cp_len);
603
604 *pkt += cp_len;
605 *pkt_len -= cp_len;
606 }
607
608 /*
609 * Put a packet in the RX ring.
610 */
611 static int am79c971_receive_pkt(struct am79c971_data *d,
612 u_char *pkt,ssize_t pkt_len)
613 {
614 m_uint32_t rx_start,rx_current,rx_next,rxdn_rmd1;
615 struct rx_desc rxd0,rxdn,*rxdc;
616 ssize_t tot_len = pkt_len;
617 u_char *pkt_ptr = pkt;
618 m_uint8_t sw_style;
619 int i;
620
621 /* Truncate the packet if it is too big */
622 pkt_len = m_min(pkt_len,AM79C971_MAX_PKT_SIZE);
623
624 /* Copy the current rxring descriptor */
625 rx_start = rx_current = rxdesc_get_current(d);
626 rxdesc_read(d,rx_start,&rxd0);
627
628 /* We must have the first descriptor... */
629 if (!(rxd0.rmd[1] & AM79C971_RMD1_OWN))
630 return(FALSE);
631
632 for(i=0,rxdc=&rxd0;;i++)
633 {
634 #if DEBUG_RECEIVE
635 AM79C971_LOG(d,"am79c971_handle_rxring: i=%d, addr=0x%8.8x: "
636 "rmd[0]=0x%x, rmd[1]=0x%x, rmd[2]=0x%x, rmd[3]=0x%x\n",
637 i,rx_current,
638 rxdc->rmd[0],rxdc->rmd[1],rxdc->rmd[2],rxdc->rmd[3]);
639 #endif
640 /* Put data into the descriptor buffer */
641 rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);
642
643 /* Go to the next descriptor */
644 rxdesc_set_next(d);
645
646 /* If this is not the first descriptor, clear the OWN bit */
647 if (i != 0)
648 rxdc->rmd[1] &= ~AM79C971_RMD1_OWN;
649
650 /* If we have finished, mark the descriptor as end of packet */
651 if (tot_len == 0) {
652 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
653 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
654
655 /* Get the software style */
656 sw_style = d->bcr[20];
657
658 /* Update the message byte count field */
659 rxdc->rmd[2] &= ~AM79C971_RMD2_LEN;
660 rxdc->rmd[2] |= pkt_len + 4;
661
662 switch(sw_style) {
663 case 2:
664 physmem_copy_u32_to_vm(d->vm,rx_current+8,rxdc->rmd[2]);
665 break;
666 case 3:
667 physmem_copy_u32_to_vm(d->vm,rx_current,rxdc->rmd[2]);
668 break;
669 default:
670 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
671 }
672
673 break;
674 }
675
676 /* Try to acquire the next descriptor */
677 rx_next = rxdesc_get_current(d);
678 rxdn_rmd1 = physmem_copy_u32_from_vm(d->vm,rx_next+4);
679
680 if (!(rxdn_rmd1 & AM79C971_RMD1_OWN)) {
681 rxdc->rmd[1] |= AM79C971_RMD1_ERR | AM79C971_RMD1_BUFF;
682 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
683 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
684 break;
685 }
686
687 /* Update rmd1 to store change of OWN bit */
688 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
689
690 /* Read the next descriptor from VM physical RAM */
691 rxdesc_read(d,rx_next,&rxdn);
692 rxdc = &rxdn;
693 rx_current = rx_next;
694 }
695
696 /* Update the first RX descriptor */
697 rxd0.rmd[1] &= ~AM79C971_RMD1_OWN;
698 rxd0.rmd[1] |= AM79C971_RMD1_STP;
699 physmem_copy_u32_to_vm(d->vm,rx_start+4,rxd0.rmd[1]);
700
701 d->csr[0] |= AM79C971_CSR0_RINT;
702 am79c971_update_intr_flag(d);
703 am79c971_trigger_irq(d);
704 return(TRUE);
705 }
706
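/*
 * Summary of the RX path above: the frame is spread over as many descriptors
 * as needed; intermediate descriptors have their OWN bit cleared as they are
 * filled, the last one also gets ENP and the message byte count (packet
 * length + 4, presumably accounting for the FCS that is not actually
 * computed), and the first descriptor keeps OWN/STP until the very end so
 * the guest never sees a partially written chain. If the chip runs out of
 * owned descriptors, BUFF/ERR/ENP are flagged instead. RINT is then raised.
 */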
707 /* Handle the RX ring */
708 static int am79c971_handle_rxring(netio_desc_t *nio,
709 u_char *pkt,ssize_t pkt_len,
710 struct am79c971_data *d)
711 {
712 n_eth_hdr_t *hdr;
713
714 /*
715 * Don't start reception if the RX ring address has not been set
716 * or if RX ON is not set.
717 */
718 if ((d->rx_start == 0) || !(d->csr[0] & AM79C971_CSR0_RXON))
719 return(FALSE);
720
721 #if DEBUG_RECEIVE
722 AM79C971_LOG(d,"receiving a packet of %d bytes\n",pkt_len);
723 mem_dump(log_file,pkt,pkt_len);
724 #endif
725
726 /*
727 * Receive only multicast/broadcast traffic + unicast traffic
728 * for this virtual machine.
729 */
730 hdr = (n_eth_hdr_t *)pkt;
731 if (am79c971_handle_mac_addr(d,pkt))
732 am79c971_receive_pkt(d,pkt,pkt_len);
733
734 return(TRUE);
735 }
736
737 /* Read a TX descriptor */
738 static int txdesc_read(struct am79c971_data *d,m_uint32_t txd_addr,
739 struct tx_desc *txd)
740 {
741 m_uint32_t buf[4];
742 m_uint8_t sw_style;
743
744 /* Get the software style */
745 sw_style = d->bcr[20];
746
747 /* Read the descriptor from VM physical RAM */
748 physmem_copy_from_vm(d->vm,&buf,txd_addr,sizeof(struct tx_desc));
749
750 switch(sw_style) {
751 case 2:
752 txd->tmd[0] = vmtoh32(buf[0]); /* tb addr */
753 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
754 txd->tmd[2] = vmtoh32(buf[2]); /* buff, uflo, ... */
755 txd->tmd[3] = vmtoh32(buf[3]); /* user */
756 break;
757
758 case 3:
759 txd->tmd[0] = vmtoh32(buf[2]); /* tb addr */
760 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
761 txd->tmd[2] = vmtoh32(buf[0]); /* buff, uflo, ... */
762 txd->tmd[3] = vmtoh32(buf[3]); /* user */
763 break;
764
765 default:
766 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
767 return(-1);
768 }
769
770 return(0);
771 }
772
773 /* Set the address of the next TX descriptor */
774 static inline void txdesc_set_next(struct am79c971_data *d)
775 {
776 d->tx_pos++;
777
778 if (d->tx_pos == d->tx_len)
779 d->tx_pos = 0;
780 }
781
782 /* Compute the address of the current TX descriptor */
783 static inline m_uint32_t txdesc_get_current(struct am79c971_data *d)
784 {
785 return(d->tx_start + (d->tx_pos * sizeof(struct tx_desc)));
786 }
787
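/*
 * Summary of the TX path below: starting at the current ring position, the
 * chip must own the first descriptor; buffers are gathered into pkt[] across
 * the STP..ENP chain using the same two's-complement length decoding as on
 * the RX side, OWN is handed back on intermediate descriptors as they are
 * consumed and on the first descriptor only after the frame has been passed
 * to netio_send(), and TINT is raised at the end. Up to
 * AM79C971_TXRING_PASS_COUNT frames are sent per scanner pass.
 */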
788 /* Handle the TX ring (single packet) */
789 static int am79c971_handle_txring_single(struct am79c971_data *d)
790 {
791 u_char pkt[AM79C971_MAX_PKT_SIZE],*pkt_ptr;
792 struct tx_desc txd0,ctxd,ntxd,*ptxd;
793 m_uint32_t tx_start,tx_current;
794 m_uint32_t clen,tot_len;
795
796 if ((d->tx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON))
797 return(FALSE);
798
799 /* Copy the current txring descriptor */
800 tx_start = tx_current = txdesc_get_current(d);
801 ptxd = &txd0;
802 txdesc_read(d,tx_start,ptxd);
803
804 /* If we don't own the first descriptor, we cannot transmit */
805 if (!(ptxd->tmd[1] & AM79C971_TMD1_OWN))
806 return(FALSE);
807
808 #if DEBUG_TRANSMIT
809 AM79C971_LOG(d,"am79c971_handle_txring: 1st desc: "
810 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
811 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
812 #endif
813
814 /* Empty packet for now */
815 pkt_ptr = pkt;
816 tot_len = 0;
817
818 for(;;) {
819 #if DEBUG_TRANSMIT
820 AM79C971_LOG(d,"am79c971_handle_txring: loop: "
821 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
822 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
823 #endif
824 /* Copy packet data */
825 clen = ~((ptxd->tmd[1] & AM79C971_TMD1_LEN) - 1);
826 clen &= AM79C971_TMD1_LEN;
827 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tmd[0],clen);
828
829 pkt_ptr += clen;
830 tot_len += clen;
831
832 /* Clear the OWN bit if this is not the first descriptor */
833 if (!(ptxd->tmd[1] & AM79C971_TMD1_STP)) {
834 ptxd->tmd[1] &= ~AM79C971_TMD1_OWN;
835 physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->tmd[1]);
836 }
837
838 /* Set the next descriptor */
839 txdesc_set_next(d);
840
841 /* Stop now if end of packet has been reached */
842 if (ptxd->tmd[1] & AM79C971_TMD1_ENP)
843 break;
844
845 /* Read the next descriptor and try to acquire it */
846 tx_current = txdesc_get_current(d);
847 txdesc_read(d,tx_current,&ntxd);
848
849 if (!(ntxd.tmd[1] & AM79C971_TMD1_OWN)) {
850 AM79C971_LOG(d,"am79c971_handle_txring: UNDERFLOW!\n");
851 return(FALSE);
852 }
853
854 memcpy(&ctxd,&ntxd,sizeof(struct tx_desc));
855 ptxd = &ctxd;
856 }
857
858 if (tot_len != 0) {
859 #if DEBUG_TRANSMIT
860 AM79C971_LOG(d,"sending packet of %u bytes\n",tot_len);
861 mem_dump(log_file,pkt,tot_len);
862 #endif
863 /* send it on wire */
864 netio_send(d->nio,pkt,tot_len);
865 }
866
867 /* Clear the OWN flag of the first descriptor */
868 txd0.tmd[1] &= ~AM79C971_TMD1_OWN;
869 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.tmd[1]);
870
871 /* Generate TX interrupt */
872 d->csr[0] |= AM79C971_CSR0_TINT;
873 am79c971_update_intr_flag(d);
874 am79c971_trigger_irq(d);
875 return(TRUE);
876 }
877
878 /* Handle the TX ring */
879 static int am79c971_handle_txring(struct am79c971_data *d)
880 {
881 int i;
882
883 for(i=0;i<AM79C971_TXRING_PASS_COUNT;i++)
884 if (!am79c971_handle_txring_single(d))
885 break;
886
887 return(TRUE);
888 }
889
890 /*
891 * pci_am79c971_read()
892 *
893 * Read a PCI register.
894 */
895 static m_uint32_t pci_am79c971_read(cpu_mips_t *cpu,struct pci_device *dev,
896 int reg)
897 {
898 struct am79c971_data *d = dev->priv_data;
899
900 #if DEBUG_PCI_REGS
901 AM79C971_LOG(d,"read PCI register 0x%x\n",reg);
902 #endif
903
904 switch (reg) {
905 case 0x00:
906 return((AM79C971_PCI_PRODUCT_ID << 16) | AM79C971_PCI_VENDOR_ID);
907 case 0x08:
908 return(0x02000002);
909 case PCI_REG_BAR1:
910 return(d->dev->phys_addr);
911 default:
912 return(0);
913 }
914 }
915
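/*
 * PCI configuration space exposed above: register 0x00 returns the AMD
 * vendor/device IDs (0x1022/0x2000), register 0x08 returns 0x02000002,
 * i.e. class code 0x020000 (Ethernet controller) with revision 2, and BAR1
 * reports the current MMIO base, which pci_am79c971_write() remaps with
 * vm_map_device() when the guest programs it.
 */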
916 /*
917 * pci_am79c971_write()
918 *
919 * Write a PCI register.
920 */
921 static void pci_am79c971_write(cpu_mips_t *cpu,struct pci_device *dev,
922 int reg,m_uint32_t value)
923 {
924 struct am79c971_data *d = dev->priv_data;
925
926 #if DEBUG_PCI_REGS
927 AM79C971_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value);
928 #endif
929
930 switch(reg) {
931 case PCI_REG_BAR1:
932 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
933 AM79C971_LOG(d,"registers are mapped at 0x%x\n",value);
934 break;
935 }
936 }
937
938 /*
939 * dev_am79c971_init()
940 *
941 * Generic AMD Am79c971 initialization code.
942 */
943 struct am79c971_data *
944 dev_am79c971_init(vm_instance_t *vm,char *name,int interface_type,
945 struct pci_bus *pci_bus,int pci_device,int irq)
946 {
947 struct am79c971_data *d;
948 struct pci_device *pci_dev;
949 struct vdevice *dev;
950
951 /* Allocate the private data structure for AM79C971 */
952 if (!(d = malloc(sizeof(*d)))) {
953 fprintf(stderr,"%s (AM79C971): out of memory\n",name);
954 return NULL;
955 }
956
957 memset(d,0,sizeof(*d));
958 memcpy(d->mii_regs[0],mii_reg_values,sizeof(mii_reg_values));
959
960 /* Add as PCI device */
961 pci_dev = pci_dev_add(pci_bus,name,
962 AM79C971_PCI_VENDOR_ID,AM79C971_PCI_PRODUCT_ID,
963 pci_device,0,irq,
964 d,NULL,pci_am79c971_read,pci_am79c971_write);
965
966 if (!pci_dev) {
967 fprintf(stderr,"%s (AM79C971): unable to create PCI device.\n",name);
968 goto err_pci_dev;
969 }
970
971 /* Create the device itself */
972 if (!(dev = dev_create(name))) {
973 fprintf(stderr,"%s (AM79C971): unable to create device.\n",name);
974 goto err_dev;
975 }
976
977 d->name = name;
978 d->vm = vm;
979 d->type = interface_type;
980 d->pci_dev = pci_dev;
981 d->dev = dev;
982
983 dev->phys_addr = 0;
984 dev->phys_len = 0x4000;
985 dev->handler = dev_am79c971_access;
986 dev->priv_data = d;
987 return(d);
988
989 err_dev:
990 pci_dev_remove(pci_dev);
991 err_pci_dev:
992 free(d);
993 return NULL;
994 }
995
996 /* Remove an AMD Am79c971 device */
997 void dev_am79c971_remove(struct am79c971_data *d)
998 {
999 if (d != NULL) {
1000 pci_dev_remove(d->pci_dev);
1001 vm_unbind_device(d->vm,d->dev);
1002 cpu_group_rebuild_mts(d->vm->cpu_group);
1003 free(d->dev);
1004 free(d);
1005 }
1006 }
1007
1008 /* Bind a NIO to an AMD Am79c971 device */
1009 int dev_am79c971_set_nio(struct am79c971_data *d,netio_desc_t *nio)
1010 {
1011 /* check that a NIO is not already bound */
1012 if (d->nio != NULL)
1013 return(-1);
1014
1015 d->nio = nio;
1016 d->tx_tid = ptask_add((ptask_callback)am79c971_handle_txring,d,NULL);
1017 netio_rxl_add(nio,(netio_rx_handler_t)am79c971_handle_rxring,d,NULL);
1018 return(0);
1019 }
1020
1021 /* Unbind a NIO from an AMD Am79c971 device */
1022 void dev_am79c971_unset_nio(struct am79c971_data *d)
1023 {
1024 if (d->nio != NULL) {
1025 ptask_remove(d->tx_tid);
1026 netio_rxl_remove(d->nio);
1027 d->nio = NULL;
1028 }
1029 }
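/*
 * Usage sketch (illustrative): a platform module would typically create the
 * NIC and then bind a NetIO backend to it. The slot/IRQ numbers and the
 * "nio" variable below are assumptions made for the example.
 *
 *    struct am79c971_data *d;
 *
 *    d = dev_am79c971_init(vm,"FastEthernet0/0",AM79C971_TYPE_100BASE_TX,
 *                          pci_bus,1,2);
 *    if (d != NULL)
 *       dev_am79c971_set_nio(d,nio);
 *
 * dev_am79c971_set_nio() registers the TX ring scanner as a periodic task
 * and the RX handler with the NetIO RX listener, so traffic starts flowing
 * once the guest has initialized and started the chip (CSR0 INIT + STRT).
 */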
