/[dynamips]/upstream/dynamips-0.2.6-RC1/dev_am79c971.c
This is repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/dynamips-0.2.6-RC1/dev_am79c971.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2 - (show annotations)
Sat Oct 6 16:03:58 2007 UTC (12 years, 1 month ago) by dpavlin
File MIME type: text/plain
File size: 30192 byte(s)
import dynamips-0.2.6-RC1

1 /*
2 * Cisco C7200 (Predator) AMD Am79c971 Module.
3 * Copyright (C) 2006 Christophe Fillot. All rights reserved.
4 *
5 * AMD Am79c971 FastEthernet chip emulation.
6 */
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <time.h>
14 #include <errno.h>
15 #include <assert.h>
16
17 #include "utils.h"
18 #include "mips64.h"
19 #include "dynamips.h"
20 #include "memory.h"
21 #include "device.h"
22 #include "net.h"
23 #include "net_io.h"
24 #include "ptask.h"
25 #include "dev_am79c971.h"
26
27 /* Debugging flags */
28 #define DEBUG_CSR_REGS 0
29 #define DEBUG_BCR_REGS 0
30 #define DEBUG_PCI_REGS 0
31 #define DEBUG_ACCESS 0
32 #define DEBUG_TRANSMIT 0
33 #define DEBUG_RECEIVE 0
34 #define DEBUG_UNKNOWN 0
35
36 /* AMD Am79c971 PCI vendor/product codes */
37 #define AM79C971_PCI_VENDOR_ID 0x1022
38 #define AM79C971_PCI_PRODUCT_ID 0x2000
39
40 /* Maximum packet size */
41 #define AM79C971_MAX_PKT_SIZE 2048
42
43 /* Send up to 16 packets in a TX ring scan pass */
44 #define AM79C971_TXRING_PASS_COUNT 16
45
46 /* CSR0: Controller Status and Control Register */
47 #define AM79C971_CSR0_ERR 0x00008000 /* Error (BABL,CERR,MISS,MERR) */
48 #define AM79C971_CSR0_BABL 0x00004000 /* Transmitter Timeout Error */
49 #define AM79C971_CSR0_CERR 0x00002000 /* Collision Error */
50 #define AM79C971_CSR0_MISS 0x00001000 /* Missed Frame */
51 #define AM79C971_CSR0_MERR 0x00000800 /* Memory Error */
52 #define AM79C971_CSR0_RINT 0x00000400 /* Receive Interrupt */
53 #define AM79C971_CSR0_TINT 0x00000200 /* Transmit Interrupt */
54 #define AM79C971_CSR0_IDON 0x00000100 /* Initialization Done */
55 #define AM79C971_CSR0_INTR 0x00000080 /* Interrupt Flag */
56 #define AM79C971_CSR0_IENA 0x00000040 /* Interrupt Enable */
57 #define AM79C971_CSR0_RXON 0x00000020 /* Receive On */
58 #define AM79C971_CSR0_TXON 0x00000010 /* Transmit On */
59 #define AM79C971_CSR0_TDMD 0x00000008 /* Transmit Demand */
60 #define AM79C971_CSR0_STOP 0x00000004 /* Stop */
61 #define AM79C971_CSR0_STRT 0x00000002 /* Start */
62 #define AM79C971_CSR0_INIT 0x00000001 /* Initialization */
63
64 /* CSR3: Interrupt Masks and Deferral Control */
65 #define AM79C971_CSR3_BABLM 0x00004000 /* Transmit. Timeout Int. Mask */
66 #define AM79C971_CSR3_CERRM 0x00002000 /* Collision Error Int. Mask*/
67 #define AM79C971_CSR3_MISSM 0x00001000 /* Missed Frame Interrupt Mask */
68 #define AM79C971_CSR3_MERRM 0x00000800 /* Memory Error Interrupt Mask */
69 #define AM79C971_CSR3_RINTM 0x00000400 /* Receive Interrupt Mask */
70 #define AM79C971_CSR3_TINTM 0x00000200 /* Transmit Interrupt Mask */
71 #define AM79C971_CSR3_IDONM 0x00000100 /* Initialization Done Mask */
72 #define AM79C971_CSR3_BSWP 0x00000004 /* Byte Swap */
73 #define AM79C971_CSR3_IM_MASK 0x00007F00 /* Interrupt Masks for CSR3 */
74
/* CSR5: Extended Control and Interrupt 1 */
#define AM79C971_CSR5_TOKINTD 0x00008000 /* Transmit OK Interrupt Disable */
#define AM79C971_CSR5_SPND 0x00000001 /* Suspend */

/* CSR15: Mode */
#define AM79C971_CSR15_PROM 0x00008000 /* Promiscuous Mode */
#define AM79C971_CSR15_DRCVBC 0x00004000 /* Disable Receive Broadcast */
#define AM79C971_CSR15_DRCVPA 0x00002000 /* Disable Receive PHY address */
#define AM79C971_CSR15_DTX 0x00000002 /* Disable Transmit */
#define AM79C971_CSR15_DRX 0x00000001 /* Disable Receive */
85
86 /* AMD 79C971 Initialization block length */
87 #define AM79C971_INIT_BLOCK_LEN 0x1c
88
/* RX descriptors (RMD1/RMD2 bit definitions) */
#define AM79C971_RMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
#define AM79C971_RMD1_ERR 0x40000000 /* Error */
#define AM79C971_RMD1_FRAM 0x20000000 /* Framing Error */
#define AM79C971_RMD1_OFLO 0x10000000 /* Overflow Error */
#define AM79C971_RMD1_CRC 0x08000000 /* Invalid CRC */
/* Bug fix: BUFF is bit 26 (0x04000000); it previously collided with CRC */
#define AM79C971_RMD1_BUFF 0x04000000 /* Buffer Error (chaining) */
#define AM79C971_RMD1_STP 0x02000000 /* Start of Packet */
#define AM79C971_RMD1_ENP 0x01000000 /* End of Packet */
#define AM79C971_RMD1_BPE 0x00800000 /* Bus Parity Error */
#define AM79C971_RMD1_PAM 0x00400000 /* Physical Address Match */
#define AM79C971_RMD1_LAFM 0x00200000 /* Logical Addr. Filter Match */
#define AM79C971_RMD1_BAM 0x00100000 /* Broadcast Address Match */
#define AM79C971_RMD1_LEN 0x00000FFF /* Buffer Length */

#define AM79C971_RMD2_LEN 0x00000FFF /* Received byte count */
105
106 /* TX descriptors */
107 #define AM79C971_TMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
108 #define AM79C971_TMD1_ERR 0x40000000 /* Error */
109 #define AM79C971_TMD1_ADD_FCS 0x20000000 /* FCS generation */
110 #define AM79C971_TMD1_STP 0x02000000 /* Start of Packet */
111 #define AM79C971_TMD1_ENP 0x01000000 /* End of Packet */
112 #define AM79C971_TMD1_LEN 0x00000FFF /* Buffer Length */
113
/* RX Descriptor: four 32-bit receive message descriptor words (rmd0-3) */
struct rx_desc {
   m_uint32_t rmd[4];
};
118
/* TX Descriptor: four 32-bit transmit message descriptor words (tmd0-3) */
struct tx_desc {
   m_uint32_t tmd[4];
};
123
/* AMD 79C971 Data: per-instance state of one emulated NIC */
struct am79c971_data {
   char *name;

   /* Interface type (10baseT or 100baseTX) */
   int type;

   /* Current RAP (Register Address Pointer) value; selects which
      CSR/BCR the RDP/BDP data ports address */
   m_uint8_t rap;

   /* CSR and BCR registers, indexed by the 8-bit RAP (hence 256 slots) */
   m_uint32_t csr[256],bcr[256];

   /* RX/TX rings start addresses (guest physical) */
   m_uint32_t rx_start,tx_start;

   /* RX/TX number of descriptors (log2) */
   m_uint32_t rx_l2len,tx_l2len;

   /* RX/TX number of descriptors (derived, capped at 512) */
   m_uint32_t rx_len,tx_len;

   /* RX/TX ring positions (current descriptor index) */
   m_uint32_t rx_pos,tx_pos;

   /* MII registers: 32 PHY addresses x 32 registers each */
   m_uint16_t mii_regs[32][32];

   /* Physical (MAC) address */
   n_eth_addr_t mac_addr;

   /* Device information (memory-mapped register window) */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Virtual machine */
   vm_instance_t *vm;

   /* NetIO descriptor (backend used to send/receive frames) */
   netio_desc_t *nio;

   /* TX ring scanner task id */
   ptask_id_t tx_tid;
};
170
171 /* Log an am79c971 message */
172 #define AM79C971_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
173
174
/* Default MII register values exposed on PHY 0 (reset state).
   NOTE(review): reg 4 = 0x01E1 presumably advertises 10/100 capability
   -- verify against the PHY register map of the datasheet. */
static m_uint16_t mii_reg_values[32] = {
   0x1000, 0x782D, 0x2000, 0x5C01, 0x01E1, 0x0000, 0x0000, 0x0000,
   0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
   0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8060,
   0x8020, 0x0820, 0x0000, 0x3800, 0xA3B9, 0x0000, 0x0000, 0x0000,
};
181
182 /* Read a MII register */
183 static m_uint16_t mii_reg_read(struct am79c971_data *d,u_int phy,u_int reg)
184 {
185 if ((phy >= 32) || (reg >= 32))
186 return(0);
187
188 return(d->mii_regs[phy][reg]);
189 }
190
191 /* Write a MII register */
192 static void mii_reg_write(struct am79c971_data *d,u_int phy,u_int reg,
193 m_uint16_t value)
194 {
195 if ((phy < 32) && (reg < 32))
196 d->mii_regs[phy][reg] = value;
197 }
198
199 /* Check if a packet must be delivered to the emulated chip */
200 static inline int am79c971_handle_mac_addr(struct am79c971_data *d,
201 m_uint8_t *pkt)
202 {
203 n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt;
204
205 /* Ignore traffic sent by us */
206 if (!memcmp(&d->mac_addr,&hdr->saddr,N_ETH_ALEN))
207 return(FALSE);
208
209 /* Accept systematically frames if we are running is promiscuous mode */
210 if (d->csr[15] & AM79C971_CSR15_PROM)
211 return(TRUE);
212
213 /* Accept systematically all multicast frames */
214 if (eth_addr_is_mcast(&hdr->daddr))
215 return(TRUE);
216
217 /* Accept frames directly for us, discard others */
218 if (!memcmp(&d->mac_addr,&hdr->daddr,N_ETH_ALEN))
219 return(TRUE);
220
221 return(FALSE);
222 }
223
/* Update the Interrupt Flag bit of csr0 */
static void am79c971_update_intr_flag(struct am79c971_data *d)
{
   m_uint32_t mask;

   /* csr3 bits 8-14 gate the per-cause interrupt bits of csr0 */
   mask = d->csr[3] & AM79C971_CSR3_IM_MASK;

   /* NOTE(review): INTR is only ever set here, never cleared when all
      causes are acknowledged; also the datasheet defines CSR3 bits as
      *masks* (1 = disabled) while this code treats set bits as enables
      -- confirm against the Am79C971 datasheet before changing. */
   if (d->csr[0] & mask)
      d->csr[0] |= AM79C971_CSR0_INTR;
}
234
235 /* Trigger an interrupt */
236 static int am79c971_trigger_irq(struct am79c971_data *d)
237 {
238 if (d->csr[0] & (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA)) {
239 pci_dev_trigger_irq(d->vm,d->pci_dev);
240 return(TRUE);
241 }
242
243 return(FALSE);
244 }
245
246 /* Update RX/TX ON bits of csr0 */
247 static void am79c971_update_rx_tx_on_bits(struct am79c971_data *d)
248 {
249 /*
250 * Set RX ON if DRX in csr15 is cleared, and set TX on if DTX
251 * in csr15 is cleared. The START bit must be set.
252 */
253 d->csr[0] &= ~(AM79C971_CSR0_RXON|AM79C971_CSR0_TXON);
254
255 if (d->csr[0] & AM79C971_CSR0_STRT) {
256 if (!(d->csr[15] & AM79C971_CSR15_DRX))
257 d->csr[0] |= AM79C971_CSR0_RXON;
258
259 if (!(d->csr[15] & AM79C971_CSR15_DTX))
260 d->csr[0] |= AM79C971_CSR0_TXON;
261 }
262 }
263
264 /* Update RX/TX descriptor lengths */
265 static void am79c971_update_rx_tx_len(struct am79c971_data *d)
266 {
267 d->rx_len = 1 << d->rx_l2len;
268 d->tx_len = 1 << d->tx_l2len;
269
270 /* Normalize ring sizes */
271 if (d->rx_len > 512) d->rx_len = 512;
272 if (d->tx_len > 512) d->tx_len = 512;
273 }
274
275 /* Fetch the initialization block from memory */
276 static int am79c971_fetch_init_block(struct am79c971_data *d)
277 {
278 m_uint32_t ib[AM79C971_INIT_BLOCK_LEN];
279 m_uint32_t ib_addr,ib_tmp;
280
281 /* The init block address is contained in csr1 (low) and csr2 (high) */
282 ib_addr = (d->csr[2] << 16) | d->csr[1];
283
284 if (!ib_addr) {
285 AM79C971_LOG(d,"trying to fetch init block at address 0...\n");
286 return(-1);
287 }
288
289 AM79C971_LOG(d,"fetching init block at address 0x%8.8x\n",ib_addr);
290 physmem_copy_from_vm(d->vm,ib,ib_addr,sizeof(ib));
291
292 /* Extract RX/TX ring addresses */
293 d->rx_start = vmtoh32(ib[5]);
294 d->tx_start = vmtoh32(ib[6]);
295
296 /* Set csr15 from mode field */
297 ib_tmp = vmtoh32(ib[0]);
298 d->csr[15] = ib_tmp & 0xffff;
299
300 /* Extract RX/TX ring sizes */
301 d->rx_l2len = (ib_tmp >> 20) & 0x0F;
302 d->tx_l2len = (ib_tmp >> 28) & 0x0F;
303 am79c971_update_rx_tx_len(d);
304
305 AM79C971_LOG(d,"rx_ring = 0x%8.8x (%u), tx_ring = 0x%8.8x (%u)\n",
306 d->rx_start,d->rx_len,d->tx_start,d->tx_len);
307
308 /* Get the physical MAC address */
309 ib_tmp = vmtoh32(ib[1]);
310 d->csr[12] = ib_tmp & 0xFFFF;
311 d->csr[13] = ib_tmp >> 16;
312
313 d->mac_addr.eth_addr_byte[3] = (ib_tmp >> 24) & 0xFF;
314 d->mac_addr.eth_addr_byte[2] = (ib_tmp >> 16) & 0xFF;
315 d->mac_addr.eth_addr_byte[1] = (ib_tmp >> 8) & 0xFF;
316 d->mac_addr.eth_addr_byte[0] = ib_tmp & 0xFF;
317
318 ib_tmp = vmtoh32(ib[2]);
319 d->csr[14] = ib_tmp & 0xFFFF;
320 d->mac_addr.eth_addr_byte[5] = (ib_tmp >> 8) & 0xFF;
321 d->mac_addr.eth_addr_byte[4] = ib_tmp & 0xFF;
322
323 /*
324 * Mark the initialization as done is csr0.
325 */
326 d->csr[0] |= AM79C971_CSR0_IDON;
327
328 /* Update RX/TX ON bits of csr0 since csr15 has been modified */
329 am79c971_update_rx_tx_on_bits(d);
330 AM79C971_LOG(d,"CSR0 = 0x%4.4x\n",d->csr[0]);
331
332 am79c971_update_intr_flag(d);
333
334 if (am79c971_trigger_irq(d))
335 AM79C971_LOG(d,"triggering IDON interrupt\n");
336
337 return(0);
338 }
339
/* RDP (Register Data Port) access: read/write the CSR selected by RAP */
static void am79c971_rdp_access(cpu_mips_t *cpu,struct am79c971_data *d,
                                u_int op_type,m_uint64_t *data)
{
   m_uint32_t mask;

#if DEBUG_CSR_REGS
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to CSR %d\n",d->rap);
   } else {
      cpu_log(cpu,d->name,"write access to CSR %d, value=0x%x\n",d->rap,*data);
   }
#endif

   switch(d->rap) {
      case 0: /* CSR0: Controller Status and Control Register */
         if (op_type == MTS_READ) {
            //AM79C971_LOG(d,"reading CSR0 (val=0x%4.4x)\n",d->csr[0]);
            *data = d->csr[0];
         } else {
            /*
             * The STOP bit clears other bits.
             * It has precedence over INIT and START bits.
             */
            if (*data & AM79C971_CSR0_STOP) {
               //AM79C971_LOG(d,"stopping interface!\n");
               d->csr[0] = AM79C971_CSR0_STOP;
               d->tx_pos = d->rx_pos = 0;
               break;
            }

            /* These bits are cleared when set to 1 (write-1-to-clear) */
            mask = AM79C971_CSR0_BABL | AM79C971_CSR0_CERR;
            mask |= AM79C971_CSR0_MISS | AM79C971_CSR0_MERR;
            mask |= AM79C971_CSR0_RINT | AM79C971_CSR0_TINT;
            mask |= AM79C971_CSR0_IDON;
            d->csr[0] &= ~(*data & mask);

            /* Save the Interrupt Enable bit */
            d->csr[0] |= *data & AM79C971_CSR0_IENA;

            /* If INIT bit is set, fetch the initialization block */
            if (*data & AM79C971_CSR0_INIT) {
               d->csr[0] |= AM79C971_CSR0_INIT;
               d->csr[0] &= ~AM79C971_CSR0_STOP;
               am79c971_fetch_init_block(d);
            }

            /* If STRT bit is set, clear the stop bit */
            if (*data & AM79C971_CSR0_STRT) {
               //AM79C971_LOG(d,"enabling interface!\n");
               d->csr[0] |= AM79C971_CSR0_STRT;
               d->csr[0] &= ~AM79C971_CSR0_STOP;
               am79c971_update_rx_tx_on_bits(d);
            }
         }
         break;

      case 6: /* CSR6: RX/TX Descriptor Table Length */
         /* log2 ring sizes: RX in bits 8-11, TX in bits 12-15 */
         if (op_type == MTS_WRITE) {
            d->rx_l2len = (*data >> 8) & 0x0F;
            d->tx_l2len = (*data >> 12) & 0x0F;
            am79c971_update_rx_tx_len(d);
         } else {
            *data = (d->tx_l2len << 12) | (d->rx_l2len << 8);
         }
         break;

      case 15: /* CSR15: Mode */
         if (op_type == MTS_WRITE) {
            d->csr[15] = *data;
            am79c971_update_rx_tx_on_bits(d);
         } else {
            *data = d->csr[15];
         }
         break;

      case 88:
         /* NOTE(review): presumably the Chip ID register; 0x2623003
            looks like an Am79C971 part-number encoding -- verify
            against the datasheet. */
         if (op_type == MTS_READ) {
            switch(d->type) {
               case AM79C971_TYPE_100BASE_TX:
                  *data = 0x2623003;
                  break;
               default:
                  *data = 0;
                  break;
            }
         }
         break;

      default:
         /* All other CSRs behave as plain storage */
         if (op_type == MTS_READ) {
            *data = d->csr[d->rap];
         } else {
            d->csr[d->rap] = *data;
         }

#if DEBUG_UNKNOWN
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,"read access to unknown CSR %d\n",d->rap);
         } else {
            cpu_log(cpu,d->name,"write access to unknown CSR %d, value=0x%x\n",
                    d->rap,*data);
         }
#endif
   }
}
447
/* BDP (BCR Data Port) access: read/write the BCR selected by RAP */
static void am79c971_bdp_access(cpu_mips_t *cpu,struct am79c971_data *d,
                                u_int op_type,m_uint64_t *data)
{
   u_int mii_phy,mii_reg;

#if DEBUG_BCR_REGS
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to BCR %d\n",d->rap);
   } else {
      cpu_log(cpu,d->name,"write access to BCR %d, value=0x%x\n",d->rap,*data);
   }
#endif

   switch(d->rap) {
      case 9:
         /* NOTE(review): BCR9 always reads back as 1 here -- presumably
            the Full-Duplex Control register; confirm the intent. */
         if (op_type == MTS_READ)
            *data = 1;
         break;

      case 34: /* BCR34: MII Management Data Register */
         /* BCR33 selects the PHY address (bits 5-9) and the register
            number (bits 0-4) for MII management accesses */
         mii_phy = (d->bcr[33] >> 5) & 0x1F;
         mii_reg = (d->bcr[33] >> 0) & 0x1F;

         if (op_type == MTS_READ)
            *data = mii_reg_read(d,mii_phy,mii_reg);
         /* MII register writes are disabled (left commented out) */
         //else
         //mii_reg_write(d,mii_phy,mii_reg,*data);
         break;

      default:
         /* All other BCRs behave as plain storage */
         if (op_type == MTS_READ) {
            *data = d->bcr[d->rap];
         } else {
            d->bcr[d->rap] = *data;
         }

#if DEBUG_UNKNOWN
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,"read access to unknown BCR %d\n",d->rap);
         } else {
            cpu_log(cpu,d->name,"write access to unknown BCR %d, value=0x%x\n",
                    d->rap,*data);
         }
#endif
   }
}
495
496 /*
497 * dev_am79c971_access()
498 */
499 void *dev_am79c971_access(cpu_mips_t *cpu,struct vdevice *dev,
500 m_uint32_t offset,u_int op_size,u_int op_type,
501 m_uint64_t *data)
502 {
503 struct am79c971_data *d = dev->priv_data;
504
505 if (op_type == MTS_READ)
506 *data = 0;
507
508 #if DEBUG_ACCESS
509 if (op_type == MTS_READ) {
510 cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
511 offset,cpu->pc,op_size);
512 } else {
513 cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
514 "val=0x%llx, size=%u\n",offset,cpu->pc,*data,op_size);
515 }
516 #endif
517
518 switch(offset) {
519 case 0x14: /* RAP (Register Address Pointer) */
520 if (op_type == MTS_WRITE) {
521 d->rap = *data & 0xFF;
522 } else {
523 *data = d->rap;
524 }
525 break;
526
527 case 0x10: /* RDP (Register Data Port) */
528 am79c971_rdp_access(cpu,d,op_type,data);
529 break;
530
531 case 0x1c: /* BDP (BCR Data Port) */
532 am79c971_bdp_access(cpu,d,op_type,data);
533 break;
534 }
535
536 return NULL;
537 }
538
539 /* Read a RX descriptor */
540 static int rxdesc_read(struct am79c971_data *d,m_uint32_t rxd_addr,
541 struct rx_desc *rxd)
542 {
543 m_uint32_t buf[4];
544 m_uint8_t sw_style;
545
546 /* Get the software style */
547 sw_style = d->bcr[20];
548
549 /* Read the descriptor from VM physical RAM */
550 physmem_copy_from_vm(d->vm,&buf,rxd_addr,sizeof(struct rx_desc));
551
552 switch(sw_style) {
553 case 2:
554 rxd->rmd[0] = vmtoh32(buf[0]); /* rb addr */
555 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
556 rxd->rmd[2] = vmtoh32(buf[2]); /* rfrtag, mcnt, ... */
557 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
558 break;
559
560 case 3:
561 rxd->rmd[0] = vmtoh32(buf[2]); /* rb addr */
562 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
563 rxd->rmd[2] = vmtoh32(buf[0]); /* rfrtag, mcnt, ... */
564 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
565 break;
566
567 default:
568 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
569 return(-1);
570 }
571
572 return(0);
573 }
574
575 /* Set the address of the next RX descriptor */
576 static inline void rxdesc_set_next(struct am79c971_data *d)
577 {
578 d->rx_pos++;
579
580 if (d->rx_pos == d->rx_len)
581 d->rx_pos = 0;
582 }
583
584 /* Compute the address of the current RX descriptor */
585 static inline m_uint32_t rxdesc_get_current(struct am79c971_data *d)
586 {
587 return(d->rx_start + (d->rx_pos * sizeof(struct rx_desc)));
588 }
589
/* Put a packet in buffer of a descriptor, advancing the caller's cursor */
static void rxdesc_put_pkt(struct am79c971_data *d,struct rx_desc *rxd,
                           u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   /* The buffer length is stored in rmd1 as a 12-bit two's complement
      value; undo the encoding to recover the positive byte count. */
   len = ~((rxd->rmd[1] & AM79C971_RMD1_LEN) - 1);
   len &= AM79C971_RMD1_LEN;

   /* Copy at most the remaining packet bytes into this buffer */
   cp_len = m_min(len,*pkt_len);

   /* Copy packet data to the VM physical RAM */
#if DEBUG_RECEIVE
   AM79C971_LOG(d,"am79c971_handle_rxring: storing %u bytes at 0x%8.8x\n",
                cp_len, rxd->rmd[0]);
#endif
   physmem_copy_to_vm(d->vm,*pkt,rxd->rmd[0],cp_len);

   /* Advance past the bytes consumed */
   *pkt += cp_len;
   *pkt_len -= cp_len;
}
611
/*
 * Put a packet in the RX ring.
 *
 * Walks the RX descriptor chain, spreading the packet over as many
 * buffers as needed, writes the descriptors back to guest memory and
 * raises RINT. Returns TRUE if the packet was stored, FALSE if the
 * first descriptor is not owned by the chip.
 */
static int am79c971_receive_pkt(struct am79c971_data *d,
                                u_char *pkt,ssize_t pkt_len)
{
   m_uint32_t rx_start,rx_current,rx_next,rxdn_rmd1;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   m_uint8_t sw_style;
   int i;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,AM79C971_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rx_start = rx_current = rxdesc_get_current(d);
   rxdesc_read(d,rx_start,&rxd0);

   /* We must have the first descriptor... */
   if (!(rxd0.rmd[1] & AM79C971_RMD1_OWN))
      return(FALSE);

   for(i=0,rxdc=&rxd0;;i++)
   {
#if DEBUG_RECEIVE
      AM79C971_LOG(d,"am79c971_handle_rxring: i=%d, addr=0x%8.8x: "
                   "rmd[0]=0x%x, rmd[1]=0x%x, rmd[2]=0x%x, rmd[3]=0x%x\n",
                   i,rx_current,
                   rxdc->rmd[0],rxdc->rmd[1],rxdc->rmd[2],rxdc->rmd[3]);
#endif
      /* Put data into the descriptor buffer */
      rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Go to the next descriptor */
      rxdesc_set_next(d);

      /* If this is not the first descriptor, clear the OWN bit
         (the first descriptor is given back last, with STP, below) */
      if (i != 0)
         rxdc->rmd[1] &= ~AM79C971_RMD1_OWN;

      /* If we have finished, mark the descriptor as end of packet */
      if (tot_len == 0) {
         rxdc->rmd[1] |= AM79C971_RMD1_ENP;
         physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);

         /* Get the software style */
         sw_style = d->bcr[20];

         /* Update the message byte count field
            (+4 presumably accounts for the FCS -- TODO confirm) */
         rxdc->rmd[2] &= ~AM79C971_RMD2_LEN;
         rxdc->rmd[2] |= pkt_len + 4;

         /* rmd2 lives in a different descriptor word per software style */
         switch(sw_style) {
            case 2:
               physmem_copy_u32_to_vm(d->vm,rx_current+8,rxdc->rmd[2]);
               break;
            case 3:
               physmem_copy_u32_to_vm(d->vm,rx_current,rxdc->rmd[2]);
               break;
            default:
               AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
         }

         break;
      }

      /* Try to acquire the next descriptor */
      rx_next = rxdesc_get_current(d);
      rxdn_rmd1 = physmem_copy_u32_from_vm(d->vm,rx_next+4);

      /* No free descriptor: flag a buffer error on the current one */
      if (!(rxdn_rmd1 & AM79C971_RMD1_OWN)) {
         rxdc->rmd[1] |= AM79C971_RMD1_ERR | AM79C971_RMD1_BUFF;
         rxdc->rmd[1] |= AM79C971_RMD1_ENP;
         physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
         break;
      }

      /* Update rmd1 to store change of OWN bit */
      physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rx_next,&rxdn);
      rxdc = &rxdn;
      rx_current = rx_next;
   }

   /* Update the first RX descriptor: give it back with STP set */
   rxd0.rmd[1] &= ~AM79C971_RMD1_OWN;
   rxd0.rmd[1] |= AM79C971_RMD1_STP;
   physmem_copy_u32_to_vm(d->vm,rx_start+4,rxd0.rmd[1]);

   /* Signal the reception: RINT plus a possible IRQ */
   d->csr[0] |= AM79C971_CSR0_RINT;
   am79c971_update_intr_flag(d);
   am79c971_trigger_irq(d);
   return(TRUE);
}
710
711 /* Handle the RX ring */
712 static int am79c971_handle_rxring(netio_desc_t *nio,
713 u_char *pkt,ssize_t pkt_len,
714 struct am79c971_data *d)
715 {
716 n_eth_hdr_t *hdr;
717
718 /*
719 * Don't start receive if the RX ring address has not been set
720 * and if RX ON is not set.
721 */
722 if ((d->rx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON))
723 return(FALSE);
724
725 #if DEBUG_RECEIVE
726 AM79C971_LOG(d,"receiving a packet of %d bytes\n",pkt_len);
727 mem_dump(log_file,pkt,pkt_len);
728 #endif
729
730 /*
731 * Receive only multicast/broadcast trafic + unicast traffic
732 * for this virtual machine.
733 */
734 hdr = (n_eth_hdr_t *)pkt;
735 if (am79c971_handle_mac_addr(d,pkt))
736 am79c971_receive_pkt(d,pkt,pkt_len);
737
738 return(TRUE);
739 }
740
741 /* Read a TX descriptor */
742 static int txdesc_read(struct am79c971_data *d,m_uint32_t txd_addr,
743 struct tx_desc *txd)
744 {
745 m_uint32_t buf[4];
746 m_uint8_t sw_style;
747
748 /* Get the software style */
749 sw_style = d->bcr[20];
750
751 /* Read the descriptor from VM physical RAM */
752 physmem_copy_from_vm(d->vm,&buf,txd_addr,sizeof(struct tx_desc));
753
754 switch(sw_style) {
755 case 2:
756 txd->tmd[0] = vmtoh32(buf[0]); /* tb addr */
757 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
758 txd->tmd[2] = vmtoh32(buf[2]); /* buff, uflo, ... */
759 txd->tmd[3] = vmtoh32(buf[3]); /* user */
760 break;
761
762 case 3:
763 txd->tmd[0] = vmtoh32(buf[2]); /* tb addr */
764 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
765 txd->tmd[2] = vmtoh32(buf[0]); /* buff, uflo, ... */
766 txd->tmd[3] = vmtoh32(buf[3]); /* user */
767 break;
768
769 default:
770 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
771 return(-1);
772 }
773
774 return(0);
775 }
776
777 /* Set the address of the next TX descriptor */
778 static inline void txdesc_set_next(struct am79c971_data *d)
779 {
780 d->tx_pos++;
781
782 if (d->tx_pos == d->tx_len)
783 d->tx_pos = 0;
784 }
785
786 /* Compute the address of the current TX descriptor */
787 static inline m_uint32_t txdesc_get_current(struct am79c971_data *d)
788 {
789 return(d->tx_start + (d->tx_pos * sizeof(struct tx_desc)));
790 }
791
792 /* Handle the TX ring (single packet) */
793 static int am79c971_handle_txring_single(struct am79c971_data *d)
794 {
795 u_char pkt[AM79C971_MAX_PKT_SIZE],*pkt_ptr;
796 struct tx_desc txd0,ctxd,ntxd,*ptxd;
797 m_uint32_t tx_start,tx_current;
798 m_uint32_t clen,tot_len;
799
800 if ((d->tx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON))
801 return(FALSE);
802
803 /* Copy the current txring descriptor */
804 tx_start = tx_current = txdesc_get_current(d);
805 ptxd = &txd0;
806 txdesc_read(d,tx_start,ptxd);
807
808 /* If we don't own the first descriptor, we cannot transmit */
809 if (!(ptxd->tmd[1] & AM79C971_TMD1_OWN))
810 return(FALSE);
811
812 #if DEBUG_TRANSMIT
813 AM79C971_LOG(d,"am79c971_handle_txring: 1st desc: "
814 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
815 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
816 #endif
817
818 /* Empty packet for now */
819 pkt_ptr = pkt;
820 tot_len = 0;
821
822 for(;;) {
823 #if DEBUG_TRANSMIT
824 AM79C971_LOG(d,"am79c971_handle_txring: loop: "
825 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
826 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
827 #endif
828 /* Copy packet data */
829 clen = ~((ptxd->tmd[1] & AM79C971_TMD1_LEN) - 1);
830 clen &= AM79C971_TMD1_LEN;
831 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tmd[0],clen);
832
833 pkt_ptr += clen;
834 tot_len += clen;
835
836 /* Clear the OWN bit if this is not the first descriptor */
837 if (!(ptxd->tmd[1] & AM79C971_TMD1_STP)) {
838 ptxd->tmd[1] &= ~AM79C971_TMD1_OWN;
839 physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->tmd[1]);
840 }
841
842 /* Set the next descriptor */
843 txdesc_set_next(d);
844
845 /* Stop now if end of packet has been reached */
846 if (ptxd->tmd[1] & AM79C971_TMD1_ENP)
847 break;
848
849 /* Read the next descriptor and try to acquire it */
850 tx_current = txdesc_get_current(d);
851 txdesc_read(d,tx_current,&ntxd);
852
853 if (!(ntxd.tmd[1] & AM79C971_TMD1_OWN)) {
854 AM79C971_LOG(d,"am79c971_handle_txring: UNDERFLOW!\n");
855 return(FALSE);
856 }
857
858 memcpy(&ctxd,&ntxd,sizeof(struct tx_desc));
859 ptxd = &ctxd;
860 }
861
862 if (tot_len != 0) {
863 #if DEBUG_TRANSMIT
864 AM79C971_LOG(d,"sending packet of %u bytes\n",tot_len);
865 mem_dump(log_file,pkt,tot_len);
866 #endif
867 /* send it on wire */
868 netio_send(d->nio,pkt,tot_len);
869 }
870
871 /* Clear the OWN flag of the first descriptor */
872 txd0.tmd[1] &= ~AM79C971_TMD1_OWN;
873 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.tmd[1]);
874
875 /* Generate TX interrupt */
876 d->csr[0] |= AM79C971_CSR0_TINT;
877 am79c971_update_intr_flag(d);
878 am79c971_trigger_irq(d);
879 return(TRUE);
880 }
881
882 /* Handle the TX ring */
883 static int am79c971_handle_txring(struct am79c971_data *d)
884 {
885 int i;
886
887 for(i=0;i<AM79C971_TXRING_PASS_COUNT;i++)
888 if (!am79c971_handle_txring_single(d))
889 break;
890
891 return(TRUE);
892 }
893
894 /*
895 * pci_am79c971_read()
896 *
897 * Read a PCI register.
898 */
899 static m_uint32_t pci_am79c971_read(cpu_mips_t *cpu,struct pci_device *dev,
900 int reg)
901 {
902 struct am79c971_data *d = dev->priv_data;
903
904 #if DEBUG_PCI_REGS
905 AM79C971_LOG(d,"read PCI register 0x%x\n",reg);
906 #endif
907
908 switch (reg) {
909 case 0x00:
910 return((AM79C971_PCI_PRODUCT_ID << 16) | AM79C971_PCI_VENDOR_ID);
911 case 0x08:
912 return(0x02000002);
913 case PCI_REG_BAR1:
914 return(d->dev->phys_addr);
915 default:
916 return(0);
917 }
918 }
919
920 /*
921 * pci_am79c971_write()
922 *
923 * Write a PCI register.
924 */
925 static void pci_am79c971_write(cpu_mips_t *cpu,struct pci_device *dev,
926 int reg,m_uint32_t value)
927 {
928 struct am79c971_data *d = dev->priv_data;
929
930 #if DEBUG_PCI_REGS
931 AM79C971_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value);
932 #endif
933
934 switch(reg) {
935 case PCI_REG_BAR1:
936 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
937 AM79C971_LOG(d,"registers are mapped at 0x%x\n",value);
938 break;
939 }
940 }
941
942 /*
943 * dev_am79c971_init()
944 *
945 * Generic AMD Am79c971 initialization code.
946 */
947 struct am79c971_data *
948 dev_am79c971_init(vm_instance_t *vm,char *name,int interface_type,
949 struct pci_bus *pci_bus,int pci_device,int irq)
950 {
951 struct am79c971_data *d;
952 struct pci_device *pci_dev;
953 struct vdevice *dev;
954
955 /* Allocate the private data structure for AM79C971 */
956 if (!(d = malloc(sizeof(*d)))) {
957 fprintf(stderr,"%s (AM79C971): out of memory\n",name);
958 return NULL;
959 }
960
961 memset(d,0,sizeof(*d));
962 memcpy(d->mii_regs[0],mii_reg_values,sizeof(mii_reg_values));
963
964 /* Add as PCI device */
965 pci_dev = pci_dev_add(pci_bus,name,
966 AM79C971_PCI_VENDOR_ID,AM79C971_PCI_PRODUCT_ID,
967 pci_device,0,irq,
968 d,NULL,pci_am79c971_read,pci_am79c971_write);
969
970 if (!pci_dev) {
971 fprintf(stderr,"%s (AM79C971): unable to create PCI device.\n",name);
972 goto err_pci_dev;
973 }
974
975 /* Create the device itself */
976 if (!(dev = dev_create(name))) {
977 fprintf(stderr,"%s (AM79C971): unable to create device.\n",name);
978 goto err_dev;
979 }
980
981 d->name = name;
982 d->vm = vm;
983 d->type = interface_type;
984 d->pci_dev = pci_dev;
985 d->dev = dev;
986
987 dev->phys_addr = 0;
988 dev->phys_len = 0x4000;
989 dev->handler = dev_am79c971_access;
990 dev->priv_data = d;
991 return(d);
992
993 err_dev:
994 pci_dev_remove(pci_dev);
995 err_pci_dev:
996 free(d);
997 return NULL;
998 }
999
1000 /* Remove an AMD Am79c971 device */
1001 void dev_am79c971_remove(struct am79c971_data *d)
1002 {
1003 if (d != NULL) {
1004 pci_dev_remove(d->pci_dev);
1005 vm_unbind_device(d->vm,d->dev);
1006 cpu_group_rebuild_mts(d->vm->cpu_group);
1007 free(d->dev);
1008 free(d);
1009 }
1010 }
1011
1012 /* Bind a NIO to an AMD Am79c971 device */
1013 int dev_am79c971_set_nio(struct am79c971_data *d,netio_desc_t *nio)
1014 {
1015 /* check that a NIO is not already bound */
1016 if (d->nio != NULL)
1017 return(-1);
1018
1019 d->nio = nio;
1020 d->tx_tid = ptask_add((ptask_callback)am79c971_handle_txring,d,NULL);
1021 netio_rxl_add(nio,(netio_rx_handler_t)am79c971_handle_rxring,d,NULL);
1022 return(0);
1023 }
1024
1025 /* Unbind a NIO from an AMD Am79c971 device */
1026 void dev_am79c971_unset_nio(struct am79c971_data *d)
1027 {
1028 if (d->nio != NULL) {
1029 ptask_remove(d->tx_tid);
1030 netio_rxl_remove(d->nio);
1031 d->nio = NULL;
1032 }
1033 }

  ViewVC Help
Powered by ViewVC 1.1.26