This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /upstream/dynamips-0.2.7-RC1/dev_am79c971.c



Revision 7 - Sat Oct 6 16:23:47 2007 UTC by dpavlin
File MIME type: text/plain
File size: 30096 byte(s)
dynamips-0.2.7-RC1

1 /*
2 * Cisco router simulation platform.
3 * Copyright (C) 2006 Christophe Fillot. All rights reserved.
4 *
5 * AMD Am79c971 FastEthernet chip emulation.
6 */
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <time.h>
14 #include <errno.h>
15 #include <assert.h>
16
17 #include "utils.h"
18 #include "cpu.h"
19 #include "vm.h"
20 #include "dynamips.h"
21 #include "memory.h"
22 #include "device.h"
23 #include "net.h"
24 #include "net_io.h"
25 #include "ptask.h"
26 #include "dev_am79c971.h"
27
28 /* Debugging flags */
29 #define DEBUG_CSR_REGS 0
30 #define DEBUG_BCR_REGS 0
31 #define DEBUG_PCI_REGS 0
32 #define DEBUG_ACCESS 0
33 #define DEBUG_TRANSMIT 0
34 #define DEBUG_RECEIVE 0
35 #define DEBUG_UNKNOWN 0
36
37 /* AMD Am79c971 PCI vendor/product codes */
38 #define AM79C971_PCI_VENDOR_ID 0x1022
39 #define AM79C971_PCI_PRODUCT_ID 0x2000
40
41 /* Maximum packet size */
42 #define AM79C971_MAX_PKT_SIZE 2048
43
44 /* Send up to 16 packets in a TX ring scan pass */
45 #define AM79C971_TXRING_PASS_COUNT 16
46
47 /* CSR0: Controller Status and Control Register */
48 #define AM79C971_CSR0_ERR 0x00008000 /* Error (BABL,CERR,MISS,MERR) */
49 #define AM79C971_CSR0_BABL 0x00004000 /* Transmitter Timeout Error */
50 #define AM79C971_CSR0_CERR 0x00002000 /* Collision Error */
51 #define AM79C971_CSR0_MISS 0x00001000 /* Missed Frame */
52 #define AM79C971_CSR0_MERR 0x00000800 /* Memory Error */
53 #define AM79C971_CSR0_RINT 0x00000400 /* Receive Interrupt */
54 #define AM79C971_CSR0_TINT 0x00000200 /* Transmit Interrupt */
55 #define AM79C971_CSR0_IDON 0x00000100 /* Initialization Done */
56 #define AM79C971_CSR0_INTR 0x00000080 /* Interrupt Flag */
57 #define AM79C971_CSR0_IENA 0x00000040 /* Interrupt Enable */
58 #define AM79C971_CSR0_RXON 0x00000020 /* Receive On */
59 #define AM79C971_CSR0_TXON 0x00000010 /* Transmit On */
60 #define AM79C971_CSR0_TDMD 0x00000008 /* Transmit Demand */
61 #define AM79C971_CSR0_STOP 0x00000004 /* Stop */
62 #define AM79C971_CSR0_STRT 0x00000002 /* Start */
63 #define AM79C971_CSR0_INIT 0x00000001 /* Initialization */
64
65 /* CSR3: Interrupt Masks and Deferral Control */
66 #define AM79C971_CSR3_BABLM 0x00004000 /* Transmitter Timeout Int. Mask */
67 #define AM79C971_CSR3_CERRM 0x00002000 /* Collision Error Int. Mask */
68 #define AM79C971_CSR3_MISSM 0x00001000 /* Missed Frame Interrupt Mask */
69 #define AM79C971_CSR3_MERRM 0x00000800 /* Memory Error Interrupt Mask */
70 #define AM79C971_CSR3_RINTM 0x00000400 /* Receive Interrupt Mask */
71 #define AM79C971_CSR3_TINTM 0x00000200 /* Transmit Interrupt Mask */
72 #define AM79C971_CSR3_IDONM 0x00000100 /* Initialization Done Mask */
73 #define AM79C971_CSR3_BSWP 0x00000004 /* Byte Swap */
74 #define AM79C971_CSR3_IM_MASK 0x00007F00 /* Interrupt Masks for CSR3 */
75
76 /* CSR5: Extended Control and Interrupt 1 */
77 #define AM79C971_CSR5_TOKINTD 0x00008000 /* Transmit OK Interrupt Disable */
78 #define AM79C971_CSR5_SPND 0x00000001 /* Suspend */
79
80 /* CSR15: Mode */
81 #define AM79C971_CSR15_PROM 0x00008000 /* Promiscuous Mode */
82 #define AM79C971_CSR15_DRCVBC 0x00004000 /* Disable Receive Broadcast */
83 #define AM79C971_CSR15_DRCVPA 0x00002000 /* Disable Receive Physical Address */
84 #define AM79C971_CSR15_DTX 0x00000002 /* Disable Transmit */
85 #define AM79C971_CSR15_DRX 0x00000001 /* Disable Receive */
86
87 /* AMD 79C971 Initialization block length */
88 #define AM79C971_INIT_BLOCK_LEN 0x1c
89
90 /* RX descriptors */
91 #define AM79C971_RMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
92 #define AM79C971_RMD1_ERR 0x40000000 /* Error */
93 #define AM79C971_RMD1_FRAM 0x20000000 /* Framing Error */
94 #define AM79C971_RMD1_OFLO 0x10000000 /* Overflow Error */
95 #define AM79C971_RMD1_CRC 0x08000000 /* Invalid CRC */
96 #define AM79C971_RMD1_BUFF 0x04000000 /* Buffer Error (chaining) */
97 #define AM79C971_RMD1_STP 0x02000000 /* Start of Packet */
98 #define AM79C971_RMD1_ENP 0x01000000 /* End of Packet */
99 #define AM79C971_RMD1_BPE 0x00800000 /* Bus Parity Error */
100 #define AM79C971_RMD1_PAM 0x00400000 /* Physical Address Match */
101 #define AM79C971_RMD1_LAFM 0x00200000 /* Logical Addr. Filter Match */
102 #define AM79C971_RMD1_BAM 0x00100000 /* Broadcast Address Match */
103 #define AM79C971_RMD1_LEN 0x00000FFF /* Buffer Length */
104
105 #define AM79C971_RMD2_LEN 0x00000FFF /* Received byte count */
106
107 /* TX descriptors */
108 #define AM79C971_TMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
109 #define AM79C971_TMD1_ERR 0x40000000 /* Error */
110 #define AM79C971_TMD1_ADD_FCS 0x20000000 /* FCS generation */
111 #define AM79C971_TMD1_STP 0x02000000 /* Start of Packet */
112 #define AM79C971_TMD1_ENP 0x01000000 /* End of Packet */
113 #define AM79C971_TMD1_LEN 0x00000FFF /* Buffer Length */
114
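/*
 * Illustrative sketch only (not part of the original driver): the OWN, STP
 * and ENP bits are what delimit a frame across descriptors. The host hands a
 * buffer to the chip by setting OWN; the chip marks the first buffer of a
 * frame with STP, the last one with ENP, and clears OWN to give the buffer
 * back (see am79c971_receive_pkt below). These helper names are hypothetical.
 */
static int rmd1_is_start_of_frame(m_uint32_t rmd1)
{
   return(!(rmd1 & AM79C971_RMD1_OWN) && (rmd1 & AM79C971_RMD1_STP));
}

static int rmd1_is_end_of_frame(m_uint32_t rmd1)
{
   return(!(rmd1 & AM79C971_RMD1_OWN) && (rmd1 & AM79C971_RMD1_ENP));
}
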
115 /* RX Descriptor */
116 struct rx_desc {
117 m_uint32_t rmd[4];
118 };
119
120 /* TX Descriptor */
121 struct tx_desc {
122 m_uint32_t tmd[4];
123 };
124
125 /* AMD 79C971 Data */
126 struct am79c971_data {
127 char *name;
128
129 /* Interface type (10baseT or 100baseTX) */
130 int type;
131
132 /* Current RAP (Register Address Pointer) value */
133 m_uint8_t rap;
134
135 /* CSR and BCR registers */
136 m_uint32_t csr[256],bcr[256];
137
138 /* RX/TX rings start addresses */
139 m_uint32_t rx_start,tx_start;
140
141 /* RX/TX number of descriptors (log2) */
142 m_uint32_t rx_l2len,tx_l2len;
143
144 /* RX/TX number of descriptors */
145 m_uint32_t rx_len,tx_len;
146
147 /* RX/TX ring positions */
148 m_uint32_t rx_pos,tx_pos;
149
150 /* MII registers */
151 m_uint16_t mii_regs[32][32];
152
153 /* Physical (MAC) address */
154 n_eth_addr_t mac_addr;
155
156 /* Device information */
157 struct vdevice *dev;
158
159 /* PCI device information */
160 struct pci_device *pci_dev;
161
162 /* Virtual machine */
163 vm_instance_t *vm;
164
165 /* NetIO descriptor */
166 netio_desc_t *nio;
167
168 /* TX ring scanner task id */
169 ptask_id_t tx_tid;
170 };
171
172 /* Log an am79c971 message */
173 #define AM79C971_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
174
175
176 static m_uint16_t mii_reg_values[32] = {
177 0x1000, 0x782D, 0x2000, 0x5C01, 0x01E1, 0x0000, 0x0000, 0x0000,
178 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
179 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8060,
180 0x8020, 0x0820, 0x0000, 0x3800, 0xA3B9, 0x0000, 0x0000, 0x0000,
181 };
182
183 /* Read a MII register */
184 static m_uint16_t mii_reg_read(struct am79c971_data *d,u_int phy,u_int reg)
185 {
186 if ((phy >= 32) || (reg >= 32))
187 return(0);
188
189 return(d->mii_regs[phy][reg]);
190 }
191
192 /* Write a MII register */
193 static void mii_reg_write(struct am79c971_data *d,u_int phy,u_int reg,
194 m_uint16_t value)
195 {
196 if ((phy < 32) && (reg < 32))
197 d->mii_regs[phy][reg] = value;
198 }
199
200 /* Check if a packet must be delivered to the emulated chip */
201 static inline int am79c971_handle_mac_addr(struct am79c971_data *d,
202 m_uint8_t *pkt)
203 {
204 n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt;
205
206 /* Accept all frames if we are running in promiscuous mode */
207 if (d->csr[15] & AM79C971_CSR15_PROM)
208 return(TRUE);
209
210 /* Always accept multicast frames */
211 if (eth_addr_is_mcast(&hdr->daddr))
212 return(TRUE);
213
214 /* Accept frames addressed directly to us, discard others */
215 if (!memcmp(&d->mac_addr,&hdr->daddr,N_ETH_ALEN))
216 return(TRUE);
217
218 return(FALSE);
219 }
220
221 /* Update the Interrupt Flag bit of csr0 */
222 static void am79c971_update_intr_flag(struct am79c971_data *d)
223 {
224 m_uint32_t mask;
225
226 mask = d->csr[3] & AM79C971_CSR3_IM_MASK;
227
228 if (d->csr[0] & mask)
229 d->csr[0] |= AM79C971_CSR0_INTR;
230 }
231
232 /* Trigger an interrupt */
233 static int am79c971_trigger_irq(struct am79c971_data *d)
234 {
235 if (d->csr[0] & (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA)) {
236 pci_dev_trigger_irq(d->vm,d->pci_dev);
237 return(TRUE);
238 }
239
240 return(FALSE);
241 }
242
243 /* Update RX/TX ON bits of csr0 */
244 static void am79c971_update_rx_tx_on_bits(struct am79c971_data *d)
245 {
246 /*
247 * Set RX ON if DRX in csr15 is cleared, and set TX ON if DTX
248 * in csr15 is cleared. The STRT bit in csr0 must also be set.
249 */
250 d->csr[0] &= ~(AM79C971_CSR0_RXON|AM79C971_CSR0_TXON);
251
252 if (d->csr[0] & AM79C971_CSR0_STRT) {
253 if (!(d->csr[15] & AM79C971_CSR15_DRX))
254 d->csr[0] |= AM79C971_CSR0_RXON;
255
256 if (!(d->csr[15] & AM79C971_CSR15_DTX))
257 d->csr[0] |= AM79C971_CSR0_TXON;
258 }
259 }
260
261 /* Update RX/TX descriptor lengths */
262 static void am79c971_update_rx_tx_len(struct am79c971_data *d)
263 {
264 d->rx_len = 1 << d->rx_l2len;
265 d->tx_len = 1 << d->tx_l2len;
266
267 /* Normalize ring sizes */
268 if (d->rx_len > 512) d->rx_len = 512;
269 if (d->tx_len > 512) d->tx_len = 512;
270 }
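
/*
 * Hedged illustration (not used by the driver): the log2 ring-size encoding
 * handled by am79c971_update_rx_tx_len(). The l2len values below are made up;
 * in practice they come from the RLEN/TLEN fields of the init block or CSR6.
 */
static void am79c971_ring_len_example(struct am79c971_data *d)
{
   d->rx_l2len = 4;               /* 1 << 4  = 16 RX descriptors */
   d->tx_l2len = 12;              /* 1 << 12 = 4096, above the cap */
   am79c971_update_rx_tx_len(d);  /* leaves rx_len == 16, tx_len == 512 */
}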
271
272 /* Fetch the initialization block from memory */
273 static int am79c971_fetch_init_block(struct am79c971_data *d)
274 {
275 m_uint32_t ib[AM79C971_INIT_BLOCK_LEN];
276 m_uint32_t ib_addr,ib_tmp;
277
278 /* The init block address is contained in csr1 (low) and csr2 (high) */
279 ib_addr = (d->csr[2] << 16) | d->csr[1];
280
281 if (!ib_addr) {
282 AM79C971_LOG(d,"trying to fetch init block at address 0...\n");
283 return(-1);
284 }
285
286 AM79C971_LOG(d,"fetching init block at address 0x%8.8x\n",ib_addr);
287 physmem_copy_from_vm(d->vm,ib,ib_addr,sizeof(ib));
288
289 /* Extract RX/TX ring addresses */
290 d->rx_start = vmtoh32(ib[5]);
291 d->tx_start = vmtoh32(ib[6]);
292
293 /* Set csr15 from mode field */
294 ib_tmp = vmtoh32(ib[0]);
295 d->csr[15] = ib_tmp & 0xffff;
296
297 /* Extract RX/TX ring sizes */
298 d->rx_l2len = (ib_tmp >> 20) & 0x0F;
299 d->tx_l2len = (ib_tmp >> 28) & 0x0F;
300 am79c971_update_rx_tx_len(d);
301
302 AM79C971_LOG(d,"rx_ring = 0x%8.8x (%u), tx_ring = 0x%8.8x (%u)\n",
303 d->rx_start,d->rx_len,d->tx_start,d->tx_len);
304
305 /* Get the physical MAC address */
306 ib_tmp = vmtoh32(ib[1]);
307 d->csr[12] = ib_tmp & 0xFFFF;
308 d->csr[13] = ib_tmp >> 16;
309
310 d->mac_addr.eth_addr_byte[3] = (ib_tmp >> 24) & 0xFF;
311 d->mac_addr.eth_addr_byte[2] = (ib_tmp >> 16) & 0xFF;
312 d->mac_addr.eth_addr_byte[1] = (ib_tmp >> 8) & 0xFF;
313 d->mac_addr.eth_addr_byte[0] = ib_tmp & 0xFF;
314
315 ib_tmp = vmtoh32(ib[2]);
316 d->csr[14] = ib_tmp & 0xFFFF;
317 d->mac_addr.eth_addr_byte[5] = (ib_tmp >> 8) & 0xFF;
318 d->mac_addr.eth_addr_byte[4] = ib_tmp & 0xFF;
319
320 /*
321 * Mark the initialization as done in csr0.
322 */
323 d->csr[0] |= AM79C971_CSR0_IDON;
324
325 /* Update RX/TX ON bits of csr0 since csr15 has been modified */
326 am79c971_update_rx_tx_on_bits(d);
327 AM79C971_LOG(d,"CSR0 = 0x%4.4x\n",d->csr[0]);
328
329 am79c971_update_intr_flag(d);
330
331 if (am79c971_trigger_irq(d))
332 AM79C971_LOG(d,"triggering IDON interrupt\n");
333
334 return(0);
335 }
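
/*
 * For reference, a sketch of the 32-bit (SSIZE32) initialization block layout
 * as am79c971_fetch_init_block() consumes it. This struct is only an
 * illustration of the word offsets read above; it is not used by the driver.
 */
struct am79c971_init_block_ssize32 {
   m_uint32_t mode_rlen_tlen;   /* [15:0] MODE -> csr15, [23:20] RLEN, [31:28] TLEN */
   m_uint32_t padr_lo;          /* MAC bytes 0-3, mirrored into csr12/csr13 */
   m_uint32_t padr_hi;          /* MAC bytes 4-5 in the low 16 bits -> csr14 */
   m_uint32_t ladrf[2];         /* logical address filter (ignored here) */
   m_uint32_t rdra;             /* RX descriptor ring base -> rx_start */
   m_uint32_t tdra;             /* TX descriptor ring base -> tx_start */
};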
336
337 /* RDP (Register Data Port) access */
338 static void am79c971_rdp_access(cpu_gen_t *cpu,struct am79c971_data *d,
339 u_int op_type,m_uint64_t *data)
340 {
341 m_uint32_t mask;
342
343 #if DEBUG_CSR_REGS
344 if (op_type == MTS_READ) {
345 cpu_log(cpu,d->name,"read access to CSR %d\n",d->rap);
346 } else {
347 cpu_log(cpu,d->name,"write access to CSR %d, value=0x%llx\n",d->rap,*data);
348 }
349 #endif
350
351 switch(d->rap) {
352 case 0: /* CSR0: Controller Status and Control Register */
353 if (op_type == MTS_READ) {
354 //AM79C971_LOG(d,"reading CSR0 (val=0x%4.4x)\n",d->csr[0]);
355 *data = d->csr[0];
356 } else {
357 /*
358 * The STOP bit clears other bits.
359 * It has precedence over INIT and START bits.
360 */
361 if (*data & AM79C971_CSR0_STOP) {
362 //AM79C971_LOG(d,"stopping interface!\n");
363 d->csr[0] = AM79C971_CSR0_STOP;
364 d->tx_pos = d->rx_pos = 0;
365 break;
366 }
367
368 /* These bits are cleared when set to 1 */
369 mask = AM79C971_CSR0_BABL | AM79C971_CSR0_CERR;
370 mask |= AM79C971_CSR0_MISS | AM79C971_CSR0_MERR;
371 mask |= AM79C971_CSR0_RINT | AM79C971_CSR0_TINT;
372 mask |= AM79C971_CSR0_IDON;
373 d->csr[0] &= ~(*data & mask);
374
375 /* Save the Interrupt Enable bit */
376 d->csr[0] |= *data & AM79C971_CSR0_IENA;
377
378 /* If INIT bit is set, fetch the initialization block */
379 if (*data & AM79C971_CSR0_INIT) {
380 d->csr[0] |= AM79C971_CSR0_INIT;
381 d->csr[0] &= ~AM79C971_CSR0_STOP;
382 am79c971_fetch_init_block(d);
383 }
384
385 /* If STRT bit is set, clear the stop bit */
386 if (*data & AM79C971_CSR0_STRT) {
387 //AM79C971_LOG(d,"enabling interface!\n");
388 d->csr[0] |= AM79C971_CSR0_STRT;
389 d->csr[0] &= ~AM79C971_CSR0_STOP;
390 am79c971_update_rx_tx_on_bits(d);
391 }
392 }
393 break;
394
395 case 6: /* CSR6: RX/TX Descriptor Table Length */
396 if (op_type == MTS_WRITE) {
397 d->rx_l2len = (*data >> 8) & 0x0F;
398 d->tx_l2len = (*data >> 12) & 0x0F;
399 am79c971_update_rx_tx_len(d);
400 } else {
401 *data = (d->tx_l2len << 12) | (d->rx_l2len << 8);
402 }
403 break;
404
405 case 15: /* CSR15: Mode */
406 if (op_type == MTS_WRITE) {
407 d->csr[15] = *data;
408 am79c971_update_rx_tx_on_bits(d);
409 } else {
410 *data = d->csr[15];
411 }
412 break;
413
414 case 88: /* CSR88: Chip ID Register (lower) */
415 if (op_type == MTS_READ) {
416 switch(d->type) {
417 case AM79C971_TYPE_100BASE_TX:
418 *data = 0x2623003;
419 break;
420 default:
421 *data = 0;
422 break;
423 }
424 }
425 break;
426
427 default:
428 if (op_type == MTS_READ) {
429 *data = d->csr[d->rap];
430 } else {
431 d->csr[d->rap] = *data;
432 }
433
434 #if DEBUG_UNKNOWN
435 if (op_type == MTS_READ) {
436 cpu_log(cpu,d->name,"read access to unknown CSR %d\n",d->rap);
437 } else {
438 cpu_log(cpu,d->name,"write access to unknown CSR %d, value=0x%llx\n",
439 d->rap,*data);
440 }
441 #endif
442 }
443 }
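
/*
 * Hedged sketch (not part of the driver): the write-1-to-clear behaviour
 * implemented above means a guest interrupt handler can acknowledge events
 * simply by writing the CSR0 value it just read back to CSR0.
 */
static void am79c971_ack_csr0_example(cpu_gen_t *cpu,struct am79c971_data *d)
{
   m_uint64_t val;

   d->rap = 0;                                 /* RAP selects CSR0 */
   am79c971_rdp_access(cpu,d,MTS_READ,&val);   /* read CSR0 */
   am79c971_rdp_access(cpu,d,MTS_WRITE,&val);  /* clears the event bits just read */
}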
444
445 /* BDP (BCR Data Port) access */
446 static void am79c971_bdp_access(cpu_gen_t *cpu,struct am79c971_data *d,
447 u_int op_type,m_uint64_t *data)
448 {
449 u_int mii_phy,mii_reg;
450
451 #if DEBUG_BCR_REGS
452 if (op_type == MTS_READ) {
453 cpu_log(cpu,d->name,"read access to BCR %d\n",d->rap);
454 } else {
455 cpu_log(cpu,d->name,"write access to BCR %d, value=0x%llx\n",d->rap,*data);
456 }
457 #endif
458
459 switch(d->rap) {
460 case 9:
461 if (op_type == MTS_READ)
462 *data = 1;
463 break;
464
465 case 34: /* BCR34: MII Management Data Register */
466 mii_phy = (d->bcr[33] >> 5) & 0x1F;
467 mii_reg = (d->bcr[33] >> 0) & 0x1F;
468
469 if (op_type == MTS_READ)
470 *data = mii_reg_read(d,mii_phy,mii_reg);
471 //else
472 //mii_reg_write(d,mii_phy,mii_reg,*data);
473 break;
474
475 default:
476 if (op_type == MTS_READ) {
477 *data = d->bcr[d->rap];
478 } else {
479 d->bcr[d->rap] = *data;
480 }
481
482 #if DEBUG_UNKNOWN
483 if (op_type == MTS_READ) {
484 cpu_log(cpu,d->name,"read access to unknown BCR %d\n",d->rap);
485 } else {
486 cpu_log(cpu,d->name,"write access to unknown BCR %d, value=0x%llx\n",
487 d->rap,*data);
488 }
489 #endif
490 }
491 }
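
/*
 * Illustration of the indirect MII path handled in case 34 above: BCR33 bits
 * [9:5] select the PHY, bits [4:0] select the register, and a read of BCR34
 * then returns mii_reg_read(). With the default table at the top of this
 * file, PHY 0 registers 2 and 3 (the MII PHY identifier registers) read back
 * 0x2000 and 0x5C01. This helper is hypothetical and only mirrors that path.
 */
static m_uint16_t am79c971_mii_read_example(struct am79c971_data *d,
                                            u_int phy,u_int reg)
{
   d->bcr[33] = ((phy & 0x1F) << 5) | (reg & 0x1F);
   return(mii_reg_read(d,phy,reg));   /* what a BCR34 read would return */
}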
492
493 /*
494 * dev_am79c971_access()
495 */
496 void *dev_am79c971_access(cpu_gen_t *cpu,struct vdevice *dev,
497 m_uint32_t offset,u_int op_size,u_int op_type,
498 m_uint64_t *data)
499 {
500 struct am79c971_data *d = dev->priv_data;
501
502 if (op_type == MTS_READ)
503 *data = 0;
504
505 #if DEBUG_ACCESS
506 if (op_type == MTS_READ) {
507 cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
508 offset,cpu_get_pc(cpu),op_size);
509 } else {
510 cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
511 "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
512 }
513 #endif
514
515 switch(offset) {
516 case 0x14: /* RAP (Register Address Pointer) */
517 if (op_type == MTS_WRITE) {
518 d->rap = *data & 0xFF;
519 } else {
520 *data = d->rap;
521 }
522 break;
523
524 case 0x10: /* RDP (Register Data Port) */
525 am79c971_rdp_access(cpu,d,op_type,data);
526 break;
527
528 case 0x1c: /* BDP (BCR Data Port) */
529 am79c971_bdp_access(cpu,d,op_type,data);
530 break;
531 }
532
533 return NULL;
534 }
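
/*
 * Hedged sketch (not part of the driver) of how a guest programs the chip
 * through the window above: the register number is written to RAP at offset
 * 0x14, then the register itself is accessed through RDP at offset 0x10
 * (or BDP at 0x1c for BCRs). The init block address used here is made up.
 */
static void am79c971_guest_init_example(cpu_gen_t *cpu,struct vdevice *dev)
{
   m_uint32_t ib_addr = 0x00100000;   /* hypothetical init block address */
   m_uint64_t val;

   /* CSR1/CSR2 hold the low/high 16 bits of the init block address */
   val = 1; dev_am79c971_access(cpu,dev,0x14,4,MTS_WRITE,&val);
   val = ib_addr & 0xFFFF;
   dev_am79c971_access(cpu,dev,0x10,4,MTS_WRITE,&val);

   val = 2; dev_am79c971_access(cpu,dev,0x14,4,MTS_WRITE,&val);
   val = ib_addr >> 16;
   dev_am79c971_access(cpu,dev,0x10,4,MTS_WRITE,&val);

   /* Select CSR0 and ask for the init block to be fetched */
   val = 0; dev_am79c971_access(cpu,dev,0x14,4,MTS_WRITE,&val);
   val = AM79C971_CSR0_INIT;
   dev_am79c971_access(cpu,dev,0x10,4,MTS_WRITE,&val);

   /* Wait for IDON, then start RX/TX with interrupts enabled */
   do {
      dev_am79c971_access(cpu,dev,0x10,4,MTS_READ,&val);
   } while(!(val & AM79C971_CSR0_IDON));

   val = AM79C971_CSR0_STRT | AM79C971_CSR0_IENA;
   dev_am79c971_access(cpu,dev,0x10,4,MTS_WRITE,&val);
}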
535
536 /* Read a RX descriptor */
537 static int rxdesc_read(struct am79c971_data *d,m_uint32_t rxd_addr,
538 struct rx_desc *rxd)
539 {
540 m_uint32_t buf[4];
541 m_uint8_t sw_style;
542
543 /* Get the software style */
544 sw_style = d->bcr[20];
545
546 /* Read the descriptor from VM physical RAM */
547 physmem_copy_from_vm(d->vm,&buf,rxd_addr,sizeof(struct rx_desc));
548
549 switch(sw_style) {
550 case 2:
551 rxd->rmd[0] = vmtoh32(buf[0]); /* rb addr */
552 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
553 rxd->rmd[2] = vmtoh32(buf[2]); /* rfrtag, mcnt, ... */
554 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
555 break;
556
557 case 3:
558 rxd->rmd[0] = vmtoh32(buf[2]); /* rb addr */
559 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
560 rxd->rmd[2] = vmtoh32(buf[0]); /* rfrtag, mcnt, ... */
561 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
562 break;
563
564 default:
565 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
566 return(-1);
567 }
568
569 return(0);
570 }
571
572 /* Set the address of the next RX descriptor */
573 static inline void rxdesc_set_next(struct am79c971_data *d)
574 {
575 d->rx_pos++;
576
577 if (d->rx_pos == d->rx_len)
578 d->rx_pos = 0;
579 }
580
581 /* Compute the address of the current RX descriptor */
582 static inline m_uint32_t rxdesc_get_current(struct am79c971_data *d)
583 {
584 return(d->rx_start + (d->rx_pos * sizeof(struct rx_desc)));
585 }
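
/*
 * Worked example (hypothetical values): descriptors are 16 bytes each, so
 * with rx_start = 0x08001000 and rx_pos = 3 the current descriptor sits at
 * 0x08001030. rxdesc_set_next() wraps rx_pos back to 0 at the ring's end.
 */
static m_uint32_t am79c971_rxdesc_addr_example(struct am79c971_data *d)
{
   d->rx_start = 0x08001000;
   d->rx_pos   = 3;
   return(rxdesc_get_current(d));   /* == 0x08001030 */
}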
586
587 /* Put a packet into the buffer of a descriptor */
588 static void rxdesc_put_pkt(struct am79c971_data *d,struct rx_desc *rxd,
589 u_char **pkt,ssize_t *pkt_len)
590 {
591 ssize_t len,cp_len;
592
593 /* Compute the data length to copy (BCNT is stored in two's complement form) */
594 len = ~((rxd->rmd[1] & AM79C971_RMD1_LEN) - 1);
595 len &= AM79C971_RMD1_LEN;
596 cp_len = m_min(len,*pkt_len);
597
598 /* Copy packet data to the VM physical RAM */
599 #if DEBUG_RECEIVE
600 AM79C971_LOG(d,"am79c971_handle_rxring: storing %u bytes at 0x%8.8x\n",
601 cp_len, rxd->rmd[0]);
602 #endif
603 physmem_copy_to_vm(d->vm,*pkt,rxd->rmd[0],cp_len);
604
605 *pkt += cp_len;
606 *pkt_len -= cp_len;
607 }
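
/*
 * Worked example of the length decoding above: the BCNT field of rmd1 holds
 * the buffer size in two's complement form, so a 1536-byte buffer is
 * advertised as (-1536) & 0xFFF = 0xA00, and ~(0xA00 - 1) & 0xFFF = 1536.
 */
static ssize_t am79c971_rmd1_len_example(void)
{
   m_uint32_t rmd1 = 0xA00;   /* BCNT bits only (hypothetical descriptor) */
   ssize_t len;

   len = ~((rmd1 & AM79C971_RMD1_LEN) - 1);
   return(len & AM79C971_RMD1_LEN);   /* == 1536 */
}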
608
609 /*
610 * Put a packet in the RX ring.
611 */
612 static int am79c971_receive_pkt(struct am79c971_data *d,
613 u_char *pkt,ssize_t pkt_len)
614 {
615 m_uint32_t rx_start,rx_current,rx_next,rxdn_rmd1;
616 struct rx_desc rxd0,rxdn,*rxdc;
617 ssize_t tot_len = pkt_len;
618 u_char *pkt_ptr = pkt;
619 m_uint8_t sw_style;
620 int i;
621
622 /* Truncate the packet if it is too big */
623 pkt_len = m_min(pkt_len,AM79C971_MAX_PKT_SIZE);
624
625 /* Copy the current rxring descriptor */
626 rx_start = rx_current = rxdesc_get_current(d);
627 rxdesc_read(d,rx_start,&rxd0);
628
629 /* We must have the first descriptor... */
630 if (!(rxd0.rmd[1] & AM79C971_RMD1_OWN))
631 return(FALSE);
632
633 for(i=0,rxdc=&rxd0;;i++)
634 {
635 #if DEBUG_RECEIVE
636 AM79C971_LOG(d,"am79c971_handle_rxring: i=%d, addr=0x%8.8x: "
637 "rmd[0]=0x%x, rmd[1]=0x%x, rmd[2]=0x%x, rmd[3]=0x%x\n",
638 i,rx_current,
639 rxdc->rmd[0],rxdc->rmd[1],rxdc->rmd[2],rxdc->rmd[3]);
640 #endif
641 /* Put data into the descriptor buffer */
642 rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);
643
644 /* Go to the next descriptor */
645 rxdesc_set_next(d);
646
647 /* If this is not the first descriptor, clear the OWN bit */
648 if (i != 0)
649 rxdc->rmd[1] &= ~AM79C971_RMD1_OWN;
650
651 /* If we have finished, mark the descriptor as end of packet */
652 if (tot_len == 0) {
653 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
654 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
655
656 /* Get the software style */
657 sw_style = d->bcr[20];
658
659 /* Update the message byte count field */
660 rxdc->rmd[2] &= ~AM79C971_RMD2_LEN;
661 rxdc->rmd[2] |= pkt_len + 4;
662
663 switch(sw_style) {
664 case 2:
665 physmem_copy_u32_to_vm(d->vm,rx_current+8,rxdc->rmd[2]);
666 break;
667 case 3:
668 physmem_copy_u32_to_vm(d->vm,rx_current,rxdc->rmd[2]);
669 break;
670 default:
671 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
672 }
673
674 break;
675 }
676
677 /* Try to acquire the next descriptor */
678 rx_next = rxdesc_get_current(d);
679 rxdn_rmd1 = physmem_copy_u32_from_vm(d->vm,rx_next+4);
680
681 if (!(rxdn_rmd1 & AM79C971_RMD1_OWN)) {
682 rxdc->rmd[1] |= AM79C971_RMD1_ERR | AM79C971_RMD1_BUFF;
683 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
684 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
685 break;
686 }
687
688 /* Update rmd1 to store change of OWN bit */
689 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
690
691 /* Read the next descriptor from VM physical RAM */
692 rxdesc_read(d,rx_next,&rxdn);
693 rxdc = &rxdn;
694 rx_current = rx_next;
695 }
696
697 /* Update the first RX descriptor */
698 rxd0.rmd[1] &= ~AM79C971_RMD1_OWN;
699 rxd0.rmd[1] |= AM79C971_RMD1_STP;
700 physmem_copy_u32_to_vm(d->vm,rx_start+4,rxd0.rmd[1]);
701
702 d->csr[0] |= AM79C971_CSR0_RINT;
703 am79c971_update_intr_flag(d);
704 am79c971_trigger_irq(d);
705 return(TRUE);
706 }
707
708 /* Handle the RX ring */
709 static int am79c971_handle_rxring(netio_desc_t *nio,
710 u_char *pkt,ssize_t pkt_len,
711 struct am79c971_data *d)
712 {
713 n_eth_hdr_t *hdr;
714
715 /*
716 * Don't start receive if the RX ring address has not been set
717 * or if RX ON is not set.
718 */
719 if ((d->rx_start == 0) || !(d->csr[0] & AM79C971_CSR0_RXON))
720 return(FALSE);
721
722 #if DEBUG_RECEIVE
723 AM79C971_LOG(d,"receiving a packet of %d bytes\n",pkt_len);
724 mem_dump(log_file,pkt,pkt_len);
725 #endif
726
727 /*
728 * Receive only multicast/broadcast traffic and unicast traffic
729 * for this virtual machine.
730 */
731 hdr = (n_eth_hdr_t *)pkt;
732 if (am79c971_handle_mac_addr(d,pkt))
733 am79c971_receive_pkt(d,pkt,pkt_len);
734
735 return(TRUE);
736 }
737
738 /* Read a TX descriptor */
739 static int txdesc_read(struct am79c971_data *d,m_uint32_t txd_addr,
740 struct tx_desc *txd)
741 {
742 m_uint32_t buf[4];
743 m_uint8_t sw_style;
744
745 /* Get the software style */
746 sw_style = d->bcr[20];
747
748 /* Read the descriptor from VM physical RAM */
749 physmem_copy_from_vm(d->vm,&buf,txd_addr,sizeof(struct tx_desc));
750
751 switch(sw_style) {
752 case 2:
753 txd->tmd[0] = vmtoh32(buf[0]); /* tb addr */
754 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
755 txd->tmd[2] = vmtoh32(buf[2]); /* buff, uflo, ... */
756 txd->tmd[3] = vmtoh32(buf[3]); /* user */
757 break;
758
759 case 3:
760 txd->tmd[0] = vmtoh32(buf[2]); /* tb addr */
761 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
762 txd->tmd[2] = vmtoh32(buf[0]); /* buff, uflo, ... */
763 txd->tmd[3] = vmtoh32(buf[3]); /* user */
764 break;
765
766 default:
767 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
768 return(-1);
769 }
770
771 return(0);
772 }
773
774 /* Set the address of the next TX descriptor */
775 static inline void txdesc_set_next(struct am79c971_data *d)
776 {
777 d->tx_pos++;
778
779 if (d->tx_pos == d->tx_len)
780 d->tx_pos = 0;
781 }
782
783 /* Compute the address of the current TX descriptor */
784 static inline m_uint32_t txdesc_get_current(struct am79c971_data *d)
785 {
786 return(d->tx_start + (d->tx_pos * sizeof(struct tx_desc)));
787 }
788
789 /* Handle the TX ring (single packet) */
790 static int am79c971_handle_txring_single(struct am79c971_data *d)
791 {
792 u_char pkt[AM79C971_MAX_PKT_SIZE],*pkt_ptr;
793 struct tx_desc txd0,ctxd,ntxd,*ptxd;
794 m_uint32_t tx_start,tx_current;
795 m_uint32_t clen,tot_len;
796
797 if ((d->tx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON))
798 return(FALSE);
799
800 /* Copy the current txring descriptor */
801 tx_start = tx_current = txdesc_get_current(d);
802 ptxd = &txd0;
803 txdesc_read(d,tx_start,ptxd);
804
805 /* If we don't own the first descriptor, we cannot transmit */
806 if (!(ptxd->tmd[1] & AM79C971_TMD1_OWN))
807 return(FALSE);
808
809 #if DEBUG_TRANSMIT
810 AM79C971_LOG(d,"am79c971_handle_txring: 1st desc: "
811 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
812 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
813 #endif
814
815 /* Empty packet for now */
816 pkt_ptr = pkt;
817 tot_len = 0;
818
819 for(;;) {
820 #if DEBUG_TRANSMIT
821 AM79C971_LOG(d,"am79c971_handle_txring: loop: "
822 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
823 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
824 #endif
825 /* Copy packet data */
826 clen = ~((ptxd->tmd[1] & AM79C971_TMD1_LEN) - 1);
827 clen &= AM79C971_TMD1_LEN;
828 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tmd[0],clen);
829
830 pkt_ptr += clen;
831 tot_len += clen;
832
833 /* Clear the OWN bit if this is not the first descriptor */
834 if (!(ptxd->tmd[1] & AM79C971_TMD1_STP)) {
835 ptxd->tmd[1] &= ~AM79C971_TMD1_OWN;
836 physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->tmd[1]);
837 }
838
839 /* Set the next descriptor */
840 txdesc_set_next(d);
841
842 /* Stop now if end of packet has been reached */
843 if (ptxd->tmd[1] & AM79C971_TMD1_ENP)
844 break;
845
846 /* Read the next descriptor and try to acquire it */
847 tx_current = txdesc_get_current(d);
848 txdesc_read(d,tx_current,&ntxd);
849
850 if (!(ntxd.tmd[1] & AM79C971_TMD1_OWN)) {
851 AM79C971_LOG(d,"am79c971_handle_txring: UNDERFLOW!\n");
852 return(FALSE);
853 }
854
855 memcpy(&ctxd,&ntxd,sizeof(struct tx_desc));
856 ptxd = &ctxd;
857 }
858
859 if (tot_len != 0) {
860 #if DEBUG_TRANSMIT
861 AM79C971_LOG(d,"sending packet of %u bytes\n",tot_len);
862 mem_dump(log_file,pkt,tot_len);
863 #endif
864 /* send it on wire */
865 netio_send(d->nio,pkt,tot_len);
866 }
867
868 /* Clear the OWN flag of the first descriptor */
869 txd0.tmd[1] &= ~AM79C971_TMD1_OWN;
870 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.tmd[1]);
871
872 /* Generate TX interrupt */
873 d->csr[0] |= AM79C971_CSR0_TINT;
874 am79c971_update_intr_flag(d);
875 am79c971_trigger_irq(d);
876 return(TRUE);
877 }
878
879 /* Handle the TX ring */
880 static int am79c971_handle_txring(struct am79c971_data *d)
881 {
882 int i;
883
884 for(i=0;i<AM79C971_TXRING_PASS_COUNT;i++)
885 if (!am79c971_handle_txring_single(d))
886 break;
887
888 return(TRUE);
889 }
890
891 /*
892 * pci_am79c971_read()
893 *
894 * Read a PCI register.
895 */
896 static m_uint32_t pci_am79c971_read(cpu_gen_t *cpu,struct pci_device *dev,
897 int reg)
898 {
899 struct am79c971_data *d = dev->priv_data;
900
901 #if DEBUG_PCI_REGS
902 AM79C971_LOG(d,"read PCI register 0x%x\n",reg);
903 #endif
904
905 switch (reg) {
906 case 0x00:
907 return((AM79C971_PCI_PRODUCT_ID << 16) | AM79C971_PCI_VENDOR_ID);
908 case 0x08:
909 return(0x02000002);
910 case PCI_REG_BAR1:
911 return(d->dev->phys_addr);
912 default:
913 return(0);
914 }
915 }
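
/*
 * For reference: the 0x02000002 value returned above for PCI configuration
 * register 0x08 packs the class code and revision ID. Illustrative decoding
 * only; this helper is not used anywhere.
 */
static void pci_am79c971_class_code_example(void)
{
   m_uint32_t reg08 = 0x02000002;
   m_uint8_t base_class = (reg08 >> 24) & 0xFF;   /* 0x02: network controller */
   m_uint8_t sub_class  = (reg08 >> 16) & 0xFF;   /* 0x00: Ethernet */
   m_uint8_t rev_id     = reg08 & 0xFF;           /* 0x02 */

   (void)base_class; (void)sub_class; (void)rev_id;
}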
916
917 /*
918 * pci_am79c971_write()
919 *
920 * Write a PCI register.
921 */
922 static void pci_am79c971_write(cpu_gen_t *cpu,struct pci_device *dev,
923 int reg,m_uint32_t value)
924 {
925 struct am79c971_data *d = dev->priv_data;
926
927 #if DEBUG_PCI_REGS
928 AM79C971_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value);
929 #endif
930
931 switch(reg) {
932 case PCI_REG_BAR1:
933 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
934 AM79C971_LOG(d,"registers are mapped at 0x%x\n",value);
935 break;
936 }
937 }
938
939 /*
940 * dev_am79c971_init()
941 *
942 * Generic AMD Am79c971 initialization code.
943 */
944 struct am79c971_data *
945 dev_am79c971_init(vm_instance_t *vm,char *name,int interface_type,
946 struct pci_bus *pci_bus,int pci_device,int irq)
947 {
948 struct am79c971_data *d;
949 struct pci_device *pci_dev;
950 struct vdevice *dev;
951
952 /* Allocate the private data structure for AM79C971 */
953 if (!(d = malloc(sizeof(*d)))) {
954 fprintf(stderr,"%s (AM79C971): out of memory\n",name);
955 return NULL;
956 }
957
958 memset(d,0,sizeof(*d));
959 memcpy(d->mii_regs[0],mii_reg_values,sizeof(mii_reg_values));
960
961 /* Add as PCI device */
962 pci_dev = pci_dev_add(pci_bus,name,
963 AM79C971_PCI_VENDOR_ID,AM79C971_PCI_PRODUCT_ID,
964 pci_device,0,irq,
965 d,NULL,pci_am79c971_read,pci_am79c971_write);
966
967 if (!pci_dev) {
968 fprintf(stderr,"%s (AM79C971): unable to create PCI device.\n",name);
969 goto err_pci_dev;
970 }
971
972 /* Create the device itself */
973 if (!(dev = dev_create(name))) {
974 fprintf(stderr,"%s (AM79C971): unable to create device.\n",name);
975 goto err_dev;
976 }
977
978 d->name = name;
979 d->vm = vm;
980 d->type = interface_type;
981 d->pci_dev = pci_dev;
982 d->dev = dev;
983
984 dev->phys_addr = 0;
985 dev->phys_len = 0x4000;
986 dev->handler = dev_am79c971_access;
987 dev->priv_data = d;
988 return(d);
989
990 err_dev:
991 pci_dev_remove(pci_dev);
992 err_pci_dev:
993 free(d);
994 return NULL;
995 }
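
/*
 * Hedged usage sketch (not taken from a real platform module): create the
 * device and bind a NIO obtained from platform-specific code, using the
 * public entry points of this file (declared in dev_am79c971.h, as is
 * AM79C971_TYPE_100BASE_TX). The interface name is a placeholder.
 */
static int am79c971_setup_example(vm_instance_t *vm,struct pci_bus *bus,
                                  int pci_device,int irq,netio_desc_t *nio)
{
   struct am79c971_data *d;

   d = dev_am79c971_init(vm,"FastEthernet0/0",AM79C971_TYPE_100BASE_TX,
                         bus,pci_device,irq);
   if (!d)
      return(-1);

   return(dev_am79c971_set_nio(d,nio));
}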
996
997 /* Remove an AMD Am79c971 device */
998 void dev_am79c971_remove(struct am79c971_data *d)
999 {
1000 if (d != NULL) {
1001 pci_dev_remove(d->pci_dev);
1002 vm_unbind_device(d->vm,d->dev);
1003 cpu_group_rebuild_mts(d->vm->cpu_group);
1004 free(d->dev);
1005 free(d);
1006 }
1007 }
1008
1009 /* Bind a NIO to an AMD Am79c971 device */
1010 int dev_am79c971_set_nio(struct am79c971_data *d,netio_desc_t *nio)
1011 {
1012 /* check that a NIO is not already bound */
1013 if (d->nio != NULL)
1014 return(-1);
1015
1016 d->nio = nio;
1017 d->tx_tid = ptask_add((ptask_callback)am79c971_handle_txring,d,NULL);
1018 netio_rxl_add(nio,(netio_rx_handler_t)am79c971_handle_rxring,d,NULL);
1019 return(0);
1020 }
1021
1022 /* Unbind a NIO from an AMD Am79c971 device */
1023 void dev_am79c971_unset_nio(struct am79c971_data *d)
1024 {
1025 if (d->nio != NULL) {
1026 ptask_remove(d->tx_tid);
1027 netio_rxl_remove(d->nio);
1028 d->nio = NULL;
1029 }
1030 }
