i8254xGBe.cc
1 /*
2  * Copyright (c) 2006 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Authors: Ali Saidi
29  */
30 
31 /* @file
32  * Device model for Intel's 8254x line of gigabit Ethernet controllers.
33  * In particular, an 82547 revision 2 (82547GI) MAC is modeled, because it seems
34  * to have the fewest workarounds in the driver. It will probably work with most
35  * of the other MACs with slight modifications.
36  */
37 
38 #include "dev/net/i8254xGBe.hh"
39 
40 /*
41  * @todo Really there are multiple DMA engines; we should implement them.
42  */
43 
44 #include <algorithm>
45 #include <memory>
46 
47 #include "base/inet.hh"
48 #include "base/trace.hh"
49 #include "debug/Drain.hh"
50 #include "debug/EthernetAll.hh"
51 #include "mem/packet.hh"
52 #include "mem/packet_access.hh"
53 #include "params/IGbE.hh"
54 #include "sim/stats.hh"
55 #include "sim/system.hh"
56 
57 using namespace iGbReg;
58 using namespace Net;
59 
60 IGbE::IGbE(const Params *p)
61  : EtherDevice(p), etherInt(NULL), cpa(NULL),
62  rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false),
63  txTick(false), txFifoTick(false), rxDmaPacket(false), pktOffset(0),
64  fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
65  fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
66  rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
67  rdtrEvent(this), radvEvent(this),
68  tadvEvent(this), tidvEvent(this), tickEvent(this), interEvent(this),
69  rxDescCache(this, name()+".RxDesc", p->rx_desc_cache_size),
70  txDescCache(this, name()+".TxDesc", p->tx_desc_cache_size),
71  lastInterrupt(0)
72 {
73  etherInt = new IGbEInt(name() + ".int", this);
74 
75  // Initialize internal registers per Intel documentation
76  // All registers initialized to 0 by per-register constructor
77  regs.ctrl.fd(1);
78  regs.ctrl.lrst(1);
79  regs.ctrl.speed(2);
80  regs.ctrl.frcspd(1);
81  regs.sts.speed(3); // Say we're 1000Mbps
82  regs.sts.fd(1); // full duplex
83  regs.sts.lu(1); // link up
84  regs.eecd.fwe(1);
85  regs.eecd.ee_type(1);
86  regs.imr = 0;
87  regs.iam = 0;
88  regs.rxdctl.gran(1);
89  regs.rxdctl.wthresh(1);
90  regs.fcrth(1);
91  regs.tdwba = 0;
92  regs.rlpml = 0;
93  regs.sw_fw_sync = 0;
94 
95  regs.pba.rxa(0x30);
96  regs.pba.txa(0x10);
97 
98  eeOpBits = 0;
99  eeAddrBits = 0;
100  eeDataBits = 0;
101  eeOpcode = 0;
102 
103  // Clear all 64 16-bit words of the EEPROM
104  memset(&flash, 0, EEPROM_SIZE*2);
105 
106  // Set the MAC address
107  memcpy(flash, p->hardware_address.bytes(), ETH_ADDR_LEN);
108  for (int x = 0; x < ETH_ADDR_LEN/2; x++)
109  flash[x] = htobe(flash[x]);
110 
111  uint16_t csum = 0;
112  for (int x = 0; x < EEPROM_SIZE; x++)
113  csum += htobe(flash[x]);
114 
115 
116  // Magic happy checksum value
117  flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
118 
119  // Store the MAC address as queue ID
120  macAddr = p->hardware_address;
121 
122  rxFifo.clear();
123  txFifo.clear();
124 }
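The constructor above fills the emulated EEPROM with the MAC address and then picks the last 16-bit word so that all the words sum to the controller's expected magic checksum. Below is a minimal standalone sketch of that fix-up, not gem5 code: the word count and magic value are illustrative assumptions, and the byte-swapping done in the real constructor is omitted.

#include <cstdint>
#include <cstdio>

int main()
{
    const int kEepromWords = 64;        // assumed EEPROM size in 16-bit words
    const uint16_t kMagicCsum = 0xBABA; // assumed "happy" checksum value

    uint16_t flash[kEepromWords] = {};
    // ... MAC address and any other configuration words would go here ...

    uint16_t sum = 0;
    for (int i = 0; i < kEepromWords - 1; i++)
        sum += flash[i];
    // Choose the final word so the whole image sums to the magic value.
    flash[kEepromWords - 1] = static_cast<uint16_t>(kMagicCsum - sum);

    uint16_t check = 0;
    for (int i = 0; i < kEepromWords; i++)
        check += flash[i];
    std::printf("image sums to %#x (expected %#x)\n", check, kMagicCsum);
    return check == kMagicCsum ? 0 : 1;
}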
125 
126 IGbE::~IGbE()
127 {
128  delete etherInt;
129 }
130 
131 void
132 IGbE::init()
133 {
134  cpa = CPA::cpa();
135  PciDevice::init();
136 }
137 
138 EtherInt*
139 IGbE::getEthPort(const std::string &if_name, int idx)
140 {
141 
142  if (if_name == "interface") {
143  if (etherInt->getPeer())
144  panic("Port already connected to\n");
145  return etherInt;
146  }
147  return NULL;
148 }
149 
150 Tick
151 IGbE::writeConfig(PacketPtr pkt)
152 {
153  int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
154  if (offset < PCI_DEVICE_SPECIFIC)
155  PciDevice::writeConfig(pkt);
156  else
157  panic("Device specific PCI config space not implemented.\n");
158 
159  //
160  // Some work may need to be done here based on the PCI COMMAND bits.
161  //
162 
163  return configDelay;
164 }
165 
166 // Handy macro for range-testing register access addresses
167 #define IN_RANGE(val, base, len) (val >= base && val < (base + len))
168 
169 Tick
170 IGbE::read(PacketPtr pkt)
171 {
172  int bar;
173  Addr daddr;
174 
175  if (!getBAR(pkt->getAddr(), bar, daddr))
176  panic("Invalid PCI memory access to unmapped memory.\n");
177 
178  // Only Memory register BAR is allowed
179  assert(bar == 0);
180 
181  // Only 32bit accesses allowed
182  assert(pkt->getSize() == 4);
183 
184  DPRINTF(Ethernet, "Read device register %#X\n", daddr);
185 
186  //
187  // Handle read of register here
188  //
189 
190 
191  switch (daddr) {
192  case REG_CTRL:
193  pkt->set<uint32_t>(regs.ctrl());
194  break;
195  case REG_STATUS:
196  pkt->set<uint32_t>(regs.sts());
197  break;
198  case REG_EECD:
199  pkt->set<uint32_t>(regs.eecd());
200  break;
201  case REG_EERD:
202  pkt->set<uint32_t>(regs.eerd());
203  break;
204  case REG_CTRL_EXT:
205  pkt->set<uint32_t>(regs.ctrl_ext());
206  break;
207  case REG_MDIC:
208  pkt->set<uint32_t>(regs.mdic());
209  break;
210  case REG_ICR:
211  DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
212  regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
213  pkt->set<uint32_t>(regs.icr());
214  if (regs.icr.int_assert() || regs.imr == 0) {
215  regs.icr = regs.icr() & ~mask(30);
216  DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
217  }
218  if (regs.ctrl_ext.iame() && regs.icr.int_assert())
219  regs.imr &= ~regs.iam;
220  chkInterrupt();
221  break;
222  case REG_EICR:
223  // This is only useful for MSI, but the driver reads it every time
224  // Just don't do anything
225  pkt->set<uint32_t>(0);
226  break;
227  case REG_ITR:
228  pkt->set<uint32_t>(regs.itr());
229  break;
230  case REG_RCTL:
231  pkt->set<uint32_t>(regs.rctl());
232  break;
233  case REG_FCTTV:
234  pkt->set<uint32_t>(regs.fcttv());
235  break;
236  case REG_TCTL:
237  pkt->set<uint32_t>(regs.tctl());
238  break;
239  case REG_PBA:
240  pkt->set<uint32_t>(regs.pba());
241  break;
242  case REG_WUC:
243  case REG_WUFC:
244  case REG_WUS:
245  case REG_LEDCTL:
246  pkt->set<uint32_t>(0); // We don't care, so just return 0
247  break;
248  case REG_FCRTL:
249  pkt->set<uint32_t>(regs.fcrtl());
250  break;
251  case REG_FCRTH:
252  pkt->set<uint32_t>(regs.fcrth());
253  break;
254  case REG_RDBAL:
255  pkt->set<uint32_t>(regs.rdba.rdbal());
256  break;
257  case REG_RDBAH:
258  pkt->set<uint32_t>(regs.rdba.rdbah());
259  break;
260  case REG_RDLEN:
261  pkt->set<uint32_t>(regs.rdlen());
262  break;
263  case REG_SRRCTL:
264  pkt->set<uint32_t>(regs.srrctl());
265  break;
266  case REG_RDH:
267  pkt->set<uint32_t>(regs.rdh());
268  break;
269  case REG_RDT:
270  pkt->set<uint32_t>(regs.rdt());
271  break;
272  case REG_RDTR:
273  pkt->set<uint32_t>(regs.rdtr());
274  if (regs.rdtr.fpd()) {
275  rxDescCache.writeback(0);
276  DPRINTF(EthernetIntr,
277  "Posting interrupt because of RDTR.FPD write\n");
278  postInterrupt(IT_RXT);
279  regs.rdtr.fpd(0);
280  }
281  break;
282  case REG_RXDCTL:
283  pkt->set<uint32_t>(regs.rxdctl());
284  break;
285  case REG_RADV:
286  pkt->set<uint32_t>(regs.radv());
287  break;
288  case REG_TDBAL:
289  pkt->set<uint32_t>(regs.tdba.tdbal());
290  break;
291  case REG_TDBAH:
292  pkt->set<uint32_t>(regs.tdba.tdbah());
293  break;
294  case REG_TDLEN:
295  pkt->set<uint32_t>(regs.tdlen());
296  break;
297  case REG_TDH:
298  pkt->set<uint32_t>(regs.tdh());
299  break;
300  case REG_TXDCA_CTL:
301  pkt->set<uint32_t>(regs.txdca_ctl());
302  break;
303  case REG_TDT:
304  pkt->set<uint32_t>(regs.tdt());
305  break;
306  case REG_TIDV:
307  pkt->set<uint32_t>(regs.tidv());
308  break;
309  case REG_TXDCTL:
310  pkt->set<uint32_t>(regs.txdctl());
311  break;
312  case REG_TADV:
313  pkt->set<uint32_t>(regs.tadv());
314  break;
315  case REG_TDWBAL:
316  pkt->set<uint32_t>(regs.tdwba & mask(32));
317  break;
318  case REG_TDWBAH:
319  pkt->set<uint32_t>(regs.tdwba >> 32);
320  break;
321  case REG_RXCSUM:
322  pkt->set<uint32_t>(regs.rxcsum());
323  break;
324  case REG_RLPML:
325  pkt->set<uint32_t>(regs.rlpml);
326  break;
327  case REG_RFCTL:
328  pkt->set<uint32_t>(regs.rfctl());
329  break;
330  case REG_MANC:
331  pkt->set<uint32_t>(regs.manc());
332  break;
333  case REG_SWSM:
334  pkt->set<uint32_t>(regs.swsm());
335  regs.swsm.smbi(1);
336  break;
337  case REG_FWSM:
338  pkt->set<uint32_t>(regs.fwsm());
339  break;
340  case REG_SWFWSYNC:
341  pkt->set<uint32_t>(regs.sw_fw_sync);
342  break;
343  default:
344  if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
345  !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
346  !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
347  !IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
348  panic("Read request to unknown register number: %#x\n", daddr);
349  else
350  pkt->set<uint32_t>(0);
351  };
352 
353  pkt->makeAtomicResponse();
354  return pioDelay;
355 }
356 
357 Tick
358 IGbE::write(PacketPtr pkt)
359 {
360  int bar;
361  Addr daddr;
362 
363 
364  if (!getBAR(pkt->getAddr(), bar, daddr))
365  panic("Invalid PCI memory access to unmapped memory.\n");
366 
367  // Only Memory register BAR is allowed
368  assert(bar == 0);
369 
370  // Only 32bit accesses allowed
371  assert(pkt->getSize() == sizeof(uint32_t));
372 
373  DPRINTF(Ethernet, "Wrote device register %#X value %#X\n",
374  daddr, pkt->get<uint32_t>());
375 
376  //
377  // Handle write of register here
378  //
379  uint32_t val = pkt->get<uint32_t>();
380 
381  Regs::RCTL oldrctl;
382  Regs::TCTL oldtctl;
383 
384  switch (daddr) {
385  case REG_CTRL:
386  regs.ctrl = val;
387  if (regs.ctrl.tfce())
388  warn("TX Flow control enabled, should implement\n");
389  if (regs.ctrl.rfce())
390  warn("RX Flow control enabled, should implement\n");
391  break;
392  case REG_CTRL_EXT:
393  regs.ctrl_ext = val;
394  break;
395  case REG_STATUS:
396  regs.sts = val;
397  break;
398  case REG_EECD:
399  int oldClk;
400  oldClk = regs.eecd.sk();
401  regs.eecd = val;
402  // See if this is an EEPROM access and emulate accordingly
403  if (!oldClk && regs.eecd.sk()) {
404  if (eeOpBits < 8) {
405  eeOpcode = eeOpcode << 1 | regs.eecd.din();
406  eeOpBits++;
407  } else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
408  eeAddr = eeAddr << 1 | regs.eecd.din();
409  eeAddrBits++;
410  } else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
411  assert(eeAddr>>1 < EEPROM_SIZE);
412  DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
413  flash[eeAddr>>1] >> eeDataBits & 0x1,
414  flash[eeAddr>>1]);
415  regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
416  eeDataBits++;
417  } else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
418  regs.eecd.dout(0);
419  eeDataBits++;
420  } else
421  panic("What's going on with eeprom interface? opcode:"
422  " %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
423  (uint32_t)eeOpBits, (uint32_t)eeAddr,
424  (uint32_t)eeAddrBits, (uint32_t)eeDataBits);
425 
426  // Reset everything for the next command
427  if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
428  (eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
429  eeOpBits = 0;
430  eeAddrBits = 0;
431  eeDataBits = 0;
432  eeOpcode = 0;
433  eeAddr = 0;
434  }
435 
436  DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
437  (uint32_t)eeOpcode, (uint32_t) eeOpBits,
438  (uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
439  if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
440  eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
441  panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
442  (uint32_t)eeOpBits);
443 
444 
445  }
446  // If the driver requests EEPROM access, grant it immediately
447  regs.eecd.ee_gnt(regs.eecd.ee_req());
448  break;
449  case REG_EERD:
450  regs.eerd = val;
451  if (regs.eerd.start()) {
452  regs.eerd.done(1);
453  assert(regs.eerd.addr() < EEPROM_SIZE);
454  regs.eerd.data(flash[regs.eerd.addr()]);
455  regs.eerd.start(0);
456  DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
457  regs.eerd.addr(), regs.eerd.data());
458  }
459  break;
460  case REG_MDIC:
461  regs.mdic = val;
462  if (regs.mdic.i())
463  panic("No support for interrupt on mdic complete\n");
464  if (regs.mdic.phyadd() != 1)
465  panic("No support for reading anything but phy\n");
466  DPRINTF(Ethernet, "%s phy address %x\n",
467  regs.mdic.op() == 1 ? "Writing" : "Reading",
468  regs.mdic.regadd());
469  switch (regs.mdic.regadd()) {
470  case PHY_PSTATUS:
471  regs.mdic.data(0x796D); // link up
472  break;
473  case PHY_PID:
474  regs.mdic.data(params()->phy_pid);
475  break;
476  case PHY_EPID:
477  regs.mdic.data(params()->phy_epid);
478  break;
479  case PHY_GSTATUS:
480  regs.mdic.data(0x7C00);
481  break;
482  case PHY_EPSTATUS:
483  regs.mdic.data(0x3000);
484  break;
485  case PHY_AGC:
486  regs.mdic.data(0x180); // some random length
487  break;
488  default:
489  regs.mdic.data(0);
490  }
491  regs.mdic.r(1);
492  break;
493  case REG_ICR:
494  DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
495  regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
496  if (regs.ctrl_ext.iame())
497  regs.imr &= ~regs.iam;
498  regs.icr = ~bits(val,30,0) & regs.icr();
499  chkInterrupt();
500  break;
501  case REG_ITR:
502  regs.itr = val;
503  break;
504  case REG_ICS:
505  DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
506  postInterrupt((IntTypes)val);
507  break;
508  case REG_IMS:
509  regs.imr |= val;
510  chkInterrupt();
511  break;
512  case REG_IMC:
513  regs.imr &= ~val;
514  chkInterrupt();
515  break;
516  case REG_IAM:
517  regs.iam = val;
518  break;
519  case REG_RCTL:
520  oldrctl = regs.rctl;
521  regs.rctl = val;
522  if (regs.rctl.rst()) {
523  rxDescCache.reset();
524  DPRINTF(EthernetSM, "RXS: Got RESET!\n");
525  rxFifo.clear();
526  regs.rctl.rst(0);
527  }
528  if (regs.rctl.en())
529  rxTick = true;
530  restartClock();
531  break;
532  case REG_FCTTV:
533  regs.fcttv = val;
534  break;
535  case REG_TCTL:
536  regs.tctl = val;
537  oldtctl = regs.tctl;
538  regs.tctl = val;
539  if (regs.tctl.en())
540  txTick = true;
541  restartClock();
542  if (regs.tctl.en() && !oldtctl.en()) {
543  txDescCache.reset();
544  }
545  break;
546  case REG_PBA:
547  regs.pba.rxa(val);
548  regs.pba.txa(64 - regs.pba.rxa());
549  break;
550  case REG_WUC:
551  case REG_WUFC:
552  case REG_WUS:
553  case REG_LEDCTL:
554  case REG_FCAL:
555  case REG_FCAH:
556  case REG_FCT:
557  case REG_VET:
558  case REG_AIFS:
559  case REG_TIPG:
560  ; // We don't care, so don't store anything
561  break;
562  case REG_IVAR0:
563  warn("Writing to IVAR0, ignoring...\n");
564  break;
565  case REG_FCRTL:
566  regs.fcrtl = val;
567  break;
568  case REG_FCRTH:
569  regs.fcrth = val;
570  break;
571  case REG_RDBAL:
572  regs.rdba.rdbal( val & ~mask(4));
573  rxDescCache.areaChanged();
574  break;
575  case REG_RDBAH:
576  regs.rdba.rdbah(val);
577  rxDescCache.areaChanged();
578  break;
579  case REG_RDLEN:
580  regs.rdlen = val & ~mask(7);
581  rxDescCache.areaChanged();
582  break;
583  case REG_SRRCTL:
584  regs.srrctl = val;
585  break;
586  case REG_RDH:
587  regs.rdh = val;
588  rxDescCache.areaChanged();
589  break;
590  case REG_RDT:
591  regs.rdt = val;
592  DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
593  if (drainState() == DrainState::Running) {
594  DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
595  rxDescCache.fetchDescriptors();
596  } else {
597  DPRINTF(EthernetSM, "RXS: RDT NOT Fetching Desc b/c draining!\n");
598  }
599  break;
600  case REG_RDTR:
601  regs.rdtr = val;
602  break;
603  case REG_RADV:
604  regs.radv = val;
605  break;
606  case REG_RXDCTL:
607  regs.rxdctl = val;
608  break;
609  case REG_TDBAL:
610  regs.tdba.tdbal( val & ~mask(4));
611  txDescCache.areaChanged();
612  break;
613  case REG_TDBAH:
614  regs.tdba.tdbah(val);
615  txDescCache.areaChanged();
616  break;
617  case REG_TDLEN:
618  regs.tdlen = val & ~mask(7);
619  txDescCache.areaChanged();
620  break;
621  case REG_TDH:
622  regs.tdh = val;
623  txDescCache.areaChanged();
624  break;
625  case REG_TXDCA_CTL:
626  regs.txdca_ctl = val;
627  if (regs.txdca_ctl.enabled())
628  panic("No support for DCA\n");
629  break;
630  case REG_TDT:
631  regs.tdt = val;
632  DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
633  if (drainState() == DrainState::Running) {
634  DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
635  txDescCache.fetchDescriptors();
636  } else {
637  DPRINTF(EthernetSM, "TXS: TDT NOT Fetching Desc b/c draining!\n");
638  }
639  break;
640  case REG_TIDV:
641  regs.tidv = val;
642  break;
643  case REG_TXDCTL:
644  regs.txdctl = val;
645  break;
646  case REG_TADV:
647  regs.tadv = val;
648  break;
649  case REG_TDWBAL:
650  regs.tdwba &= ~mask(32);
651  regs.tdwba |= val;
652  txDescCache.completionWriteback(regs.tdwba & ~mask(1),
653  regs.tdwba & mask(1));
654  break;
655  case REG_TDWBAH:
656  regs.tdwba &= mask(32);
657  regs.tdwba |= (uint64_t)val << 32;
658  txDescCache.completionWriteback(regs.tdwba & ~mask(1),
659  regs.tdwba & mask(1));
660  break;
661  case REG_RXCSUM:
662  regs.rxcsum = val;
663  break;
664  case REG_RLPML:
665  regs.rlpml = val;
666  break;
667  case REG_RFCTL:
668  regs.rfctl = val;
669  if (regs.rfctl.exsten())
670  panic("Extended RX descriptors not implemented\n");
671  break;
672  case REG_MANC:
673  regs.manc = val;
674  break;
675  case REG_SWSM:
676  regs.swsm = val;
677  if (regs.fwsm.eep_fw_semaphore())
678  regs.swsm.swesmbi(0);
679  break;
680  case REG_SWFWSYNC:
681  regs.sw_fw_sync = val;
682  break;
683  default:
684  if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
685  !IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
686  !IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
687  panic("Write request to unknown register number: %#x\n", daddr);
688  };
689 
690  pkt->makeAtomicResponse();
691  return pioDelay;
692 }
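One of the less obvious register pairs handled in write() above is TDWBAL/TDWBAH: together they hold a 64-bit transmit-descriptor completion write-back address whose lowest bit doubles as an enable flag. The following is a small sketch of that packing using plain masks instead of gem5's mask() helper; the example values are arbitrary and the snippet is not part of the model.

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t tdwba = 0;
    uint32_t tdwbal = 0x12345679; // low word: address bits plus enable (bit 0)
    uint32_t tdwbah = 0x00000001; // high word of the address

    // REG_TDWBAL write: replace the low 32 bits.
    tdwba &= ~0xffffffffull;
    tdwba |= tdwbal;
    // REG_TDWBAH write: replace the high 32 bits.
    tdwba &= 0xffffffffull;
    tdwba |= static_cast<uint64_t>(tdwbah) << 32;

    bool enabled = tdwba & 0x1;
    uint64_t addr = tdwba & ~0x3ull; // completion address, low bits cleared

    std::printf("completion writeback %s, address %#llx\n",
                enabled ? "enabled" : "disabled",
                static_cast<unsigned long long>(addr));
    return 0;
}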
693 
694 void
695 IGbE::postInterrupt(IntTypes t, bool now)
696 {
697  assert(t);
698 
699  // Interrupt is already pending
700  if (t & regs.icr() && !now)
701  return;
702 
703  regs.icr = regs.icr() | t;
704 
705  Tick itr_interval = SimClock::Int::ns * 256 * regs.itr.interval();
706  DPRINTF(EthernetIntr,
707  "EINT: postInterrupt() curTick(): %d itr: %d interval: %d\n",
708  curTick(), regs.itr.interval(), itr_interval);
709 
710  if (regs.itr.interval() == 0 || now ||
711  lastInterrupt + itr_interval <= curTick()) {
712  if (interEvent.scheduled()) {
713  deschedule(interEvent);
714  }
715  cpuPostInt();
716  } else {
717  Tick int_time = lastInterrupt + itr_interval;
718  assert(int_time > 0);
719  DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %d\n",
720  int_time);
721  if (!interEvent.scheduled()) {
722  schedule(interEvent, int_time);
723  }
724  }
725 }
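postInterrupt() above throttles interrupts with the ITR register: the interval field counts in 256 ns units, and a new interrupt is only posted immediately if at least that much simulated time has passed since the last one; otherwise an event is scheduled for the earliest allowed tick. A standalone sketch of the same decision follows; the 1 ps tick base is an assumption matching gem5's usual default, and all values are illustrative.

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t ticks_per_ns = 1000;   // assumption: 1 tick = 1 ps
    const uint64_t itr_interval = 100;    // ITR interval field set by the driver

    const uint64_t min_spacing = ticks_per_ns * 256 * itr_interval; // in ticks

    uint64_t last_interrupt = 0;
    uint64_t now = 20ull * 1000 * 1000;   // 20 us after the previous interrupt

    if (itr_interval == 0 || last_interrupt + min_spacing <= now)
        std::printf("post the interrupt now\n");
    else
        std::printf("defer the interrupt until tick %llu\n",
                    static_cast<unsigned long long>(last_interrupt + min_spacing));
    return 0;
}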
726 
727 void
728 IGbE::delayIntEvent()
729 {
730  cpuPostInt();
731 }
732 
733 
734 void
735 IGbE::cpuPostInt()
736 {
737 
738  postedInterrupts++;
739 
740  if (!(regs.icr() & regs.imr)) {
741  DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
742  return;
743  }
744 
745  DPRINTF(Ethernet, "Posting Interrupt\n");
746 
747 
748  if (interEvent.scheduled()) {
749  deschedule(interEvent);
750  }
751 
752  if (rdtrEvent.scheduled()) {
753  regs.icr.rxt0(1);
754  deschedule(rdtrEvent);
755  }
756  if (radvEvent.scheduled()) {
757  regs.icr.rxt0(1);
758  deschedule(radvEvent);
759  }
760  if (tadvEvent.scheduled()) {
761  regs.icr.txdw(1);
762  deschedule(tadvEvent);
763  }
764  if (tidvEvent.scheduled()) {
765  regs.icr.txdw(1);
766  deschedule(tidvEvent);
767  }
768 
769  regs.icr.int_assert(1);
770  DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
771  regs.icr());
772 
773  intrPost();
774 
776 }
777 
778 void
779 IGbE::cpuClearInt()
780 {
781  if (regs.icr.int_assert()) {
782  regs.icr.int_assert(0);
783  DPRINTF(EthernetIntr,
784  "EINT: Clearing interrupt to CPU now. Vector %#x\n",
785  regs.icr());
786  intrClear();
787  }
788 }
789 
790 void
791 IGbE::chkInterrupt()
792 {
793  DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
794  regs.imr);
795  // Check if we need to clear the cpu interrupt
796  if (!(regs.icr() & regs.imr)) {
797  DPRINTF(Ethernet, "Mask cleared all interrupts\n");
798  if (interEvent.scheduled())
799  deschedule(interEvent);
800  if (regs.icr.int_assert())
801  cpuClearInt();
802  }
803  DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
804  regs.itr(), regs.itr.interval());
805 
806  if (regs.icr() & regs.imr) {
807  if (regs.itr.interval() == 0) {
808  cpuPostInt();
809  } else {
810  DPRINTF(Ethernet,
811  "Possibly scheduling interrupt because of imr write\n");
812  if (!interEvent.scheduled()) {
813  Tick t = curTick() + SimClock::Int::ns * 256 * regs.itr.interval();
814  DPRINTF(Ethernet, "Scheduling for %d\n", t);
815  schedule(interEvent, t);
816  }
817  }
818  }
819 }
820 
821 
823 
824 template<class T>
825 IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
826  : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
827  wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
828  wbDelayEvent(this), fetchDelayEvent(this), fetchEvent(this),
829  wbEvent(this)
830 {
831  fetchBuf = new T[size];
832  wbBuf = new T[size];
833 }
834 
835 template<class T>
836 IGbE::DescCache<T>::~DescCache()
837 {
838  reset();
839  delete[] fetchBuf;
840  delete[] wbBuf;
841 }
842 
843 template<class T>
844 void
845 IGbE::DescCache<T>::areaChanged()
846 {
847  if (usedCache.size() > 0 || curFetching || wbOut)
848  panic("Descriptor Address, Length or Head changed. Bad\n");
849  reset();
850 
851 }
852 
853 template<class T>
854 void
855 IGbE::DescCache<T>::writeback(Addr aMask)
856 {
857  int curHead = descHead();
858  int max_to_wb = usedCache.size();
859 
860  // Check if this writeback is less restrictive than the previous
861  // and if so set up another one immediately following it
862  if (wbOut) {
863  if (aMask < wbAlignment) {
864  moreToWb = true;
865  wbAlignment = aMask;
866  }
867  DPRINTF(EthernetDesc,
868  "Writing back already in process, returning\n");
869  return;
870  }
871 
872  moreToWb = false;
873  wbAlignment = aMask;
874 
875 
876  DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: "
877  "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n",
878  curHead, descTail(), descLen(), cachePnt, max_to_wb,
879  descLeft());
880 
881  if (max_to_wb + curHead >= descLen()) {
882  max_to_wb = descLen() - curHead;
883  moreToWb = true;
884  // this is by definition aligned correctly
885  } else if (wbAlignment != 0) {
886  // align the wb point to the mask
887  max_to_wb = max_to_wb & ~wbAlignment;
888  }
889 
890  DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb);
891 
892  if (max_to_wb <= 0) {
893  if (usedCache.size())
894  igbe->anBegin(annSmWb, "Wait Alignment", CPA::FL_WAIT);
895  else
896  igbe->anWe(annSmWb, annUsedCacheQ);
897  return;
898  }
899 
900  wbOut = max_to_wb;
901 
902  assert(!wbDelayEvent.scheduled());
903  igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
904  igbe->anBegin(annSmWb, "Prepare Writeback Desc");
905 }
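The alignment handling in writeback() above rounds the number of descriptors written back down to a multiple implied by the mask the caller passes in (callers use (cacheBlockSize()-1)>>4 for 16-byte descriptors). Here is a small sketch of that arithmetic with assumed sizes; it is illustrative only and not the model's code.

#include <cstdio>

int main()
{
    const int desc_size = 16;                        // bytes per descriptor
    const int block_size = 64;                       // assumed cache block size
    const int aMask = (block_size - 1) / desc_size;  // == 3, same as (64-1)>>4

    int used = 10;                 // descriptors ready to write back
    int max_to_wb = used & ~aMask; // round down to a multiple of 4

    std::printf("writing back %d of %d descriptors (mask %#x)\n",
                max_to_wb, used, aMask);
    return 0;
}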
906 
907 template<class T>
908 void
909 IGbE::DescCache<T>::writeback1()
910 {
911  // If we're draining, delay issuing this DMA
912  if (igbe->drainState() != DrainState::Running) {
913  igbe->schedule(wbDelayEvent, curTick() + igbe->wbDelay);
914  return;
915  }
916 
917  DPRINTF(EthernetDesc, "Beginning DMA of %d descriptors\n", wbOut);
918 
919  for (int x = 0; x < wbOut; x++) {
920  assert(usedCache.size());
921  memcpy(&wbBuf[x], usedCache[x], sizeof(T));
922  igbe->anPq(annSmWb, annUsedCacheQ);
923  igbe->anPq(annSmWb, annDescQ);
924  igbe->anQ(annSmWb, annUsedDescQ);
925  }
926 
927 
928  igbe->anBegin(annSmWb, "Writeback Desc DMA");
929 
930  assert(wbOut);
931  igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
932  wbOut * sizeof(T), &wbEvent, (uint8_t*)wbBuf,
933  igbe->wbCompDelay);
934 }
935 
936 template<class T>
937 void
938 IGbE::DescCache<T>::fetchDescriptors()
939 {
940  size_t max_to_fetch;
941 
942  if (curFetching) {
943  DPRINTF(EthernetDesc,
944  "Currently fetching %d descriptors, returning\n",
945  curFetching);
946  return;
947  }
948 
949  if (descTail() >= cachePnt)
950  max_to_fetch = descTail() - cachePnt;
951  else
952  max_to_fetch = descLen() - cachePnt;
953 
954  size_t free_cache = size - usedCache.size() - unusedCache.size();
955 
956  if (!max_to_fetch)
957  igbe->anWe(annSmFetch, annUnusedDescQ);
958  else
959  igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
960 
961  if (max_to_fetch) {
962  if (!free_cache)
963  igbe->anWf(annSmFetch, annDescQ);
964  else
965  igbe->anRq(annSmFetch, annDescQ, free_cache);
966  }
967 
968  max_to_fetch = std::min(max_to_fetch, free_cache);
969 
970 
971  DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: "
972  "%d len: %d cachePnt: %d max_to_fetch: %d descleft: %d\n",
973  descHead(), descTail(), descLen(), cachePnt,
974  max_to_fetch, descLeft());
975 
976  // Nothing to do
977  if (max_to_fetch == 0)
978  return;
979 
980  // So we don't have two descriptor fetches going on at once
981  curFetching = max_to_fetch;
982 
983  assert(!fetchDelayEvent.scheduled());
984  igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
985  igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
986 }
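fetchDescriptors() above works out how many descriptors can be pulled in from the ring: the distance from the cache pointer to the tail (stopping at the wrap point so a single DMA never crosses the end of the ring), further capped by the free space in the on-chip descriptor cache. A small sketch of that ring arithmetic with made-up values, purely for illustration:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
    std::size_t desc_len = 256;   // descriptors in the ring
    std::size_t cache_pnt = 250;  // next descriptor the device would fetch
    std::size_t tail = 10;        // last descriptor made available by software

    // Fetch up to the tail, but never past the end of the ring in one go.
    std::size_t max_to_fetch = (tail >= cache_pnt) ? tail - cache_pnt
                                                   : desc_len - cache_pnt;

    std::size_t free_cache = 4;   // free slots in the descriptor cache
    max_to_fetch = std::min(max_to_fetch, free_cache);

    std::printf("fetch %zu descriptors starting at index %zu\n",
                max_to_fetch, cache_pnt);
    return 0;
}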
987 
988 template<class T>
989 void
990 IGbE::DescCache<T>::fetchDescriptors1()
991 {
992  // If we're draining, delay issuing this DMA
993  if (igbe->drainState() != DrainState::Running) {
994  igbe->schedule(fetchDelayEvent, curTick() + igbe->fetchDelay);
995  return;
996  }
997 
998  igbe->anBegin(annSmFetch, "Fetch Desc");
999 
1000  DPRINTF(EthernetDesc, "Fetching descriptors at %#x (%#x), size: %#x\n",
1001  descBase() + cachePnt * sizeof(T),
1002  pciToDma(descBase() + cachePnt * sizeof(T)),
1003  curFetching * sizeof(T));
1004  assert(curFetching);
1005  igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
1006  curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf,
1007  igbe->fetchCompDelay);
1008 }
1009 
1010 template<class T>
1011 void
1012 IGbE::DescCache<T>::fetchComplete()
1013 {
1014  T *newDesc;
1015  igbe->anBegin(annSmFetch, "Fetch Complete");
1016  for (int x = 0; x < curFetching; x++) {
1017  newDesc = new T;
1018  memcpy(newDesc, &fetchBuf[x], sizeof(T));
1019  unusedCache.push_back(newDesc);
1020  igbe->anDq(annSmFetch, annUnusedDescQ);
1021  igbe->anQ(annSmFetch, annUnusedCacheQ);
1022  igbe->anQ(annSmFetch, annDescQ);
1023  }
1024 
1025 
1026 #ifndef NDEBUG
1027  int oldCp = cachePnt;
1028 #endif
1029 
1030  cachePnt += curFetching;
1031  assert(cachePnt <= descLen());
1032  if (cachePnt == descLen())
1033  cachePnt = 0;
1034 
1035  curFetching = 0;
1036 
1037  DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n",
1038  oldCp, cachePnt);
1039 
1040  if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
1041  cachePnt)) == 0)
1042  {
1043  igbe->anWe(annSmFetch, annUnusedDescQ);
1044  } else if (!(size - usedCache.size() - unusedCache.size())) {
1045  igbe->anWf(annSmFetch, annDescQ);
1046  } else {
1047  igbe->anBegin(annSmFetch, "Wait", CPA::FL_WAIT);
1048  }
1049 
1050  enableSm();
1051  igbe->checkDrain();
1052 }
1053 
1054 template<class T>
1055 void
1056 IGbE::DescCache<T>::wbComplete()
1057 {
1058 
1059  igbe->anBegin(annSmWb, "Finish Writeback");
1060 
1061  long curHead = descHead();
1062 #ifndef NDEBUG
1063  long oldHead = curHead;
1064 #endif
1065 
1066  for (int x = 0; x < wbOut; x++) {
1067  assert(usedCache.size());
1068  delete usedCache[0];
1069  usedCache.pop_front();
1070 
1071  igbe->anDq(annSmWb, annUsedCacheQ);
1072  igbe->anDq(annSmWb, annDescQ);
1073  }
1074 
1075  curHead += wbOut;
1076  wbOut = 0;
1077 
1078  if (curHead >= descLen())
1079  curHead -= descLen();
1080 
1081  // Update the head
1082  updateHead(curHead);
1083 
1084  DPRINTF(EthernetDesc, "Writeback complete curHead %d -> %d\n",
1085  oldHead, curHead);
1086 
1087  // If we still have more to wb, call wb now
1088  actionAfterWb();
1089  if (moreToWb) {
1090  moreToWb = false;
1091  DPRINTF(EthernetDesc, "Writeback has more todo\n");
1092  writeback(wbAlignment);
1093  }
1094 
1095  if (!wbOut) {
1096  igbe->checkDrain();
1097  if (usedCache.size())
1098  igbe->anBegin(annSmWb, "Wait", CPA::FL_WAIT);
1099  else
1100  igbe->anWe(annSmWb, annUsedCacheQ);
1101  }
1102  fetchAfterWb();
1103 }
1104 
1105 template<class T>
1106 void
1107 IGbE::DescCache<T>::reset()
1108 {
1109  DPRINTF(EthernetDesc, "Resetting descriptor cache\n");
1110  for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
1111  delete usedCache[x];
1112  for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
1113  delete unusedCache[x];
1114 
1115  usedCache.clear();
1116  unusedCache.clear();
1117 
1118  cachePnt = 0;
1119 
1120 }
1121 
1122 template<class T>
1123 void
1124 IGbE::DescCache<T>::serialize(CheckpointOut &cp) const
1125 {
1126  SERIALIZE_SCALAR(cachePnt);
1127  SERIALIZE_SCALAR(curFetching);
1128  SERIALIZE_SCALAR(wbOut);
1129  SERIALIZE_SCALAR(moreToWb);
1130  SERIALIZE_SCALAR(wbAlignment);
1131 
1132  typename CacheType::size_type usedCacheSize = usedCache.size();
1133  SERIALIZE_SCALAR(usedCacheSize);
1134  for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1135  arrayParamOut(cp, csprintf("usedCache_%d", x),
1136  (uint8_t*)usedCache[x],sizeof(T));
1137  }
1138 
1139  typename CacheType::size_type unusedCacheSize = unusedCache.size();
1140  SERIALIZE_SCALAR(unusedCacheSize);
1141  for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1142  arrayParamOut(cp, csprintf("unusedCache_%d", x),
1143  (uint8_t*)unusedCache[x],sizeof(T));
1144  }
1145 
1146  Tick fetch_delay = 0, wb_delay = 0;
1147  if (fetchDelayEvent.scheduled())
1148  fetch_delay = fetchDelayEvent.when();
1149  SERIALIZE_SCALAR(fetch_delay);
1150  if (wbDelayEvent.scheduled())
1151  wb_delay = wbDelayEvent.when();
1152  SERIALIZE_SCALAR(wb_delay);
1153 
1154 
1155 }
1156 
1157 template<class T>
1158 void
1159 IGbE::DescCache<T>::unserialize(CheckpointIn &cp)
1160 {
1161  UNSERIALIZE_SCALAR(cachePnt);
1162  UNSERIALIZE_SCALAR(curFetching);
1163  UNSERIALIZE_SCALAR(wbOut);
1164  UNSERIALIZE_SCALAR(moreToWb);
1165  UNSERIALIZE_SCALAR(wbAlignment);
1166 
1167  typename CacheType::size_type usedCacheSize;
1168  UNSERIALIZE_SCALAR(usedCacheSize);
1169  T *temp;
1170  for (typename CacheType::size_type x = 0; x < usedCacheSize; x++) {
1171  temp = new T;
1172  arrayParamIn(cp, csprintf("usedCache_%d", x),
1173  (uint8_t*)temp,sizeof(T));
1174  usedCache.push_back(temp);
1175  }
1176 
1177  typename CacheType::size_type unusedCacheSize;
1178  UNSERIALIZE_SCALAR(unusedCacheSize);
1179  for (typename CacheType::size_type x = 0; x < unusedCacheSize; x++) {
1180  temp = new T;
1181  arrayParamIn(cp, csprintf("unusedCache_%d", x),
1182  (uint8_t*)temp,sizeof(T));
1183  unusedCache.push_back(temp);
1184  }
1185  Tick fetch_delay = 0, wb_delay = 0;
1186  UNSERIALIZE_SCALAR(fetch_delay);
1187  UNSERIALIZE_SCALAR(wb_delay);
1188  if (fetch_delay)
1189  igbe->schedule(fetchDelayEvent, fetch_delay);
1190  if (wb_delay)
1191  igbe->schedule(wbDelayEvent, wb_delay);
1192 
1193 
1194 }
1195 
1197 
1198 IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
1199  : DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
1200  pktEvent(this), pktHdrEvent(this), pktDataEvent(this)
1201 
1202 {
1203  annSmFetch = "RX Desc Fetch";
1204  annSmWb = "RX Desc Writeback";
1205  annUnusedDescQ = "RX Unused Descriptors";
1206  annUnusedCacheQ = "RX Unused Descriptor Cache";
1207  annUsedCacheQ = "RX Used Descriptor Cache";
1208  annUsedDescQ = "RX Used Descriptors";
1209  annDescQ = "RX Descriptors";
1210 }
1211 
1212 void
1213 IGbE::RxDescCache::pktSplitDone()
1214 {
1215  splitCount++;
1216  DPRINTF(EthernetDesc,
1217  "Part of split packet done: splitcount now %d\n", splitCount);
1218  assert(splitCount <= 2);
1219  if (splitCount != 2)
1220  return;
1221  splitCount = 0;
1222  DPRINTF(EthernetDesc,
1223  "Part of split packet done: calling pktComplete()\n");
1224  pktComplete();
1225 }
1226 
1227 int
1228 IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
1229 {
1230  assert(unusedCache.size());
1231  //if (!unusedCache.size())
1232  // return false;
1233 
1234  pktPtr = packet;
1235  pktDone = false;
1236  unsigned buf_len, hdr_len;
1237 
1238  RxDesc *desc = unusedCache.front();
1239  switch (igbe->regs.srrctl.desctype()) {
1240  case RXDT_LEGACY:
1241  assert(pkt_offset == 0);
1242  bytesCopied = packet->length;
1243  DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
1244  packet->length, igbe->regs.rctl.descSize());
1245  assert(packet->length < igbe->regs.rctl.descSize());
1246  igbe->dmaWrite(pciToDma(desc->legacy.buf),
1247  packet->length, &pktEvent, packet->data,
1248  igbe->rxWriteDelay);
1249  break;
1250  case RXDT_ADV_ONEBUF:
1251  assert(pkt_offset == 0);
1252  bytesCopied = packet->length;
1253  buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1254  igbe->regs.rctl.descSize();
1255  DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
1256  packet->length, igbe->regs.srrctl(), buf_len);
1257  assert(packet->length < buf_len);
1258  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1259  packet->length, &pktEvent, packet->data,
1260  igbe->rxWriteDelay);
1261  desc->adv_wb.header_len = htole(0);
1262  desc->adv_wb.sph = htole(0);
1263  desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
1264  break;
1265  case RXDT_ADV_SPLIT_A:
1266  int split_point;
1267 
1268  buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
1269  igbe->regs.rctl.descSize();
1270  hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
1271  DPRINTF(EthernetDesc,
1272  "lpe: %d Packet Length: %d offset: %d srrctl: %#x "
1273  "hdr addr: %#x Hdr Size: %d desc addr: %#x Desc Size: %d\n",
1274  igbe->regs.rctl.lpe(), packet->length, pkt_offset,
1275  igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
1276  desc->adv_read.pkt, buf_len);
1277 
1278  split_point = hsplit(pktPtr);
1279 
1280  if (packet->length <= hdr_len) {
1281  bytesCopied = packet->length;
1282  assert(pkt_offset == 0);
1283  DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
1284  igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1285  packet->length, &pktEvent, packet->data,
1286  igbe->rxWriteDelay);
1287  desc->adv_wb.header_len = htole((uint16_t)packet->length);
1288  desc->adv_wb.sph = htole(0);
1289  desc->adv_wb.pkt_len = htole(0);
1290  } else if (split_point) {
1291  if (pkt_offset) {
1292  // we are only copying some data, header/data has already been
1293  // copied
1294  int max_to_copy =
1295  std::min(packet->length - pkt_offset, buf_len);
1296  bytesCopied += max_to_copy;
1297  DPRINTF(EthernetDesc,
1298  "Hdr split: Continuing data buffer copy\n");
1299  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1300  max_to_copy, &pktEvent,
1301  packet->data + pkt_offset, igbe->rxWriteDelay);
1302  desc->adv_wb.header_len = htole(0);
1303  desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
1304  desc->adv_wb.sph = htole(0);
1305  } else {
1306  int max_to_copy =
1307  std::min(packet->length - split_point, buf_len);
1308  bytesCopied += max_to_copy + split_point;
1309 
1310  DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
1311  split_point);
1312  igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
1313  split_point, &pktHdrEvent,
1314  packet->data, igbe->rxWriteDelay);
1315  igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
1316  max_to_copy, &pktDataEvent,
1317  packet->data + split_point, igbe->rxWriteDelay);
1318  desc->adv_wb.header_len = htole(split_point);
1319  desc->adv_wb.sph = 1;
1320  desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
1321  }
1322  } else {
1323  panic("Header split not fitting within header buffer or "
1324  "undecodable packet not fitting in header unsupported\n");
1325  }
1326  break;
1327  default:
1328  panic("Unimplemented RX receive buffer type: %d\n",
1329  igbe->regs.srrctl.desctype());
1330  }
1331  return bytesCopied;
1332 
1333 }
1334 
1335 void
1336 IGbE::RxDescCache::pktComplete()
1337 {
1338  assert(unusedCache.size());
1339  RxDesc *desc;
1340  desc = unusedCache.front();
1341 
1342  igbe->anBegin("RXS", "Update Desc");
1343 
1344  uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
1345  DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
1346  "stripcrc offset: %d value written: %d %d\n",
1347  pktPtr->length, bytesCopied, crcfixup,
1348  htole((uint16_t)(pktPtr->length + crcfixup)),
1349  (uint16_t)(pktPtr->length + crcfixup));
1350 
1351  // no support for anything but starting at 0
1352  assert(igbe->regs.rxcsum.pcss() == 0);
1353 
1354  DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
1355 
1356  uint16_t status = RXDS_DD;
1357  uint8_t err = 0;
1358  uint16_t ext_err = 0;
1359  uint16_t csum = 0;
1360  uint16_t ptype = 0;
1361  uint16_t ip_id = 0;
1362 
1363  assert(bytesCopied <= pktPtr->length);
1364  if (bytesCopied == pktPtr->length)
1365  status |= RXDS_EOP;
1366 
1367  IpPtr ip(pktPtr);
1368 
1369  if (ip) {
1370  DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n", ip->id());
1371  ptype |= RXDP_IPV4;
1372  ip_id = ip->id();
1373 
1374  if (igbe->regs.rxcsum.ipofld()) {
1375  DPRINTF(EthernetDesc, "Checking IP checksum\n");
1376  status |= RXDS_IPCS;
1377  csum = htole(cksum(ip));
1378  igbe->rxIpChecksums++;
1379  if (cksum(ip) != 0) {
1380  err |= RXDE_IPE;
1381  ext_err |= RXDEE_IPE;
1382  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1383  }
1384  }
1385  TcpPtr tcp(ip);
1386  if (tcp && igbe->regs.rxcsum.tuofld()) {
1387  DPRINTF(EthernetDesc, "Checking TCP checksum\n");
1388  status |= RXDS_TCPCS;
1389  ptype |= RXDP_TCP;
1390  csum = htole(cksum(tcp));
1391  igbe->rxTcpChecksums++;
1392  if (cksum(tcp) != 0) {
1393  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1394  err |= RXDE_TCPE;
1395  ext_err |= RXDEE_TCPE;
1396  }
1397  }
1398 
1399  UdpPtr udp(ip);
1400  if (udp && igbe->regs.rxcsum.tuofld()) {
1401  DPRINTF(EthernetDesc, "Checking UDP checksum\n");
1402  status |= RXDS_UDPCS;
1403  ptype |= RXDP_UDP;
1404  csum = htole(cksum(udp));
1405  igbe->rxUdpChecksums++;
1406  if (cksum(udp) != 0) {
1407  DPRINTF(EthernetDesc, "Checksum is bad!!\n");
1408  ext_err |= RXDEE_TCPE;
1409  err |= RXDE_TCPE;
1410  }
1411  }
1412  } else { // if ip
1413  DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
1414  }
1415 
1416  switch (igbe->regs.srrctl.desctype()) {
1417  case RXDT_LEGACY:
1418  desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
1419  desc->legacy.status = htole(status);
1420  desc->legacy.errors = htole(err);
1421  // No vlan support at this point... just set it to 0
1422  desc->legacy.vlan = 0;
1423  break;
1424  case RXDT_ADV_SPLIT_A:
1425  case RXDT_ADV_ONEBUF:
1426  desc->adv_wb.rss_type = htole(0);
1427  desc->adv_wb.pkt_type = htole(ptype);
1428  if (igbe->regs.rxcsum.pcsd()) {
1429  // no rss support right now
1430  desc->adv_wb.rss_hash = htole(0);
1431  } else {
1432  desc->adv_wb.id = htole(ip_id);
1433  desc->adv_wb.csum = htole(csum);
1434  }
1435  desc->adv_wb.status = htole(status);
1436  desc->adv_wb.errors = htole(ext_err);
1437  // no vlan support
1438  desc->adv_wb.vlan_tag = htole(0);
1439  break;
1440  default:
1441  panic("Unimplemented RX receive buffer type %d\n",
1442  igbe->regs.srrctl.desctype());
1443  }
1444 
1445  DPRINTF(EthernetDesc, "Descriptor complete w0: %#x w1: %#x\n",
1446  desc->adv_read.pkt, desc->adv_read.hdr);
1447 
1448  if (bytesCopied == pktPtr->length) {
1449  DPRINTF(EthernetDesc,
1450  "Packet completely written to descriptor buffers\n");
1451  // Deal with the rx timer interrupts
1452  if (igbe->regs.rdtr.delay()) {
1453  Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
1454  DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", delay);
1455  igbe->reschedule(igbe->rdtrEvent, curTick() + delay);
1456  }
1457 
1458  if (igbe->regs.radv.idv()) {
1459  Tick delay = igbe->regs.radv.idv() * igbe->intClock();
1460  DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", delay);
1461  if (!igbe->radvEvent.scheduled()) {
1462  igbe->schedule(igbe->radvEvent, curTick() + delay);
1463  }
1464  }
1465 
1466  // if neither radv nor rdtr is set, maybe itr is set...
1467  if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
1468  DPRINTF(EthernetSM,
1469  "RXS: Receive interrupt delay disabled, posting IT_RXT\n");
1470  igbe->postInterrupt(IT_RXT);
1471  }
1472 
1473  // If the packet is small enough, interrupt appropriately
1474  // I wonder if this is delayed or not?!
1475  if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
1476  DPRINTF(EthernetSM,
1477  "RXS: Posting IT_SRPD because small packet received\n");
1478  igbe->postInterrupt(IT_SRPD);
1479  }
1480  bytesCopied = 0;
1481  }
1482 
1483  pktPtr = NULL;
1484  igbe->checkDrain();
1485  enableSm();
1486  pktDone = true;
1487 
1488  igbe->anBegin("RXS", "Done Updating Desc");
1489  DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
1490  igbe->anDq("RXS", annUnusedCacheQ);
1491  unusedCache.pop_front();
1492  igbe->anQ("RXS", annUsedCacheQ);
1493  usedCache.push_back(desc);
1494 }
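The receive path above offloads checksum verification by recomputing the IP, TCP, and UDP checksums with gem5's cksum() helpers and flagging any mismatch in the descriptor error bits. For reference, this is a standalone sketch of the textbook 16-bit one's-complement Internet checksum those helpers implement; the sample header bytes are arbitrary and the snippet is not gem5 code.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static uint16_t inet_cksum(const uint8_t *buf, std::size_t len)
{
    uint32_t sum = 0;
    for (std::size_t i = 0; i + 1 < len; i += 2)
        sum += (static_cast<uint32_t>(buf[i]) << 8) | buf[i + 1];
    if (len & 1)
        sum += static_cast<uint32_t>(buf[len - 1]) << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return static_cast<uint16_t>(~sum);
}

int main()
{
    // Example IPv4 header with its checksum field zeroed out.
    const uint8_t hdr[] = {0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                           0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                           0xac, 0x10, 0x0a, 0x0c};
    std::printf("computed header checksum: %#x\n", inet_cksum(hdr, sizeof(hdr)));
    return 0;
}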
1495 
1496 void
1497 IGbE::RxDescCache::enableSm()
1498 {
1499  if (igbe->drainState() != DrainState::Draining) {
1500  igbe->rxTick = true;
1501  igbe->restartClock();
1502  }
1503 }
1504 
1505 bool
1506 IGbE::RxDescCache::packetDone()
1507 {
1508  if (pktDone) {
1509  pktDone = false;
1510  return true;
1511  }
1512  return false;
1513 }
1514 
1515 bool
1516 IGbE::RxDescCache::hasOutstandingEvents()
1517 {
1518  return pktEvent.scheduled() || wbEvent.scheduled() ||
1519  fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
1520  pktDataEvent.scheduled();
1521 
1522 }
1523 
1524 void
1525 IGbE::RxDescCache::serialize(CheckpointOut &cp) const
1526 {
1527  DescCache<RxDesc>::serialize(cp);
1528  SERIALIZE_SCALAR(pktDone);
1529  SERIALIZE_SCALAR(splitCount);
1530  SERIALIZE_SCALAR(bytesCopied);
1531 }
1532 
1533 void
1534 IGbE::RxDescCache::unserialize(CheckpointIn &cp)
1535 {
1536  DescCache<RxDesc>::unserialize(cp);
1537  UNSERIALIZE_SCALAR(pktDone);
1538  UNSERIALIZE_SCALAR(splitCount);
1539  UNSERIALIZE_SCALAR(bytesCopied);
1540 }
1541 
1542 
1544 
1545 IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
1546  : DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
1547  pktWaiting(false), pktMultiDesc(false),
1548  completionAddress(0), completionEnabled(false),
1549  useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
1550  tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
1551  tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
1552  pktEvent(this), headerEvent(this), nullEvent(this)
1553 {
1554  annSmFetch = "TX Desc Fetch";
1555  annSmWb = "TX Desc Writeback";
1556  annUnusedDescQ = "TX Unused Descriptors";
1557  annUnusedCacheQ = "TX Unused Descriptor Cache";
1558  annUsedCacheQ = "TX Used Descriptor Cache";
1559  annUsedDescQ = "TX Used Descriptors";
1560  annDescQ = "TX Descriptors";
1561 }
1562 
1563 void
1564 IGbE::TxDescCache::processContextDesc()
1565 {
1566  assert(unusedCache.size());
1567  TxDesc *desc;
1568 
1569  DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");
1570 
1571  while (!useTso && unusedCache.size() &&
1572  TxdOp::isContext(unusedCache.front())) {
1573  DPRINTF(EthernetDesc, "Got context descriptor type...\n");
1574 
1575  desc = unusedCache.front();
1576  DPRINTF(EthernetDesc, "Descriptor upper: %#x lower: %#X\n",
1577  desc->d1, desc->d2);
1578 
1579 
1580  // is this going to be a tcp or udp packet?
1581  isTcp = TxdOp::tcp(desc) ? true : false;
1582 
1583  // set up all the TSO variables; they'll be ignored if we don't use
1584  // TSO for this connection
1585  tsoHeaderLen = TxdOp::hdrlen(desc);
1586  tsoMss = TxdOp::mss(desc);
1587 
1588  if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
1589  DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
1590  "%d mss: %d paylen %d\n", TxdOp::hdrlen(desc),
1591  TxdOp::mss(desc), TxdOp::getLen(desc));
1592  useTso = true;
1593  tsoTotalLen = TxdOp::getLen(desc);
1594  tsoLoadedHeader = false;
1595  tsoDescBytesUsed = 0;
1596  tsoUsedLen = 0;
1597  tsoPrevSeq = 0;
1598  tsoPktHasHeader = false;
1599  tsoPkts = 0;
1600  tsoCopyBytes = 0;
1601  }
1602 
1603  TxdOp::setDd(desc);
1604  unusedCache.pop_front();
1605  igbe->anDq("TXS", annUnusedCacheQ);
1606  usedCache.push_back(desc);
1607  igbe->anQ("TXS", annUsedCacheQ);
1608  }
1609 
1610  if (!unusedCache.size())
1611  return;
1612 
1613  desc = unusedCache.front();
1614  if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
1615  TxdOp::tse(desc)) {
1616  DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
1617  "hdrlen: %d mss: %d paylen %d\n",
1618  tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
1619  useTso = true;
1620  tsoTotalLen = TxdOp::getTsoLen(desc);
1621  tsoLoadedHeader = false;
1622  tsoDescBytesUsed = 0;
1623  tsoUsedLen = 0;
1624  tsoPrevSeq = 0;
1625  tsoPktHasHeader = false;
1626  tsoPkts = 0;
1627  }
1628 
1629  if (useTso && !tsoLoadedHeader) {
1630  // we need to fetch a header
1631  DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
1632  assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
1633  pktWaiting = true;
1634  assert(tsoHeaderLen <= 256);
1635  igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1636  tsoHeaderLen, &headerEvent, tsoHeader, 0);
1637  }
1638 }
1639 
1640 void
1641 IGbE::TxDescCache::headerComplete()
1642 {
1643  DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
1644  pktWaiting = false;
1645 
1646  assert(unusedCache.size());
1647  TxDesc *desc = unusedCache.front();
1648  DPRINTF(EthernetDesc, "TSO: len: %d tsoHeaderLen: %d\n",
1649  TxdOp::getLen(desc), tsoHeaderLen);
1650 
1651  if (TxdOp::getLen(desc) == tsoHeaderLen) {
1652  tsoDescBytesUsed = 0;
1653  tsoLoadedHeader = true;
1654  unusedCache.pop_front();
1655  usedCache.push_back(desc);
1656  } else {
1657  DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
1658  tsoDescBytesUsed = tsoHeaderLen;
1659  tsoLoadedHeader = true;
1660  }
1661  enableSm();
1662  igbe->checkDrain();
1663 }
1664 
1665 unsigned
1666 IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
1667 {
1668  if (!unusedCache.size())
1669  return 0;
1670 
1671  DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
1672 
1673  assert(!useTso || tsoLoadedHeader);
1674  TxDesc *desc = unusedCache.front();
1675 
1676  if (useTso) {
1677  DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
1678  "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1679  DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1680  "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1681  tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1682 
1683  if (tsoPktHasHeader)
1684  tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
1685  TxdOp::getLen(desc) - tsoDescBytesUsed);
1686  else
1687  tsoCopyBytes = std::min(tsoMss,
1688  TxdOp::getLen(desc) - tsoDescBytesUsed);
1689  unsigned pkt_size =
1690  tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
1691 
1692  DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d "
1693  "this descLen: %d\n",
1694  tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
1695  DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
1696  DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
1697  return pkt_size;
1698  }
1699 
1700  DPRINTF(EthernetDesc, "Next TX packet is %d bytes\n",
1701  TxdOp::getLen(unusedCache.front()));
1702  return TxdOp::getLen(desc);
1703 }
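getPacketSize() above, together with pktComplete(), implements the segmentation side of TSO: a large payload described by the context descriptor is emitted as a series of packets of at most header-plus-MSS bytes, with the cached header prepended to each one. A simplified sketch of that slicing loop follows; all sizes are example values and the snippet is only illustrative.

#include <algorithm>
#include <cstdio>

int main()
{
    unsigned hdr_len = 54;         // Ethernet + IP + TCP header bytes (example)
    unsigned mss = 1460;           // maximum segment size from the context descriptor
    unsigned total_payload = 4000; // TSO payload length to segment

    unsigned used = 0, pkts = 0;
    while (used < total_payload) {
        unsigned copy = std::min(mss, total_payload - used);
        used += copy;
        ++pkts;
        std::printf("segment %u: %u bytes on the wire (%u bytes of payload)\n",
                    pkts, hdr_len + copy, copy);
    }
    return 0;
}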
1704 
1705 void
1706 IGbE::TxDescCache::getPacketData(EthPacketPtr p)
1707 {
1708  assert(unusedCache.size());
1709 
1710  TxDesc *desc;
1711  desc = unusedCache.front();
1712 
1713  DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
1714  "d1: %#llx d2: %#llx\n", desc->d1, desc->d2);
1715  assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1716  TxdOp::getLen(desc));
1717 
1718  pktPtr = p;
1719 
1720  pktWaiting = true;
1721 
1722  DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
1723 
1724  if (useTso) {
1725  assert(tsoLoadedHeader);
1726  if (!tsoPktHasHeader) {
1727  DPRINTF(EthernetDesc,
1728  "Loading TSO header (%d bytes) into start of packet\n",
1729  tsoHeaderLen);
1730  memcpy(p->data, &tsoHeader,tsoHeaderLen);
1731  p->length +=tsoHeaderLen;
1732  tsoPktHasHeader = true;
1733  }
1734  }
1735 
1736  if (useTso) {
1737  DPRINTF(EthernetDesc,
1738  "Starting DMA of packet at offset %d length: %d\n",
1739  p->length, tsoCopyBytes);
1740  igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
1741  + tsoDescBytesUsed,
1742  tsoCopyBytes, &pktEvent, p->data + p->length,
1743  igbe->txReadDelay);
1744  tsoDescBytesUsed += tsoCopyBytes;
1745  assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
1746  } else {
1747  igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
1748  TxdOp::getLen(desc), &pktEvent, p->data + p->length,
1749  igbe->txReadDelay);
1750  }
1751 }
1752 
1753 void
1754 IGbE::TxDescCache::pktComplete()
1755 {
1756 
1757  TxDesc *desc;
1758  assert(unusedCache.size());
1759  assert(pktPtr);
1760 
1761  igbe->anBegin("TXS", "Update Desc");
1762 
1763  DPRINTF(EthernetDesc, "DMA of packet complete\n");
1764 
1765 
1766  desc = unusedCache.front();
1767  assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
1768  TxdOp::getLen(desc));
1769 
1770  DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1771  desc->d1, desc->d2);
1772 
1773  // Set the length of the data in the EtherPacket
1774  if (useTso) {
1775  DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %d mss: %d total: %d "
1776  "used: %d loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
1777  tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
1778  pktPtr->simLength += tsoCopyBytes;
1779  pktPtr->length += tsoCopyBytes;
1780  tsoUsedLen += tsoCopyBytes;
1781  DPRINTF(EthernetDesc, "TSO: descBytesUsed: %d copyBytes: %d\n",
1782  tsoDescBytesUsed, tsoCopyBytes);
1783  } else {
1784  pktPtr->simLength += TxdOp::getLen(desc);
1785  pktPtr->length += TxdOp::getLen(desc);
1786  }
1787 
1788 
1789 
1790  if ((!TxdOp::eop(desc) && !useTso) ||
1791  (pktPtr->length < ( tsoMss + tsoHeaderLen) &&
1792  tsoTotalLen != tsoUsedLen && useTso)) {
1793  assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
1794  igbe->anDq("TXS", annUnusedCacheQ);
1795  unusedCache.pop_front();
1796  igbe->anQ("TXS", annUsedCacheQ);
1797  usedCache.push_back(desc);
1798 
1799  tsoDescBytesUsed = 0;
1800  pktDone = true;
1801  pktWaiting = false;
1802  pktMultiDesc = true;
1803 
1804  DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
1805  pktPtr->length);
1806  pktPtr = NULL;
1807 
1808  enableSm();
1809  igbe->checkDrain();
1810  return;
1811  }
1812 
1813 
1814  pktMultiDesc = false;
1815  // no support for vlans
1816  assert(!TxdOp::vle(desc));
1817 
1818  // we only support single packet descriptors at this point
1819  if (!useTso)
1820  assert(TxdOp::eop(desc));
1821 
1822  // set that this packet is done
1823  if (TxdOp::rs(desc))
1824  TxdOp::setDd(desc);
1825 
1826  DPRINTF(EthernetDesc, "TxDescriptor data d1: %#llx d2: %#llx\n",
1827  desc->d1, desc->d2);
1828 
1829  if (useTso) {
1830  IpPtr ip(pktPtr);
1831  if (ip) {
1832  DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
1833  tsoPkts);
1834  ip->id(ip->id() + tsoPkts++);
1835  ip->len(pktPtr->length - EthPtr(pktPtr)->size());
1836 
1837  TcpPtr tcp(ip);
1838  if (tcp) {
1839  DPRINTF(EthernetDesc,
1840  "TSO: Modifying TCP header. old seq %d + %d\n",
1841  tcp->seq(), tsoPrevSeq);
1842  tcp->seq(tcp->seq() + tsoPrevSeq);
1843  if (tsoUsedLen != tsoTotalLen)
1844  tcp->flags(tcp->flags() & ~9); // clear fin & psh
1845  }
1846  UdpPtr udp(ip);
1847  if (udp) {
1848  DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
1849  udp->len(pktPtr->length - EthPtr(pktPtr)->size());
1850  }
1851  }
1852  tsoPrevSeq = tsoUsedLen;
1853  }
1854 
1855  if (DTRACE(EthernetDesc)) {
1856  IpPtr ip(pktPtr);
1857  if (ip)
1858  DPRINTF(EthernetDesc, "Processing Ip packet with Id=%d\n",
1859  ip->id());
1860  else
1861  DPRINTF(EthernetSM, "Processing Non-Ip packet\n");
1862  }
1863 
1864  // Checksums are only offloaded for new descriptor types
1865  if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) {
1866  DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
1867  IpPtr ip(pktPtr);
1868  assert(ip);
1869  if (TxdOp::ixsm(desc)) {
1870  ip->sum(0);
1871  ip->sum(cksum(ip));
1872  igbe->txIpChecksums++;
1873  DPRINTF(EthernetDesc, "Calculated IP checksum\n");
1874  }
1875  if (TxdOp::txsm(desc)) {
1876  TcpPtr tcp(ip);
1877  UdpPtr udp(ip);
1878  if (tcp) {
1879  tcp->sum(0);
1880  tcp->sum(cksum(tcp));
1881  igbe->txTcpChecksums++;
1882  DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
1883  } else if (udp) {
1884  assert(udp);
1885  udp->sum(0);
1886  udp->sum(cksum(udp));
1887  igbe->txUdpChecksums++;
1888  DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
1889  } else {
1890  panic("Told to checksum, but don't know how\n");
1891  }
1892  }
1893  }
1894 
1895  if (TxdOp::ide(desc)) {
1896  // Deal with the tx timer interrupts
1897  DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
1898  if (igbe->regs.tidv.idv()) {
1899  Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
1900  DPRINTF(EthernetDesc, "setting tidv\n");
1901  igbe->reschedule(igbe->tidvEvent, curTick() + delay, true);
1902  }
1903 
1904  if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
1905  Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
1906  DPRINTF(EthernetDesc, "setting tadv\n");
1907  if (!igbe->tadvEvent.scheduled()) {
1908  igbe->schedule(igbe->tadvEvent, curTick() + delay);
1909  }
1910  }
1911  }
1912 
1913 
1914  if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
1915  DPRINTF(EthernetDesc, "Descriptor Done\n");
1916  igbe->anDq("TXS", annUnusedCacheQ);
1917  unusedCache.pop_front();
1918  igbe->anQ("TXS", annUsedCacheQ);
1919  usedCache.push_back(desc);
1920  tsoDescBytesUsed = 0;
1921  }
1922 
1923  if (useTso && tsoUsedLen == tsoTotalLen)
1924  useTso = false;
1925 
1926 
1927  DPRINTF(EthernetDesc,
1928  "------Packet of %d bytes ready for transmission-------\n",
1929  pktPtr->length);
1930  pktDone = true;
1931  pktWaiting = false;
1932  pktPtr = NULL;
1933  tsoPktHasHeader = false;
1934 
1935  if (igbe->regs.txdctl.wthresh() == 0) {
1936  igbe->anBegin("TXS", "Desc Writeback");
1937  DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
1938  writeback(0);
1939  } else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
1940  descInBlock(usedCache.size())) {
1941  DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1942  igbe->anBegin("TXS", "Desc Writeback");
1943  writeback((igbe->cacheBlockSize()-1)>>4);
1944  } else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
1945  DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
1946  igbe->anBegin("TXS", "Desc Writeback");
1947  writeback((igbe->cacheBlockSize()-1)>>4);
1948  }
1949 
1950  enableSm();
1951  igbe->checkDrain();
1952 }
1953 
1954 void
1955 IGbE::TxDescCache::actionAfterWb()
1956 {
1957  DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
1958  completionEnabled);
1959  igbe->postInterrupt(iGbReg::IT_TXDW);
1960  if (completionEnabled) {
1961  descEnd = igbe->regs.tdh();
1962  DPRINTF(EthernetDesc,
1963  "Completion writing back value: %d to addr: %#x\n", descEnd,
1964  completionAddress);
1965  igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
1966  sizeof(descEnd), &nullEvent, (uint8_t*)&descEnd, 0);
1967  }
1968 }
1969 
1970 void
1971 IGbE::TxDescCache::serialize(CheckpointOut &cp) const
1972 {
1973  DescCache<TxDesc>::serialize(cp);
1974 
1975  SERIALIZE_SCALAR(pktDone);
1976  SERIALIZE_SCALAR(isTcp);
1977  SERIALIZE_SCALAR(pktWaiting);
1978  SERIALIZE_SCALAR(pktMultiDesc);
1979 
1980  SERIALIZE_SCALAR(useTso);
1981  SERIALIZE_SCALAR(tsoHeaderLen);
1982  SERIALIZE_SCALAR(tsoMss);
1983  SERIALIZE_SCALAR(tsoTotalLen);
1984  SERIALIZE_SCALAR(tsoUsedLen);
1985  SERIALIZE_SCALAR(tsoPrevSeq);
1986  SERIALIZE_SCALAR(tsoPktPayloadBytes);
1987  SERIALIZE_SCALAR(tsoLoadedHeader);
1988  SERIALIZE_SCALAR(tsoPktHasHeader);
1989  SERIALIZE_ARRAY(tsoHeader, 256);
1990  SERIALIZE_SCALAR(tsoDescBytesUsed);
1991  SERIALIZE_SCALAR(tsoCopyBytes);
1992  SERIALIZE_SCALAR(tsoPkts);
1993 
1994  SERIALIZE_SCALAR(completionAddress);
1995  SERIALIZE_SCALAR(completionEnabled);
1996  SERIALIZE_SCALAR(descEnd);
1997 }
1998 
1999 void
2000 IGbE::TxDescCache::unserialize(CheckpointIn &cp)
2001 {
2002  DescCache<TxDesc>::unserialize(cp);
2003 
2004  UNSERIALIZE_SCALAR(pktDone);
2005  UNSERIALIZE_SCALAR(isTcp);
2006  UNSERIALIZE_SCALAR(pktWaiting);
2007  UNSERIALIZE_SCALAR(pktMultiDesc);
2008 
2009  UNSERIALIZE_SCALAR(useTso);
2010  UNSERIALIZE_SCALAR(tsoHeaderLen);
2011  UNSERIALIZE_SCALAR(tsoMss);
2012  UNSERIALIZE_SCALAR(tsoTotalLen);
2013  UNSERIALIZE_SCALAR(tsoUsedLen);
2014  UNSERIALIZE_SCALAR(tsoPrevSeq);
2015  UNSERIALIZE_SCALAR(tsoPktPayloadBytes);
2016  UNSERIALIZE_SCALAR(tsoLoadedHeader);
2017  UNSERIALIZE_SCALAR(tsoPktHasHeader);
2018  UNSERIALIZE_ARRAY(tsoHeader, 256);
2019  UNSERIALIZE_SCALAR(tsoDescBytesUsed);
2020  UNSERIALIZE_SCALAR(tsoCopyBytes);
2021  UNSERIALIZE_SCALAR(tsoPkts);
2022 
2023  UNSERIALIZE_SCALAR(completionAddress);
2024  UNSERIALIZE_SCALAR(completionEnabled);
2025  UNSERIALIZE_SCALAR(descEnd);
2026 }
2027 
2028 bool
2029 IGbE::TxDescCache::packetAvailable()
2030 {
2031  if (pktDone) {
2032  pktDone = false;
2033  return true;
2034  }
2035  return false;
2036 }
2037 
2038 void
2039 IGbE::TxDescCache::enableSm()
2040 {
2041  if (igbe->drainState() != DrainState::Draining) {
2042  igbe->txTick = true;
2043  igbe->restartClock();
2044  }
2045 }
2046 
2047 bool
2048 IGbE::TxDescCache::hasOutstandingEvents()
2049 {
2050  return pktEvent.scheduled() || wbEvent.scheduled() ||
2051  fetchEvent.scheduled();
2052 }
2053 
2054 
2056 
2057 void
2058 IGbE::restartClock()
2059 {
2060  if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
2061  drainState() == DrainState::Running)
2062  schedule(tickEvent, clockEdge(Cycles(1)));
2063 }
2064 
2065 DrainState
2066 IGbE::drain()
2067 {
2068  unsigned int count(0);
2069  if (rxDescCache.hasOutstandingEvents() ||
2070  txDescCache.hasOutstandingEvents()) {
2071  count++;
2072  }
2073 
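 // Stop all tick sources; any outstanding descriptor-cache DMA events keep
 // the device in the Draining state until they complete.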
2074  txFifoTick = false;
2075  txTick = false;
2076  rxTick = false;
2077 
2078  if (tickEvent.scheduled())
2079  deschedule(tickEvent);
2080 
2081  if (count) {
2082  DPRINTF(Drain, "IGbE not drained\n");
2083  return DrainState::Draining;
2084  } else
2085  return DrainState::Drained;
2086 }
2087 
2088 void
2089 IGbE::drainResume()
2090 {
2091  Drainable::drainResume();
2092 
2093  txFifoTick = true;
2094  txTick = true;
2095  rxTick = true;
2096 
2097  restartClock();
2098  DPRINTF(EthernetSM, "resuming from drain");
2099 }
2100 
2101 void
2102 IGbE::checkDrain()
2103 {
2104  if (drainState() != DrainState::Draining)
2105  return;
2106 
2107  txFifoTick = false;
2108  txTick = false;
2109  rxTick = false;
2110  if (!rxDescCache.hasOutstandingEvents() &&
2111  !txDescCache.hasOutstandingEvents()) {
2112  DPRINTF(Drain, "IGbE done draining, processing drain event\n");
2113  signalDrainDone();
2114  }
2115 }
2116 
2117 void
2118 IGbE::txStateMachine()
2119 {
2120  if (!regs.tctl.en()) {
2121  txTick = false;
2122  DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
2123  return;
2124  }
2125 
2126  // If we have a packet available and its length is not 0 (meaning it's
2127  // not a multidescriptor packet) put it in the fifo, otherwise on the
2128  // next iteration we'll get the rest of the data
2129  if (txPacket && txDescCache.packetAvailable()
2130  && !txDescCache.packetMultiDesc() && txPacket->length) {
2131  anQ("TXS", "TX FIFO Q");
2132  DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
2133 #ifndef NDEBUG
2134  bool success =
2135 #endif
2136  txFifo.push(txPacket);
2137  txFifoTick = true && drainState() != DrainState::Draining;
2138  assert(success);
2139  txPacket = NULL;
2140  anBegin("TXS", "Desc Writeback");
2141  txDescCache.writeback((cacheBlockSize()-1)>>4);
2142  return;
2143  }
2144 
2145  // Only support descriptor granularity
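 // TXDCTL.LWTHRESH is specified in units of 8 descriptors, hence the
 // multiply by 8 when comparing it with the descriptors left in the ring.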
2146  if (regs.txdctl.lwthresh() &&
2147  txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
2148  DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
2149  postInterrupt(IT_TXDLOW);
2150  }
2151 
2152  if (!txPacket) {
2153  txPacket = std::make_shared<EthPacketData>(16384);
2154  }
2155 
2156  if (!txDescCache.packetWaiting()) {
2157  if (txDescCache.descLeft() == 0) {
2158  postInterrupt(IT_TXQE);
2159  anBegin("TXS", "Desc Writeback");
2160  txDescCache.writeback(0);
2161  anBegin("TXS", "Desc Fetch");
2162  txDescCache.fetchDescriptors();
2163 
2164  DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
2165  "writeback, stopping ticking and posting TXQE\n");
2166  txTick = false;
2167  return;
2168  }
2169 
2170 
2171  if (!(txDescCache.descUnused())) {
2172  anBegin("TXS", "Desc Fetch");
2173  txDescCache.fetchDescriptors();
2174  anWe("TXS", txDescCache.annUnusedCacheQ);
2175  DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
2176  "fetching and stopping ticking\n");
2177  txTick = false;
2178  return;
2179  }
2180  anPq("TXS", txDescCache.annUnusedCacheQ);
2181 
2182 
2183  txDescCache.processContextDesc();
2184  if (txDescCache.packetWaiting()) {
2185  DPRINTF(EthernetSM,
2186  "TXS: Fetching TSO header, stopping ticking\n");
2187  txTick = false;
2188  return;
2189  }
2190 
2191  unsigned size = txDescCache.getPacketSize(txPacket);
2192  if (size > 0 && txFifo.avail() > size) {
2193  anRq("TXS", "TX FIFO Q");
2194  anBegin("TXS", "DMA Packet");
2195  DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
2196  "beginning DMA of next packet\n", size);
2197  txFifo.reserve(size);
2198  txDescCache.getPacketData(txPacket);
2199  } else if (size == 0) {
2200  DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
2201  DPRINTF(EthernetSM,
2202  "TXS: No packets to get, writing back used descriptors\n");
2203  anBegin("TXS", "Desc Writeback");
2204  txDescCache.writeback(0);
2205  } else {
2206  anWf("TXS", "TX FIFO Q");
2207  DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
2208  "available in FIFO\n");
2209  txTick = false;
2210  }
2211 
2212 
2213  return;
2214  }
2215  DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
2216  txTick = false;
2217 }
2218 
2219 bool
2220 IGbE::ethRxPkt(EthPacketPtr pkt)
2221 {
2222  rxBytes += pkt->length;
2223  rxPackets++;
2224 
2225  DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
2226  anBegin("RXQ", "Wire Recv");
2227 
2228 
2229  if (!regs.rctl.en()) {
2230  DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
2231  anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2232  return true;
2233  }
2234 
2235  // restart the state machines if they are stopped
2236  rxTick = true && drainState() != DrainState::Draining;
2237  if ((rxTick || txTick) && !tickEvent.scheduled()) {
2238  DPRINTF(EthernetSM,
2239  "RXS: received packet into fifo, starting ticking\n");
2240  restartClock();
2241  }
2242 
2243  if (!rxFifo.push(pkt)) {
2244  DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
2245  postInterrupt(IT_RXO, true);
2246  anBegin("RXQ", "FIFO Drop", CPA::FL_BAD);
2247  return false;
2248  }
2249 
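 // Critical-path annotation (CPA): when enabled, record this packet moving
 // off the wire queue and into the RX FIFO so it can be tracked across the
 // two simulated systems.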
2250  if (CPA::available() && cpa->enabled()) {
2251  assert(sys->numSystemsRunning <= 2);
2252  System *other_sys;
2253  if (sys->systemList[0] == sys)
2254  other_sys = sys->systemList[1];
2255  else
2256  other_sys = sys->systemList[0];
2257 
2258  cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2259  anQ("RXQ", "RX FIFO Q");
2260  cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
2261  }
2262 
2263  return true;
2264 }
2265 
2266 
2267 void
2268 IGbE::rxStateMachine()
2269 {
2270  if (!regs.rctl.en()) {
2271  rxTick = false;
2272  DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
2273  return;
2274  }
2275 
2276  // If the packet is done check for interrupts/descriptors/etc
2277  if (rxDescCache.packetDone()) {
2278  rxDmaPacket = false;
2279  DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
2280  int descLeft = rxDescCache.descLeft();
2281  DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
2282  descLeft, regs.rctl.rdmts(), regs.rdlen());
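 // RCTL.RDMTS selects the free-descriptor threshold as a fraction of RDLEN
 // (0 = 1/2, 1 = 1/4, 2 = 1/8); the cases below intentionally fall through
 // so an RXDMT interrupt is posted once the threshold is reached.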
2283  switch (regs.rctl.rdmts()) {
2284  case 2: if (descLeft > .125 * regs.rdlen()) break;
2285  case 1: if (descLeft > .250 * regs.rdlen()) break;
2286  case 0: if (descLeft > .500 * regs.rdlen()) break;
2287  DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
2288  "because of descriptors left\n");
2289  postInterrupt(IT_RXDMT);
2290  break;
2291  }
2292 
2293  if (rxFifo.empty())
2294  postInterrupt(IT_RXT);
2295 
2296  if (descLeft == 0) {
2297  anBegin("RXS", "Writeback Descriptors");
2298  rxDescCache.writeback(0);
2299  DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
2300  " writeback and stopping ticking\n");
2301  rxTick = false;
2302  }
2303 
2304  // only support descriptor granularities
2305  assert(regs.rxdctl.gran());
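 // With GRAN set the RXDCTL thresholds are in descriptor units: WTHRESH
 // bounds how many used descriptors may sit in the cache before write-back,
 // while PTHRESH/HTHRESH below decide when more descriptors are fetched.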
2306 
2307  if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
2308  DPRINTF(EthernetSM,
2309  "RXS: Writing back because WTHRESH >= descUsed\n");
2310  anBegin("RXS", "Writeback Descriptors");
2311  if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
2312  rxDescCache.writeback(regs.rxdctl.wthresh()-1);
2313  else
2314  rxDescCache.writeback((cacheBlockSize()-1)>>4);
2315  }
2316 
2317  if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
2318  ((rxDescCache.descLeft() - rxDescCache.descUnused()) >
2319  regs.rxdctl.hthresh())) {
2320  DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
2321  "descUnused < PTHRESH\n");
2322  anBegin("RXS", "Fetch Descriptors");
2323  rxDescCache.fetchDescriptors();
2324  }
2325 
2326  if (rxDescCache.descUnused() == 0) {
2327  anBegin("RXS", "Fetch Descriptors");
2328  rxDescCache.fetchDescriptors();
2329  anWe("RXS", rxDescCache.annUnusedCacheQ);
2330  DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2331  "fetching descriptors and stopping ticking\n");
2332  rxTick = false;
2333  }
2334  return;
2335  }
2336 
2337  if (rxDmaPacket) {
2338  DPRINTF(EthernetSM,
2339  "RXS: stopping ticking until packet DMA completes\n");
2340  rxTick = false;
2341  return;
2342  }
2343 
2344  if (!rxDescCache.descUnused()) {
2345  anBegin("RXS", "Fetch Descriptors");
2346  rxDescCache.fetchDescriptors();
2347  anWe("RXS", rxDescCache.annUnusedCacheQ);
2348  DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
2349  "stopping ticking\n");
2350  rxTick = false;
2351  DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
2352  return;
2353  }
2354  anPq("RXS", rxDescCache.annUnusedCacheQ);
2355 
2356  if (rxFifo.empty()) {
2357  anWe("RXS", "RX FIFO Q");
2358  DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
2359  rxTick = false;
2360  return;
2361  }
2362  anPq("RXS", "RX FIFO Q");
2363  anBegin("RXS", "Get Desc");
2364 
2365  EthPacketPtr pkt;
2366  pkt = rxFifo.front();
2367 
2368 
2369  pktOffset = rxDescCache.writePacket(pkt, pktOffset);
2370  DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
2371  if (pktOffset == pkt->length) {
2372  anBegin( "RXS", "FIFO Dequeue");
2373  DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
2374  pktOffset = 0;
2375  anDq("RXS", "RX FIFO Q");
2376  rxFifo.pop();
2377  }
2378 
2379  DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
2380  rxTick = false;
2381  rxDmaPacket = true;
2382  anBegin("RXS", "DMA Packet");
2383 }
2384 
2385 void
2386 IGbE::txWire()
2387 {
2388  if (txFifo.empty()) {
2389  anWe("TXQ", "TX FIFO Q");
2390  txFifoTick = false;
2391  return;
2392  }
2393 
2394 
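 // Try to hand the head-of-line packet to the peer interface; if the peer
 // is busy, txFifoTick is simply cleared and ethTxDone() will restart
 // transmission later.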
2395  anPq("TXQ", "TX FIFO Q");
2396  if (etherInt->sendPacket(txFifo.front())) {
2397  anQ("TXQ", "WireQ");
2398  if (DTRACE(EthernetSM)) {
2399  IpPtr ip(txFifo.front());
2400  if (ip)
2401  DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
2402  ip->id());
2403  else
2404  DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
2405  }
2406  anDq("TXQ", "TX FIFO Q");
2407  anBegin("TXQ", "Wire Send");
2408  DPRINTF(EthernetSM,
2409  "TxFIFO: Successful transmit, bytes available in fifo: %d\n",
2410  txFifo.avail());
2411 
2412  txBytes += txFifo.front()->length;
2413  txPackets++;
2414  txFifoTick = false;
2415 
2416  txFifo.pop();
2417  } else {
2418  // We'll get woken up when ethTxDone() is called for this packet
2419  txFifoTick = false;
2420  }
2421 }
2422 
2423 void
2424 IGbE::tick()
2425 {
2426  DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
2427 
2428  if (rxTick)
2429  rxStateMachine();
2430 
2431  if (txTick)
2432  txStateMachine();
2433 
2434  if (txFifoTick)
2435  txWire();
2436 
2437 
2438  if (rxTick || txTick || txFifoTick)
2439  schedule(tickEvent, curTick() + clockPeriod());
2440 }
2441 
2442 void
2443 IGbE::ethTxDone()
2444 {
2445  anBegin("TXQ", "Send Done");
2446  // restart the tx state machines if they are stopped:
2447  // the fifo, so it can send another packet, and
2448  // the tx sm, so it can put more data into the fifo
2449  txFifoTick = true && drainState() != DrainState::Draining;
2450  if (txDescCache.descLeft() != 0 && drainState() != DrainState::Draining)
2451  txTick = true;
2452 
2453  restartClock();
2454  txWire();
2455  DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
2456 }
2457 
2458 void
2459 IGbE::serialize(CheckpointOut &cp) const
2460 {
2461  PciDevice::serialize(cp);
2462 
2463  regs.serialize(cp);
2464  SERIALIZE_SCALAR(eeOpBits);
2465  SERIALIZE_SCALAR(eeAddrBits);
2466  SERIALIZE_SCALAR(eeDataBits);
2467  SERIALIZE_SCALAR(eeOpcode);
2468  SERIALIZE_SCALAR(eeAddr);
2469  SERIALIZE_SCALAR(lastInterrupt);
2470  SERIALIZE_ARRAY(flash, iGbReg::EEPROM_SIZE);
2471 
2472  rxFifo.serialize("rxfifo", cp);
2473  txFifo.serialize("txfifo", cp);
2474 
2475  bool txPktExists = txPacket != nullptr;
2476  SERIALIZE_SCALAR(txPktExists);
2477  if (txPktExists)
2478  txPacket->serialize("txpacket", cp);
2479 
2480  Tick rdtr_time = 0, radv_time = 0, tidv_time = 0, tadv_time = 0,
2481  inter_time = 0;
2482 
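 // Pending delay-timer and interrupt events are checkpointed as absolute
 // ticks (0 means not scheduled) and rescheduled in unserialize().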
2483  if (rdtrEvent.scheduled())
2484  rdtr_time = rdtrEvent.when();
2485  SERIALIZE_SCALAR(rdtr_time);
2486 
2487  if (radvEvent.scheduled())
2488  radv_time = radvEvent.when();
2489  SERIALIZE_SCALAR(radv_time);
2490 
2491  if (tidvEvent.scheduled())
2492  tidv_time = tidvEvent.when();
2493  SERIALIZE_SCALAR(tidv_time);
2494 
2495  if (tadvEvent.scheduled())
2496  tadv_time = tadvEvent.when();
2497  SERIALIZE_SCALAR(tadv_time);
2498 
2499  if (interEvent.scheduled())
2500  inter_time = interEvent.when();
2501  SERIALIZE_SCALAR(inter_time);
2502 
2503  SERIALIZE_SCALAR(pktOffset);
2504 
2505  txDescCache.serializeSection(cp, "TxDescCache");
2506  rxDescCache.serializeSection(cp, "RxDescCache");
2507 }
2508 
2509 void
2510 IGbE::unserialize(CheckpointIn &cp)
2511 {
2512  PciDevice::unserialize(cp);
2513 
2514  regs.unserialize(cp);
2515  UNSERIALIZE_SCALAR(eeOpBits);
2516  UNSERIALIZE_SCALAR(eeAddrBits);
2517  UNSERIALIZE_SCALAR(eeDataBits);
2518  UNSERIALIZE_SCALAR(eeOpcode);
2519  UNSERIALIZE_SCALAR(eeAddr);
2520  UNSERIALIZE_SCALAR(lastInterrupt);
2521  UNSERIALIZE_ARRAY(flash, iGbReg::EEPROM_SIZE);
2522 
2523  rxFifo.unserialize("rxfifo", cp);
2524  txFifo.unserialize("txfifo", cp);
2525 
2526  bool txPktExists;
2527  UNSERIALIZE_SCALAR(txPktExists);
2528  if (txPktExists) {
2529  txPacket = std::make_shared<EthPacketData>(16384);
2530  txPacket->unserialize("txpacket", cp);
2531  }
2532 
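 // After a restore all three state machines are re-armed; each one stops
 // itself again on the first tick if it finds nothing to do.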
2533  rxTick = true;
2534  txTick = true;
2535  txFifoTick = true;
2536 
2537  Tick rdtr_time, radv_time, tidv_time, tadv_time, inter_time;
2538  UNSERIALIZE_SCALAR(rdtr_time);
2539  UNSERIALIZE_SCALAR(radv_time);
2540  UNSERIALIZE_SCALAR(tidv_time);
2541  UNSERIALIZE_SCALAR(tadv_time);
2542  UNSERIALIZE_SCALAR(inter_time);
2543 
2544  if (rdtr_time)
2545  schedule(rdtrEvent, rdtr_time);
2546 
2547  if (radv_time)
2548  schedule(radvEvent, radv_time);
2549 
2550  if (tidv_time)
2551  schedule(tidvEvent, tidv_time);
2552 
2553  if (tadv_time)
2554  schedule(tadvEvent, tadv_time);
2555 
2556  if (inter_time)
2557  schedule(interEvent, inter_time);
2558 
2559  UNSERIALIZE_SCALAR(pktOffset);
2560 
2561  txDescCache.unserializeSection(cp, "TxDescCache");
2562  rxDescCache.unserializeSection(cp, "RxDescCache");
2563 }
2564 
2565 IGbE *
2566 IGbEParams::create()
2567 {
2568  return new IGbE(this);
2569 }