 o Sorted includes and added <endian.h> for endianness support.
 o Removed unneeded header files.
 o bus_dma(9) fix:
   - created a parent DMA tag with a 1GB DMA address limit and no
     alignment restrictions (tag setup sketched after this list).
   - set a 4096-byte alignment requirement for the Tx/Rx descriptor rings.
   - separated the Rx buffer tag from the Tx buffer tag; the Tx tag
     allows up to 16 segments while the Rx tag allows only a single
     segment.
   - the controller seems to have no alignment restrictions on Tx/Rx
     buffers, so the ETHER_ALIGN restriction on Tx/Rx buffers was
     removed.
   - created a spare Rx DMA map which is used to cope with a failure to
     load a DMA map.
   - made sure to load the full Tx/Rx descriptor ring size for the
     Tx/Rx descriptor DMA maps; previously bfe(4) loaded only a single
     descriptor's size for each ring. I have no idea how it ran without
     problems.
   - don't blindly cast the bus_addr_t type to 32 bits in bfe_dma_map().
   - created bfe_dma_free() to free allocated DMA memory/tags.
   - made sure to invoke bus_dmamap_sync(9) before/after processing
     descriptor rings and buffers. Because the hardware has a severe
     DMA address space limitation, bounce buffers are always used for
     descriptor/buffer access on systems with more than 1GB of memory.
   - added Tx descriptor ring initialization function,
     bfe_list_tx_init().
   - moved producer/consumer index initialization from bfe_chip_reset()
     to bfe_list_tx_init() and bfe_list_rx_init().
   - added bfe_discard_buf(), which updates a loaded descriptor without
     unloading/reloading its DMA map, to speed up error recovery.
   - implemented Tx-side bus_dmamap_load_mbuf_sg(9). The segment limit
     was chosen to be 16, which should be enough for non-TSO capable
     hardware. The SOF bit of the Tx descriptor is set last to avoid a
     potential race (see the Tx sketch after this list).
   - don't give up sending frames in bfe_start() until the hardware
     runs out of free descriptors.
   - added XXX comment to second kick command and possible workaround.
   - implemented Rx-side bus_dmamap_load_mbuf_sg(9) (see the Rx sketch
     after this list).
   - removed bfe_dma_map_desc() as it's not needed anymore after
     the conversion to bus_dmamap_load_mbuf_sg(9).
   - added endianness support. With this change bfe(4) should work on
     any architecture that can create bounce buffers within the 1GB
     address range.
   - added missing bus_dmamap_sync(9) calls in bfe_tx_eof()/bfe_rx_eof().
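
   The following sketch illustrates the tag layout described above; it
   is not the committed code. The helper name bfe_dma_tags_create() is
   hypothetical, and the sizes reuse the constants added in the header
   diff below (BFE_DMA_MAXADDR, BFE_TX_RING_ALIGN, BFE_MAXTXSEGS,
   BFE_TX_LIST_SIZE). The Rx ring tag would be created the same way
   with BFE_RX_RING_ALIGN.

	/*
	 * Sketch: parent tag limited to the controller's 1GB DMA range with
	 * no alignment restriction, a 4096-byte aligned tag for the Tx
	 * descriptor ring, a multi-segment Tx mbuf tag, a single-segment Rx
	 * mbuf tag and the spare Rx map used for error recovery.
	 */
	static int
	bfe_dma_tags_create(struct bfe_softc *sc)
	{
		int error;

		/* Parent tag: 1GB address limit, no alignment restriction. */
		error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), 1, 0,
		    BFE_DMA_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
		    NULL, NULL, &sc->bfe_parent_tag);
		if (error != 0)
			return (error);
		/* Tx descriptor ring: one segment, 4096-byte aligned. */
		error = bus_dma_tag_create(sc->bfe_parent_tag,
		    BFE_TX_RING_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL, BFE_TX_LIST_SIZE, 1, BFE_TX_LIST_SIZE, 0,
		    NULL, NULL, &sc->bfe_tx_tag);
		if (error != 0)
			return (error);
		/* Tx mbufs: up to BFE_MAXTXSEGS (16) segments per frame. */
		error = bus_dma_tag_create(sc->bfe_parent_tag, 1, 0,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MCLBYTES * BFE_MAXTXSEGS, BFE_MAXTXSEGS, MCLBYTES, 0,
		    NULL, NULL, &sc->bfe_txmbuf_tag);
		if (error != 0)
			return (error);
		/* Rx mbufs: exactly one segment per buffer. */
		error = bus_dma_tag_create(sc->bfe_parent_tag, 1, 0,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->bfe_rxmbuf_tag);
		if (error != 0)
			return (error);
		/* Spare Rx map used when loading a new Rx mbuf fails. */
		return (bus_dmamap_create(sc->bfe_rxmbuf_tag, 0,
		    &sc->bfe_rx_sparemap));
	}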
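
   A sketch of the Tx-side bus_dmamap_load_mbuf_sg(9) path with the
   "SOF last" ordering and the descriptor byte swapping. The bfe_encap()
   shape, the BFE_DESC_* control bits and the BFE_PCI_DMA offset are
   assumed from the driver's existing definitions, not copied from the
   committed code.

	static int
	bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
	{
		bus_dma_segment_t segs[BFE_MAXTXSEGS];
		struct bfe_tx_data *td;
		struct bfe_desc *d;
		struct mbuf *m;
		uint32_t ctrl;
		int error, first, i, nsegs, prod;

		prod = sc->bfe_tx_prod;
		td = &sc->bfe_tx_ring[prod];
		error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, td->bfe_map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			/* Too many segments: compact the chain and retry once. */
			m = m_defrag(*m_head, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
			error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag,
			    td->bfe_map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (error != 0 || nsegs == 0)
			return (error);
		if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) {
			/* Not enough free descriptors; leave the frame queued. */
			bus_dmamap_unload(sc->bfe_txmbuf_tag, td->bfe_map);
			return (ENOBUFS);
		}

		first = prod;
		for (i = 0; i < nsegs; i++) {
			d = &sc->bfe_tx_list[prod];
			ctrl = segs[i].ds_len & BFE_DESC_LEN;
			if (i == nsegs - 1)
				ctrl |= BFE_DESC_EOF | BFE_DESC_IOC;
			if (prod == BFE_TX_LIST_CNT - 1)
				ctrl |= BFE_DESC_EOT;
			/* Descriptors are little-endian on the bus. */
			d->bfe_addr = htole32(BFE_ADDR_LO(segs[i].ds_addr) +
			    BFE_PCI_DMA);
			d->bfe_ctrl = htole32(ctrl);
			BFE_INC(prod, BFE_TX_LIST_CNT);
		}
		/* Set SOF on the first descriptor last to avoid a race. */
		sc->bfe_tx_list[first].bfe_ctrl |= htole32(BFE_DESC_SOF);
		td->bfe_mbuf = *m_head;
		sc->bfe_tx_cnt += nsegs;
		sc->bfe_tx_prod = prod;

		/* Flush buffer and descriptor ring before kicking the chip. */
		bus_dmamap_sync(sc->bfe_txmbuf_tag, td->bfe_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		return (0);
	}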
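
   And a sketch of the Rx replenish/discard logic built around the spare
   map; again an illustration of the approach, not the committed code.
   The bfe_newbuf() name and the BFE_DESC_*/BFE_PCI_DMA symbols are
   assumptions.

	/*
	 * Replace the mbuf in ring slot idx.  The new mbuf is loaded into the
	 * spare map first; only when that succeeds are the maps swapped, so a
	 * load failure never leaves the slot without a usable buffer.
	 */
	static int
	bfe_newbuf(struct bfe_softc *sc, int idx)
	{
		struct bfe_rx_data *rd;
		struct bfe_desc *d;
		struct mbuf *m;
		bus_dma_segment_t seg;
		bus_dmamap_t map;
		uint32_t ctrl;
		int nsegs;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		m->m_len = m->m_pkthdr.len = MCLBYTES;

		if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag,
		    sc->bfe_rx_sparemap, m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			return (ENOBUFS);
		}

		rd = &sc->bfe_rx_ring[idx];
		if (rd->bfe_mbuf != NULL) {
			bus_dmamap_sync(sc->bfe_rxmbuf_tag, rd->bfe_map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bfe_rxmbuf_tag, rd->bfe_map);
		}
		map = rd->bfe_map;
		rd->bfe_map = sc->bfe_rx_sparemap;
		sc->bfe_rx_sparemap = map;
		rd->bfe_mbuf = m;

		ctrl = seg.ds_len & BFE_DESC_LEN;
		if (idx == BFE_RX_LIST_CNT - 1)
			ctrl |= BFE_DESC_EOT;
		rd->bfe_ctrl = ctrl;		/* cached for bfe_discard_buf() */

		d = &sc->bfe_rx_list[idx];
		d->bfe_addr = htole32(BFE_ADDR_LO(seg.ds_addr) + BFE_PCI_DMA);
		d->bfe_ctrl = htole32(ctrl);
		bus_dmamap_sync(sc->bfe_rxmbuf_tag, rd->bfe_map,
		    BUS_DMASYNC_PREREAD);
		return (0);
	}

	/*
	 * Requeue the existing buffer when bfe_newbuf() fails: no DMA map
	 * unload/reload, just restore the cached control word.
	 */
	static void
	bfe_discard_buf(struct bfe_softc *sc, int idx)
	{
		struct bfe_rx_data *rd;

		rd = &sc->bfe_rx_ring[idx];
		sc->bfe_rx_list[idx].bfe_ctrl = htole32(rd->bfe_ctrl);
	}
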
 o Used PCI_BAR instead of a hardcoded value to set up the BARs.
   Simplified register access with bus_write_4(9)/bus_read_4(9) and
   removed bfe_btag, bfe_bhandle and bfe_vhandle from the softc as they
   are no longer used.
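
   A minimal sketch of what this looks like in bfe_attach(), assuming
   the BFE_IMASK register name from the driver header; PCI_BAR() here is
   the BAR index macro named above.

	int rid;

	rid = PCI_BAR(0);		/* instead of a hardcoded config offset */
	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bfe_res == NULL)
		return (ENXIO);
	/*
	 * bus_read_4(9)/bus_write_4(9) take the struct resource directly,
	 * so no bus_space tag/handle pair needs to live in the softc.
	 */
	CSR_WRITE_4(sc, BFE_IMASK, 0);	/* e.g. mask all interrupts */
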
 o Reordered the device detach logic such that bfe_detach() is also
   used to handle the driver attach failure case.
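
   Sketched below is the resulting attach error path, under the
   assumption that bfe_detach() NULL-checks each resource so it can be
   called with a partially initialized softc; the interrupt setup is
   just one example of a step that can fail.

	static int
	bfe_attach(device_t dev)
	{
		struct bfe_softc *sc;
		int error;

		sc = device_get_softc(dev);
		sc->bfe_dev = dev;
		/* ... allocate BAR/IRQ resources, DMA memory, ifnet ... */
		error = bus_setup_intr(dev, sc->bfe_irq,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, bfe_intr, sc,
		    &sc->bfe_intrhand);
		if (error != 0) {
			device_printf(dev, "couldn't set up interrupt handler\n");
			goto fail;
		}
		return (0);
	fail:
		/* Single teardown path: bfe_detach() frees what attach set up. */
		bfe_detach(dev);
		return (error);
	}
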
 o Removed an unnecessary KASSERT in bfe_detach().
 o Removed bfe_rx_cnt, bfe_up, bfe_vpd_prodname and bfe_vpd_readonly
   from the softc; they were not used at all.
 o Removed BFE_RX_RING_SIZE/BFE_TX_RING_SIZE/BFE_LINK_DOWN.

Tested by:	kib, Gleb Kurtsou gleb.kurtsou at gmail dot com
		Ulrich Spoerlein uspoerlein at gmail dot com

Committed by:	Pyun YongHyeon (2008-08-21 04:21:53 +00:00)
Commit:		96ee09c546 (parent 4e683d7252)
Notes:		svn2git 2020-12-20 02:59:44 +00:00
		svn path=/head/; revision=181953

@@ -428,9 +428,6 @@
 #define PCI_CLRBIT(dev, reg, x, s) \
 	pci_write_config(dev, reg, (pci_read_config(dev, reg, s) & ~(x)), s)
 
-#define BFE_RX_RING_SIZE	512
-#define BFE_TX_RING_SIZE	512
-#define BFE_LINK_DOWN		5
 #define BFE_TX_LIST_CNT		128
 #define BFE_RX_LIST_CNT		128
 #define BFE_TX_LIST_SIZE	BFE_TX_LIST_CNT * sizeof(struct bfe_desc)
@@ -438,11 +435,15 @@
 #define BFE_RX_OFFSET		30
 #define BFE_TX_QLEN		256
 
-#define CSR_READ_4(sc, reg) \
-	bus_space_read_4(sc->bfe_btag, sc->bfe_bhandle, reg)
+#define BFE_RX_RING_ALIGN	4096
+#define BFE_TX_RING_ALIGN	4096
+#define BFE_MAXTXSEGS		16
+#define BFE_DMA_MAXADDR		0x3FFFFFFF	/* 1GB DMA address limit. */
+#define BFE_ADDR_LO(x)		((uint64_t)(x) & 0xFFFFFFFF)
 
-#define CSR_WRITE_4(sc, reg, val) \
-	bus_space_write_4(sc->bfe_btag, sc->bfe_bhandle, reg, val)
+#define CSR_READ_4(sc, reg)		bus_read_4(sc->bfe_res, reg)
+#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->bfe_res, reg, val)
 
 #define BFE_OR(sc, name, val) \
 	CSR_WRITE_4(sc, name, CSR_READ_4(sc, name) | val)
@@ -456,11 +457,17 @@
 #define BFE_INC(x, y)		(x) = ((x) == ((y)-1)) ? 0 : (x)+1
 
-struct bfe_data {
+struct bfe_tx_data {
 	struct mbuf		*bfe_mbuf;
 	bus_dmamap_t		bfe_map;
 };
 
+struct bfe_rx_data {
+	struct mbuf		*bfe_mbuf;
+	bus_dmamap_t		bfe_map;
+	u_int32_t		bfe_ctrl;
+};
+
 struct bfe_desc {
 	u_int32_t		bfe_ctrl;
 	u_int32_t		bfe_addr;
@@ -498,38 +505,34 @@ struct bfe_softc
 	struct ifnet		*bfe_ifp;	/* interface info */
 	device_t		bfe_dev;
 	device_t		bfe_miibus;
-	bus_space_handle_t	bfe_bhandle;
-	vm_offset_t		bfe_vhandle;
-	bus_space_tag_t		bfe_btag;
-	bus_dma_tag_t		bfe_tag;
 	bus_dma_tag_t		bfe_parent_tag;
 	bus_dma_tag_t		bfe_tx_tag, bfe_rx_tag;
 	bus_dmamap_t		bfe_tx_map, bfe_rx_map;
+	bus_dma_tag_t		bfe_txmbuf_tag, bfe_rxmbuf_tag;
+	bus_dmamap_t		bfe_rx_sparemap;
 	void			*bfe_intrhand;
 	struct resource		*bfe_irq;
 	struct resource		*bfe_res;
 	struct callout		bfe_stat_co;
 	struct bfe_hw_stats	bfe_hwstats;
 	struct bfe_desc		*bfe_tx_list, *bfe_rx_list;
-	struct bfe_data		bfe_tx_ring[BFE_TX_LIST_CNT]; /* XXX */
-	struct bfe_data		bfe_rx_ring[BFE_RX_LIST_CNT]; /* XXX */
+	struct bfe_tx_data	bfe_tx_ring[BFE_TX_LIST_CNT]; /* XXX */
+	struct bfe_rx_data	bfe_rx_ring[BFE_RX_LIST_CNT]; /* XXX */
 	struct mtx		bfe_mtx;
 	u_int32_t		bfe_flags;
 	u_int32_t		bfe_imask;
 	u_int32_t		bfe_dma_offset;
 	u_int32_t		bfe_tx_cnt, bfe_tx_cons, bfe_tx_prod;
-	u_int32_t		bfe_rx_cnt, bfe_rx_prod, bfe_rx_cons;
+	u_int32_t		bfe_rx_prod, bfe_rx_cons;
 	u_int32_t		bfe_tx_dma, bfe_rx_dma;
 	u_int32_t		bfe_link;
 	int			bfe_watchdog_timer;
 	u_int8_t		bfe_phyaddr;	/* Address of the card's PHY */
 	u_int8_t		bfe_mdc_port;
 	u_int8_t		bfe_core_unit;
-	u_int8_t		bfe_up;
 	u_char			bfe_enaddr[6];
 	int			bfe_if_flags;
-	char			*bfe_vpd_prodname;
-	char			*bfe_vpd_readonly;
 };
 
 struct bfe_type