staging: et131x: Converting et1310_tx.c function and local names from CamelCase

Also some minor code clean-ups.
Tested on an ET-131x device.

Signed-off-by: Mark Einon <mark.einon@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Authored by Mark Einon on 2011-08-23 23:40:34 +01:00; committed by Greg Kroah-Hartman
parent 1c863c8bcb
commit 567486ac76
3 changed files with 132 additions and 140 deletions
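
The renames follow the usual kernel pattern: functions visible outside et1310_tx.c keep the et131x_ prefix, while file-local helpers drop it and become static, which in turn lets the forward declarations at the top of the file go once the helpers are defined before their callers. A minimal sketch of the resulting prototypes, as they appear in the diff below:

/* Exported from et1310_tx.c: keeps the driver prefix, declared in the header */
void et131x_config_tx_dma_regs(struct et131x_adapter *adapter);

/* Local to et1310_tx.c: prefix dropped, made static; defining these
 * before their callers removes the need for forward declarations */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev);
static inline void free_send_packet(struct et131x_adapter *etdev,
				    struct tcb *tcb);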

--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c

@@ -88,12 +88,6 @@
 #include "et1310_tx.h"
 #include "et131x.h"
 
-static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
-					   struct tcb *tcb);
-static int et131x_send_packet(struct sk_buff *skb,
-			      struct et131x_adapter *etdev);
-static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
-
 /**
  * et131x_tx_dma_memory_alloc
  * @adapter: pointer to our private adapter structure
@@ -186,13 +180,13 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 }
 
 /**
- * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
+ * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
  * @etdev: pointer to our private adapter structure
  *
  * Configure the transmit engine with the ring buffers we have created
  * and prepare it for use.
  */
-void ConfigTxDmaRegs(struct et131x_adapter *etdev)
+void et131x_config_tx_dma_regs(struct et131x_adapter *etdev)
 {
 	struct txdma_regs __iomem *txdma = &etdev->regs->txdma;
@@ -276,131 +270,6 @@ void et131x_init_send(struct et131x_adapter *adapter)
 	tx_ring->send_tail = NULL;
 }
 
-/**
- * et131x_send_packets - This function is called by the OS to send packets
- * @skb: the packet(s) to send
- * @netdev:device on which to TX the above packet(s)
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only
- */
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
-{
-	int status = 0;
-	struct et131x_adapter *etdev = NULL;
-
-	etdev = netdev_priv(netdev);
-
-	/* Send these packets
-	 *
-	 * NOTE: The Linux Tx entry point is only given one packet at a time
-	 * to Tx, so the PacketCount and it's array used makes no sense here
-	 */
-
-	/* TCB is not available */
-	if (etdev->tx_ring.used >= NUM_TCB) {
-		/* NOTE: If there's an error on send, no need to queue the
-		 * packet under Linux; if we just send an error up to the
-		 * netif layer, it will resend the skb to us.
-		 */
-		status = -ENOMEM;
-	} else {
-		/* We need to see if the link is up; if it's not, make the
-		 * netif layer think we're good and drop the packet
-		 */
-		if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
-					!netif_carrier_ok(netdev)) {
-			dev_kfree_skb_any(skb);
-			skb = NULL;
-
-			etdev->net_stats.tx_dropped++;
-		} else {
-			status = et131x_send_packet(skb, etdev);
-			if (status != 0 && status != -ENOMEM) {
-				/* On any other error, make netif think we're
-				 * OK and drop the packet
-				 */
-				dev_kfree_skb_any(skb);
-				skb = NULL;
-				etdev->net_stats.tx_dropped++;
-			}
-		}
-	}
-	return status;
-}
-
-/**
- * et131x_send_packet - Do the work to send a packet
- * @skb: the packet(s) to send
- * @etdev: a pointer to the device's private adapter structure
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only.
- *
- * Assumption: Send spinlock has been acquired
- */
-static int et131x_send_packet(struct sk_buff *skb,
-			      struct et131x_adapter *etdev)
-{
-	int status;
-	struct tcb *tcb = NULL;
-	u16 *shbufva;
-	unsigned long flags;
-
-	/* All packets must have at least a MAC address and a protocol type */
-	if (skb->len < ETH_HLEN)
-		return -EIO;
-
-	/* Get a TCB for this packet */
-	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
-
-	tcb = etdev->tx_ring.tcb_qhead;
-
-	if (tcb == NULL) {
-		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-		return -ENOMEM;
-	}
-
-	etdev->tx_ring.tcb_qhead = tcb->next;
-
-	if (etdev->tx_ring.tcb_qhead == NULL)
-		etdev->tx_ring.tcb_qtail = NULL;
-
-	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-
-	tcb->skb = skb;
-
-	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
-		shbufva = (u16 *) skb->data;
-
-		if ((shbufva[0] == 0xffff) &&
-		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-			tcb->flags |= fMP_DEST_BROAD;
-		} else if ((shbufva[0] & 0x3) == 0x0001) {
-			tcb->flags |= fMP_DEST_MULTI;
-		}
-	}
-
-	tcb->next = NULL;
-
-	/* Call the NIC specific send handler. */
-	status = nic_send_packet(etdev, tcb);
-
-	if (status != 0) {
-		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
-
-		if (etdev->tx_ring.tcb_qtail)
-			etdev->tx_ring.tcb_qtail->next = tcb;
-		else
-			/* Apparently ready Q is empty. */
-			etdev->tx_ring.tcb_qhead = tcb;
-
-		etdev->tx_ring.tcb_qtail = tcb;
-
-		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-		return status;
-	}
-	WARN_ON(etdev->tx_ring.used > NUM_TCB);
-	return 0;
-}
-
 /**
  * nic_send_packet - NIC specific send handler for version B silicon.
  * @etdev: pointer to our adapter
@@ -618,16 +487,139 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 	return 0;
 }
 
+/**
+ * send_packet - Do the work to send a packet
+ * @skb: the packet(s) to send
+ * @etdev: a pointer to the device's private adapter structure
+ *
+ * Return 0 in almost all cases; non-zero value in extreme hard failure only.
+ *
+ * Assumption: Send spinlock has been acquired
+ */
+static int send_packet(struct sk_buff *skb, struct et131x_adapter *etdev)
+{
+	int status;
+	struct tcb *tcb = NULL;
+	u16 *shbufva;
+	unsigned long flags;
+
+	/* All packets must have at least a MAC address and a protocol type */
+	if (skb->len < ETH_HLEN)
+		return -EIO;
+
+	/* Get a TCB for this packet */
+	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
+
+	tcb = etdev->tx_ring.tcb_qhead;
+
+	if (tcb == NULL) {
+		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
+		return -ENOMEM;
+	}
+
+	etdev->tx_ring.tcb_qhead = tcb->next;
+
+	if (etdev->tx_ring.tcb_qhead == NULL)
+		etdev->tx_ring.tcb_qtail = NULL;
+
+	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
+
+	tcb->skb = skb;
+
+	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
+		shbufva = (u16 *) skb->data;
+
+		if ((shbufva[0] == 0xffff) &&
+		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
+			tcb->flags |= fMP_DEST_BROAD;
+		} else if ((shbufva[0] & 0x3) == 0x0001) {
+			tcb->flags |= fMP_DEST_MULTI;
+		}
+	}
+
+	tcb->next = NULL;
+
+	/* Call the NIC specific send handler. */
+	status = nic_send_packet(etdev, tcb);
+
+	if (status != 0) {
+		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
+
+		if (etdev->tx_ring.tcb_qtail)
+			etdev->tx_ring.tcb_qtail->next = tcb;
+		else
+			/* Apparently ready Q is empty. */
+			etdev->tx_ring.tcb_qhead = tcb;
+
+		etdev->tx_ring.tcb_qtail = tcb;
+
+		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
+		return status;
+	}
+	WARN_ON(etdev->tx_ring.used > NUM_TCB);
+	return 0;
+}
+
 /**
- * et131x_free_send_packet - Recycle a struct tcb
+ * et131x_send_packets - This function is called by the OS to send packets
+ * @skb: the packet(s) to send
+ * @netdev:device on which to TX the above packet(s)
+ *
+ * Return 0 in almost all cases; non-zero value in extreme hard failure only
+ */
+int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
+{
+	int status = 0;
+	struct et131x_adapter *etdev = NULL;
+
+	etdev = netdev_priv(netdev);
+
+	/* Send these packets
+	 *
+	 * NOTE: The Linux Tx entry point is only given one packet at a time
+	 * to Tx, so the PacketCount and it's array used makes no sense here
+	 */
+
+	/* TCB is not available */
+	if (etdev->tx_ring.used >= NUM_TCB) {
+		/* NOTE: If there's an error on send, no need to queue the
+		 * packet under Linux; if we just send an error up to the
+		 * netif layer, it will resend the skb to us.
+		 */
+		status = -ENOMEM;
+	} else {
+		/* We need to see if the link is up; if it's not, make the
+		 * netif layer think we're good and drop the packet
+		 */
+		if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+					!netif_carrier_ok(netdev)) {
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+
+			etdev->net_stats.tx_dropped++;
+		} else {
+			status = send_packet(skb, etdev);
+			if (status != 0 && status != -ENOMEM) {
+				/* On any other error, make netif think we're
+				 * OK and drop the packet
+				 */
+				dev_kfree_skb_any(skb);
+				skb = NULL;
+				etdev->net_stats.tx_dropped++;
+			}
+		}
+	}
+	return status;
+}
+
+/**
+ * free_send_packet - Recycle a struct tcb
  * @etdev: pointer to our adapter
  * @tcb: pointer to struct tcb
  *
  * Complete the packet if necessary
  * Assumption - Send spinlock has been acquired
  */
-inline void et131x_free_send_packet(struct et131x_adapter *etdev,
+static inline void free_send_packet(struct et131x_adapter *etdev,
 					struct tcb *tcb)
 {
 	unsigned long flags;
@@ -717,7 +709,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
 		freed++;
-		et131x_free_send_packet(etdev, tcb);
+		free_send_packet(etdev, tcb);
 
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
@@ -766,7 +758,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 				etdev->tx_ring.send_tail = NULL;
 
 			spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-			et131x_free_send_packet(etdev, tcb);
+			free_send_packet(etdev, tcb);
 			spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 			/* Goto the next packet */
@@ -781,7 +773,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 				etdev->tx_ring.send_tail = NULL;
 
 			spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
-			et131x_free_send_packet(etdev, tcb);
+			free_send_packet(etdev, tcb);
 			spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 			/* Goto the next packet */

--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h

@@ -137,7 +137,7 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter);
 /* et131x_tx.c */
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter);
 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter);
-void ConfigTxDmaRegs(struct et131x_adapter *adapter);
+void et131x_config_tx_dma_regs(struct et131x_adapter *adapter);
 void et131x_init_send(struct et131x_adapter *adapter);
 void et131x_tx_dma_disable(struct et131x_adapter *adapter);
 void et131x_tx_dma_enable(struct et131x_adapter *adapter);

--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c

@@ -389,7 +389,7 @@ int et131x_adapter_setup(struct et131x_adapter *etdev)
 	et1310_config_txmac_regs(etdev);
 
 	et131x_config_rx_dma_regs(etdev);
-	ConfigTxDmaRegs(etdev);
+	et131x_config_tx_dma_regs(etdev);
 
 	et1310_config_macstat_regs(etdev);