 - Correct the vlan filter programming.  The device filter is built in
   reverse order.
 - Name the cq taskqueues according to whether they handle rx or tx.
 - Default LRO to on.
Jeff Roberson 2011-03-23 02:47:04 +00:00
parent 50de5d07d3
commit a340f09abe
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=219893
3 changed files with 15 additions and 17 deletions

View file

@@ -51,21 +51,23 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 	cq->size = entries;
+	cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
+	    taskqueue_thread_enqueue, &cq->tq);
 	if (mode == RX) {
 		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 		cq->vector = (ring + priv->port) %
 		    mdev->dev->caps.num_comp_vectors;
 		TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq);
+		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq",
+		    if_name(priv->dev));
 	} else {
 		cq->buf_size = sizeof(struct mlx4_cqe);
 		cq->vector = MLX4_LEAST_ATTACHED_VECTOR;
 		TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq);
+		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq",
+		    if_name(priv->dev));
 	}
-	cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
-	    taskqueue_thread_enqueue, &cq->tq);
-	taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s cq",
-	    if_name(priv->dev));
 	cq->ring = ring;
 	cq->is_tx = mode;
 	mtx_init(&cq->lock.m, "mlx4 cq", NULL, MTX_DEF);
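
For reference, the per-direction naming pattern introduced above can be sketched on its own. This is a minimal illustration under assumed names (sketch_cq, sketch_cq_setup and sketch_cq_handler are invented for the example; only taskqueue_create_fast(), taskqueue_thread_enqueue, TASK_INIT() and taskqueue_start_threads() are the stock kernel interfaces the diff relies on): create the taskqueue before branching on the CQ direction, then let each branch start the worker thread with its own "%s rx cq" / "%s tx cq" name.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

/* Invented stand-in for the driver's CQ state. */
struct sketch_cq {
	struct taskqueue	*tq;	/* per-CQ completion taskqueue */
	struct task		 task;	/* deferred completion work */
	int			 is_tx;	/* direction: 0 = rx, 1 = tx */
};

static void
sketch_cq_handler(void *arg, int pending)
{
	/* Completion processing would run here. */
}

static void
sketch_cq_setup(struct sketch_cq *cq, const char *ifname)
{
	/* Create the queue first so either branch can start (and name) it. */
	cq->tq = taskqueue_create_fast("sketch_cq", M_NOWAIT,
	    taskqueue_thread_enqueue, &cq->tq);
	TASK_INIT(&cq->task, 0, sketch_cq_handler, cq);
	if (cq->is_tx)
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq", ifname);
	else
		taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq", ifname);
}

The format string passed to taskqueue_start_threads() becomes the kernel thread name, so the rx and tx completion threads of each interface can be told apart in ps(1) or top(1).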

View file

@@ -53,13 +53,11 @@ static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
 	if ((vid == 0) || (vid > 4095)) /* Invalid */
 		return;
 	en_dbg(HW, priv, "adding VLAN:%d\n", vid);
-	spin_lock(&priv->vlan_lock);
-	priv->vlgrp_modified = true;
 	idx = vid >> 5;
 	field = 1 << (vid & 0x1f);
+	spin_lock(&priv->vlan_lock);
+	priv->vlgrp_modified = true;
 	if (priv->vlan_unregister[idx] & field)
 		priv->vlan_unregister[idx] &= ~field;
 	else
@@ -77,10 +75,10 @@ static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
 	if ((vid == 0) || (vid > 4095)) /* Invalid */
 		return;
 	en_dbg(HW, priv, "Killing VID:%d\n", vid);
-	spin_lock(&priv->vlan_lock);
-	priv->vlgrp_modified = true;
 	idx = vid >> 5;
 	field = 1 << (vid & 0x1f);
+	spin_lock(&priv->vlan_lock);
+	priv->vlgrp_modified = true;
 	if (priv->vlan_register[idx] & field)
 		priv->vlan_register[idx] &= ~field;
 	else
@@ -1541,12 +1539,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 #endif
 	if (mdev->LSO_support)
 		dev->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
-	/* Don't enable LOR unless the user requests. */
-	dev->if_capenable = dev->if_capabilities;
 	if (mdev->profile.num_lro)
 		dev->if_capabilities |= IFCAP_LRO;
+	dev->if_capenable = dev->if_capabilities;
 	/* Register for VLAN events */
 	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
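
The hunk above is what flips the LRO default: if_capenable is now copied from if_capabilities only after IFCAP_LRO has been added, instead of before. A rough stand-alone sketch of that ordering follows, using the direct struct ifnet field access of this era; sketch_set_caps() and its have_tso/have_lro parameters are invented stand-ins for the driver's mdev->LSO_support and mdev->profile.num_lro checks.

#include <sys/param.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

static void
sketch_set_caps(struct ifnet *ifp, int have_tso, int have_lro)
{
	/* Advertise every capability the hardware supports first... */
	if (have_tso)
		ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
	if (have_lro)
		ifp->if_capabilities |= IFCAP_LRO;
	/* ...then seed the enabled set from it, so LRO defaults to on. */
	ifp->if_capenable = ifp->if_capabilities;
}

Users who prefer LRO off can still clear it with ifconfig's -lro option; the change only affects the default.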

View file

@@ -51,7 +51,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, u32 *vlans)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_set_vlan_fltr_mbox *filter;
-	int i;
+	int i, j;
 	int err = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -61,8 +61,9 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, u32 *vlans)
 	filter = mailbox->buf;
 	memset(filter, 0, sizeof *filter);
 	if (vlans)
-		for (i = 0; i < VLAN_FLTR_SIZE; i ++)
-			filter->entry[i] = cpu_to_be32(vlans[i]);
+		for (i = 0, j = VLAN_FLTR_SIZE - 1; i < VLAN_FLTR_SIZE;
+		    i++, j--)
+			filter->entry[j] = cpu_to_be32(vlans[i]);
 	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
 	    MLX4_CMD_TIME_CLASS_B);
 	mlx4_free_cmd_mailbox(dev, mailbox);
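
A stand-alone userland sketch of the corrected fill: per the commit message the device consumes the filter words in reverse order, so host word i lands in mailbox word VLAN_FLTR_SIZE - 1 - i, byte-swapped to big endian. The VLAN_FLTR_SIZE value used here (4096 VLAN bits as 32-bit words) and the fill_vlan_filter() helper are assumptions for illustration; htobe32() stands in for the driver's cpu_to_be32().

#include <stdint.h>
#include <stdio.h>
#include <sys/endian.h>			/* htobe32(); use <endian.h> on glibc */

#define	VLAN_FLTR_SIZE	(4096 / 32)	/* assumed: one bit per VLAN ID */

/* Hypothetical helper mirroring the corrected loop above. */
static void
fill_vlan_filter(uint32_t dst[VLAN_FLTR_SIZE], const uint32_t src[VLAN_FLTR_SIZE])
{
	int i, j;

	for (i = 0, j = VLAN_FLTR_SIZE - 1; i < VLAN_FLTR_SIZE; i++, j--)
		dst[j] = htobe32(src[i]);
}

int
main(void)
{
	uint32_t vlans[VLAN_FLTR_SIZE] = { 0 };
	uint32_t entry[VLAN_FLTR_SIZE];
	int vid = 100;

	/* Same word/bit split the driver uses: idx = vid >> 5, bit = vid & 0x1f. */
	vlans[vid >> 5] |= 1u << (vid & 0x1f);
	fill_vlan_filter(entry, vlans);
	printf("VLAN %d -> mailbox word %d = 0x%08x\n", vid,
	    VLAN_FLTR_SIZE - 1 - (vid >> 5),
	    (unsigned)entry[VLAN_FLTR_SIZE - 1 - (vid >> 5)]);
	return (0);
}

Before the fix the words were copied in ascending order, which is why the programmed filter did not match the VLANs the host intended.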