mana: add lro and tso stat counters

Add a few stat counters for TSO and LRO.

MFC after:	3 days
Sponsored by:	Microsoft
This commit is contained in:
Wei Hu 2023-09-14 11:56:20 +00:00
parent 79278872ad
commit b167e449c8
3 changed files with 153 additions and 0 deletions

View file

@ -170,6 +170,9 @@ struct mana_txq {
struct mtx txq_mtx;
char txq_mtx_name[16];
uint64_t tso_pkts;
uint64_t tso_bytes;
struct task enqueue_task;
struct taskqueue *enqueue_tq;
@ -423,6 +426,8 @@ struct mana_rxq {
uint32_t buf_index;
uint64_t lro_tried;
uint64_t lro_failed;
struct mana_stats stats;
/* MUST BE THE LAST MEMBER:

View file

@ -501,6 +501,7 @@ mana_xmit(struct mana_txq *txq)
struct gdma_queue *gdma_sq;
struct mana_cq *cq;
int err, len;
bool is_tso;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq->idx].tx_cq;
@ -578,7 +579,10 @@ mana_xmit(struct mana_txq *txq)
pkg.wqe_req.flags = 0;
pkg.wqe_req.client_data_unit = 0;
is_tso = false;
if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
is_tso = true;
if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
else
@ -641,6 +645,11 @@ mana_xmit(struct mana_txq *txq)
packets++;
bytes += len;
if (is_tso) {
txq->tso_pkts++;
txq->tso_bytes += len;
}
}
counter_enter();
@ -1697,9 +1706,12 @@ mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
do_if_input = true;
if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
rxq->lro_tried++;
if (rxq->lro.lro_cnt != 0 &&
tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
do_if_input = false;
else
rxq->lro_failed++;
}
if (do_if_input) {
if_input(ndev, mbuf);

View file

@ -46,6 +46,96 @@ SYSCTL_INT(_hw_mana, OID_AUTO, log_level, CTLFLAG_RWTUN,
SYSCTL_CONST_STRING(_hw_mana, OID_AUTO, driver_version, CTLFLAG_RD,
DRV_MODULE_VERSION, "MANA driver version");
/*
 * Sysctl handler for a uint64_t RX statistic kept per queue.
 * Reads report the sum of the field (at byte offset arg2 inside
 * struct mana_rxq) across every RX queue of the port; a write of
 * any value clears the field on all queues.
 */
static int
mana_sysctl_rx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	struct mana_rxq *rxq;
	uint64_t total = 0;
	int off = arg2;
	int rc, idx;

	/* Aggregate the per-queue counters into one value. */
	for (idx = 0; idx < apc->num_queues; idx++) {
		rxq = apc->rxqs[idx];
		total += *(uint64_t *)((uint8_t *)rxq + off);
	}

	rc = sysctl_handle_64(oidp, &total, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* A write request resets the counter on every queue. */
	for (idx = 0; idx < apc->num_queues; idx++) {
		rxq = apc->rxqs[idx];
		*(uint64_t *)((uint8_t *)rxq + off) = 0;
	}

	return (0);
}
/*
 * Sysctl handler exposing a uint16_t field of the first RX queue
 * (at byte offset arg2 inside struct mana_rxq), widened to 64 bits
 * for sysctl_handle_64.  The value is effectively read-only: writes
 * are accepted but have no effect.
 */
static int
mana_sysctl_rx_stat_u16(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	struct mana_rxq *rxq = apc->rxqs[0];
	uint64_t widened;
	int rc;

	/* Only queue 0 is sampled; the limit is identical on all queues. */
	widened = *(uint16_t *)((uint8_t *)rxq + arg2);

	rc = sysctl_handle_64(oidp, &widened, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* Writes are silently ignored. */
	return (0);
}
/*
 * Sysctl handler exposing a uint32_t field of the first RX queue
 * (at byte offset arg2 inside struct mana_rxq), widened to 64 bits
 * for sysctl_handle_64.  The value is effectively read-only: writes
 * are accepted but have no effect.
 */
static int
mana_sysctl_rx_stat_u32(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	struct mana_rxq *rxq = apc->rxqs[0];
	uint64_t widened;
	int rc;

	/* Only queue 0 is sampled; the limit is identical on all queues. */
	widened = *(uint32_t *)((uint8_t *)rxq + arg2);

	rc = sysctl_handle_64(oidp, &widened, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* Writes are silently ignored. */
	return (0);
}
/*
 * Sysctl handler for a uint64_t TX statistic kept per queue.
 * Reads report the sum of the field (at byte offset arg2 inside
 * struct mana_txq) across every TX queue of the port; a write of
 * any value clears the field on all queues.
 */
static int
mana_sysctl_tx_stat_agg_u64(SYSCTL_HANDLER_ARGS)
{
	struct mana_port_context *apc = arg1;
	struct mana_txq *txq;
	uint64_t total = 0;
	int off = arg2;
	int rc, idx;

	/* Aggregate the per-queue counters into one value. */
	for (idx = 0; idx < apc->num_queues; idx++) {
		txq = &apc->tx_qp[idx].txq;
		total += *(uint64_t *)((uint8_t *)txq + off);
	}

	rc = sysctl_handle_64(oidp, &total, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* A write request resets the counter on every queue. */
	for (idx = 0; idx < apc->num_queues; idx++) {
		txq = &apc->tx_qp[idx].txq;
		*(uint64_t *)((uint8_t *)txq + off) = 0;
	}

	return (0);
}
void
mana_sysctl_add_port(struct mana_port_context *apc)
{
@ -99,6 +189,52 @@ mana_sysctl_add_port(struct mana_port_context *apc)
CTLFLAG_RD, &port_stats->rx_drops, "Receive packet drops");
SYSCTL_ADD_COUNTER_U64(ctx, stats_list, OID_AUTO, "tx_drops",
CTLFLAG_RD, &port_stats->tx_drops, "Transmit packet drops");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_queued",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_queued),
mana_sysctl_rx_stat_agg_u64, "LU", "LRO queued");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_flushed",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_flushed),
mana_sysctl_rx_stat_agg_u64, "LU", "LRO flushed");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_bad_csum",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_bad_csum),
mana_sysctl_rx_stat_agg_u64, "LU", "LRO bad checksum");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_tried",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro_tried),
mana_sysctl_rx_stat_agg_u64, "LU", "LRO tried");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "rx_lro_failed",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro_failed),
mana_sysctl_rx_stat_agg_u64, "LU", "LRO failed");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_ackcnt_lim",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_ackcnt_lim),
mana_sysctl_rx_stat_u16,
"LU", "Max # of ACKs to be aggregated by LRO");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_length_lim",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_length_lim),
mana_sysctl_rx_stat_u32,
"LU", "Max len of aggregated data in byte by LRO");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "lro_cnt",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_rxq, lro.lro_cnt),
mana_sysctl_rx_stat_u32,
"LU", "Max # or LRO packet count");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_packets",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_txq, tso_pkts),
mana_sysctl_tx_stat_agg_u64, "LU", "TSO packets");
SYSCTL_ADD_PROC(ctx, stats_list, OID_AUTO, "tx_tso_bytes",
CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, apc,
__offsetof(struct mana_txq, tso_bytes),
mana_sysctl_tx_stat_agg_u64, "LU", "TSO bytes");
}
void