Another round of removing historical mbuf(9) allocator flags.

They are breeding! New ones arose since last round.

Sponsored by:	Nginx, Inc.
This commit is contained in:
Gleb Smirnoff 2014-01-16 13:44:47 +00:00
parent fec721bc43
commit b8c83a1957
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=260718
5 changed files with 13 additions and 13 deletions

View file

@ -1174,7 +1174,7 @@ atse_rx_locked(struct atse_softc *sc)
sc->atse_rx_cycles--;
if (sc->atse_rx_m == NULL) {
-	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (rx_npkts);
m->m_len = m->m_pkthdr.len = MCLBYTES;

View file

@ -5443,7 +5443,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
} else if (error == EFBIG) {
/* possibly recoverable with defragmentation */
fp->eth_q_stats.mbuf_defrag_attempts++;
-	m0 = m_defrag(*m_head, M_DONTWAIT);
+	m0 = m_defrag(*m_head, M_NOWAIT);
if (m0 == NULL) {
fp->eth_q_stats.mbuf_defrag_failures++;
rc = ENOBUFS;
@ -5504,7 +5504,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
/* lets try to defragment this mbuf */
fp->eth_q_stats.mbuf_defrag_attempts++;
-	m0 = m_defrag(*m_head, M_DONTWAIT);
+	m0 = m_defrag(*m_head, M_NOWAIT);
if (m0 == NULL) {
fp->eth_q_stats.mbuf_defrag_failures++;
/* Ugh, just drop the frame... :( */
@ -6564,7 +6564,7 @@ bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
rc = 0;
/* allocate the new RX BD mbuf */
-	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
+	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
if (__predict_false(m == NULL)) {
fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
return (ENOBUFS);
@ -6645,7 +6645,7 @@ bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
int rc = 0;
/* allocate the new TPA mbuf */
-	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
+	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
if (__predict_false(m == NULL)) {
fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
return (ENOBUFS);
@ -6707,7 +6707,7 @@ bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
int rc = 0;
/* allocate a new SGE mbuf */
-	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
+	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
if (__predict_false(m == NULL)) {
fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
return (ENOMEM);
@ -6769,7 +6769,7 @@ bxe_alloc_fp_buffers(struct bxe_softc *sc)
#if __FreeBSD_version >= 800000
fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
-	    M_DONTWAIT, &fp->tx_mtx);
+	    M_NOWAIT, &fp->tx_mtx);
if (fp->tx_br == NULL) {
BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
goto bxe_alloc_fp_buffers_error;

View file

@ -485,7 +485,7 @@ hn_start_locked(struct ifnet *ifp)
* bpf_mtap code has a chance to run.
*/
if (ifp->if_bpf) {
-	mc_head = m_copypacket(m_head, M_DONTWAIT);
+	mc_head = m_copypacket(m_head, M_NOWAIT);
}
retry_send:
/* Set the completion routine */
@ -594,7 +594,7 @@ hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
* Allocate a new mbuf; could check space
* and allocate a cluster instead.
*/
-	n = m_getjcl(M_DONTWAIT, m->m_type, 0, MJUMPAGESIZE);
+	n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
if (n == NULL)
break;
n->m_len = min(MJUMPAGESIZE, remainder);
@ -658,7 +658,7 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet)
size = MJUMPAGESIZE;
}
-	m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
+	m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
if (m_new == NULL)
return (0);

View file

@ -1158,7 +1158,7 @@ qls_send(qla_host_t *ha, struct mbuf **m_headp)
QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
m_head->m_pkthdr.len));
-	m = m_defrag(m_head, M_DONTWAIT);
+	m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
ha->err_tx_defrag++;
m_freem(m_head);
@ -1413,7 +1413,7 @@ qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
if (mp == NULL) {
-	mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, ha->msize);
+	mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);
if (mp == NULL) {

View file

@ -2693,7 +2693,7 @@ mesh_send_action(struct ieee80211_node *ni,
return EIO; /* XXX */
}
-	M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+	M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
if (m == NULL) {
ieee80211_free_node(ni);
return ENOMEM;