Merge tag 'mt76-for-kvalo-2024-02-22' of https://github.com/nbd168/wireless

mt76 patches for 6.9

* fixes
* support for newer chips in the mt7915 driver
* mt7996 improvements
* page pool performance improvement
Kalle Valo 2024-02-27 16:59:41 +02:00
commit 734940143f
49 changed files with 976 additions and 526 deletions

View file

@ -19,9 +19,6 @@ description: |
Alternatively, it can specify the wireless part of the MT7628/MT7688
or MT7622/MT7986 SoC.
allOf:
- $ref: ieee80211.yaml#
properties:
compatible:
enum:
@ -38,7 +35,12 @@ properties:
MT7986 should contain 3 regions consys, dcm, and sku, in this order.
interrupts:
maxItems: 1
minItems: 1
items:
- description: major interrupt for rings
- description: additional interrupt for ring 19
- description: additional interrupt for ring 4
- description: additional interrupt for ring 5
power-domains:
maxItems: 1
@ -217,6 +219,24 @@ required:
- compatible
- reg
allOf:
- $ref: ieee80211.yaml#
- if:
properties:
compatible:
contains:
enum:
- mediatek,mt7981-wmac
- mediatek,mt7986-wmac
then:
properties:
interrupts:
minItems: 4
else:
properties:
interrupts:
maxItems: 1
unevaluatedProperties: false
examples:
@ -293,7 +313,10 @@ examples:
reg = <0x18000000 0x1000000>,
<0x10003000 0x1000>,
<0x11d10000 0x1000>;
interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen 50>,
<&topckgen 62>;
clock-names = "mcu", "ap2conn";

View file

@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
tx.o agg-rx.o mcu.o
tx.o agg-rx.o mcu.o wed.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o

View file

@ -122,7 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
u8 tidno;
u16 seqno;
if (!ieee80211_is_ctl(bar->frame_control))

View file

@ -197,8 +197,7 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
static void
__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
bool reset_idx)
{
if (!q || !q->ndesc)
@ -219,8 +218,7 @@ __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
mt76_dma_sync_idx(dev, q);
}
static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
__mt76_dma_queue_reset(dev, q, true);
}
@ -632,8 +630,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return ret;
}
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
@ -681,81 +678,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
return frames;
}
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int ret = 0, type, ring;
u16 flags;
if (!q || !q->ndesc)
return -EINVAL;
flags = q->flags;
if (!q->wed || !mtk_wed_device_active(q->wed))
q->flags &= ~MT_QFLAG_WED;
if (!(q->flags & MT_QFLAG_WED))
return 0;
type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
switch (type) {
case MT76_WED_Q_TX:
ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret)
q->wed_regs = q->wed->tx_ring[ring].reg_base;
break;
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
if (!ret)
q->wed_regs = q->wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret)
q->wed_regs = q->wed->rx_ring[ring].reg_base;
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
__mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
__mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
default:
ret = -EINVAL;
break;
}
q->flags = flags;
return ret;
#else
return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@ -800,7 +722,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
ret = mt76_dma_wed_setup(dev, q, false);
ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@ -863,7 +785,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
/* reset WED rx queues */
mt76_dma_wed_setup(dev, q, true);
mt76_wed_dma_setup(dev, q, true);
if (mt76_queue_is_wed_tx_free(q))
return;
@ -1054,20 +976,6 @@ void mt76_dma_attach(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
void mt76_dma_wed_reset(struct mt76_dev *dev)
{
struct mt76_mmio *mmio = &dev->mmio;
if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
return;
complete(&mmio->wed_reset);
if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;

View file

@ -79,15 +79,18 @@ enum mt76_dma_wed_ind_reason {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
void mt76_dma_wed_reset(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
bool reset_idx);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
dev->queue_ops->reset_q(dev, q);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_dma_wed_setup(dev, q, true);
mt76_wed_dma_setup(dev, q, true);
}
static inline void

View file

@ -579,13 +579,18 @@ EXPORT_SYMBOL_GPL(mt76_unregister_phy);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
bool is_qrx = mt76_queue_is_rx(dev, q);
struct page_pool_params pp_params = {
.order = 0,
.flags = 0,
.nid = NUMA_NO_NODE,
.dev = dev->dma_dev,
};
int idx = q - dev->q_rx;
int idx = is_qrx ? q - dev->q_rx : -1;
/* Allocate page_pools just for rx/wed_tx_free queues */
if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
return 0;
switch (idx) {
case MT_RXQ_MAIN:
@ -604,6 +609,9 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
pp_params.dma_dir = DMA_FROM_DEVICE;
pp_params.max_len = PAGE_SIZE;
pp_params.offset = 0;
/* NAPI is available just for rx queues */
if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
pp_params.napi = &dev->napi[idx];
}
q->page_pool = page_pool_create(&pp_params);
@ -1854,19 +1862,3 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct mt76_phy *phy = hw->priv;
struct mtk_wed_device *wed = &phy->dev->mmio.wed;
if (!mtk_wed_device_active(wed))
return -EOPNOTSUPP;
return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
}
EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

View file

@ -85,113 +85,6 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
int i;
for (i = 0; i < dev->rx_token_size; i++) {
struct mt76_txwi_cache *t;
t = mt76_rx_token_release(dev, i);
if (!t || !t->ptr)
continue;
mt76_put_page_pool_buf(t->ptr, false);
t->ptr = NULL;
mt76_put_rxwi(dev, t);
}
mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i, len = SKB_WITH_OVERHEAD(q->buf_size);
struct mt76_txwi_cache *t = NULL;
for (i = 0; i < size; i++) {
enum dma_data_direction dir;
dma_addr_t addr;
u32 offset;
int token;
void *buf;
t = mt76_get_rxwi(dev);
if (!t)
goto unmap;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!buf)
goto unmap;
addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
dir = page_pool_get_dma_dir(q->page_pool);
dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
desc->buf0 = cpu_to_le32(addr);
token = mt76_rx_token_consume(dev, buf, t, addr);
if (token < 0) {
mt76_put_page_pool_buf(buf, false);
goto unmap;
}
token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
desc->token |= cpu_to_le32(token);
desc++;
}
return 0;
unmap:
if (t)
mt76_put_rxwi(dev, t);
mt76_mmio_wed_release_rx_buf(wed);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = wed->wlan.token_start;
spin_unlock_bh(&dev->token_lock);
return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = dev->drv->token_size;
spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {

View file

@ -210,6 +210,8 @@ struct mt76_queue {
u16 first;
u16 head;
u16 tail;
u8 hw_idx;
u8 ep;
int ndesc;
int queued;
int buf_size;
@ -217,7 +219,6 @@ struct mt76_queue {
bool blocked;
u8 buf_offset;
u8 hw_idx;
u16 flags;
struct mtk_wed_device *wed;
@ -1081,12 +1082,6 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct net_device *netdev, enum tc_setup_type type,
void *type_data);
#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
static inline u16 mt76_chip(struct mt76_dev *dev)
{
return dev->rev >> 16;
@ -1097,12 +1092,33 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
return dev->rev & 0xffff;
}
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct net_device *netdev, enum tc_setup_type type,
void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
return 0;
}
static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
return 0;
}
static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
bool reset)
{
return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
@ -1470,13 +1486,6 @@ static inline bool mt76u_urb_error(struct urb *urb)
urb->status != -ENOENT;
}
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
/* TODO: take management packets to queue 5 */
return qid + 1;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout, int ep)
@ -1598,6 +1607,18 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
if (q == &dev->q_rx[i])
return true;
}
return false;
}
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&

View file

@ -227,6 +227,11 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
static inline bool is_mt799x(struct mt76_dev *dev)
{
return is_mt7996(dev) || is_mt7992(dev);
}
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))

View file

@ -32,6 +32,11 @@ enum {
MT_LMAC_PSMP0,
};
enum {
MT_TXS_MPDU_FMT = 0,
MT_TXS_PPDU_FMT = 2,
};
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_COUNT GENMASK(12, 0)

View file

@ -544,7 +544,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD5_PID, pid);
if (pid >= MT_PACKET_ID_FIRST) {
val |= MT_TXD5_TX_STATUS_HOST;
amsdu_en = amsdu_en && !is_mt7921(dev);
amsdu_en = 0;
}
txwi[5] = cpu_to_le32(val);
@ -579,6 +579,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
spe_idx = 24 + phy_idx;
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
}
txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
@ -714,6 +716,9 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff_head list;
struct sk_buff *skb;
if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == MT_TXS_PPDU_FMT)
return false;
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (skb) {

View file

@ -66,7 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
(is_mt7925(dev) && addr == 0x900000) ||
(is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
(is_mt7996(dev) && addr == 0x900000) ||
(is_mt7992(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
@ -283,6 +283,9 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
if (is_mt799x(dev) && !wcid->sta)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
@ -2101,7 +2104,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
int j, msg_len, num_ch;
struct sk_buff *skb;
num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
num_ch = i == batch_size - 1 ? n_chan - i * batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb) {

View file

@ -808,6 +808,7 @@ enum {
STA_REC_MLD = 0x20,
STA_REC_EHT = 0x22,
STA_REC_PN_INFO = 0x26,
STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@ -935,6 +936,9 @@ enum {
PHY_TYPE_INDEX_NUM
};
#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
#define OFDM_BASIC_RATE (BIT(6) | BIT(8) | BIT(10))
#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)

View file

@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep;
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;

View file

@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
{ USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */

View file

@ -614,7 +614,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
mtk_wed_device_dma_reset(wed);
mt7915_dma_disable(dev, force);
mt76_dma_wed_reset(&dev->mt76);
mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {

View file

@ -1520,12 +1520,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
mtk_wed_device_stop(&dev->mt76.mmio.wed);
if (!is_mt798x(&dev->mt76))
mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
}
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
@ -1545,6 +1539,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_stop(&dev->mt76.mmio.wed);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {

View file

@ -1708,6 +1708,6 @@ const struct ieee80211_ops mt7915_ops = {
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
.net_setup_tc = mt76_net_setup_tc,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
};

View file

@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return dev->reg.map[i].maps + ofs;
}
return 0;
}
static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@ -514,15 +519,30 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7915_reg_addr(dev, offset);
if (addr) {
memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
return;
}
spin_lock_bh(&dev->reg_lock);
memcpy_fromio(buf, dev->mt76.mmio.regs +
__mt7915_reg_remap_addr(dev, offset), len);
spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
u32 addr = __mt7915_reg_addr(dev, offset), val;
if (addr)
return dev->bus_ops->rr(mdev, addr);
spin_lock_bh(&dev->reg_lock);
val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
spin_unlock_bh(&dev->reg_lock);
return val;
}
static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
@ -530,7 +550,14 @@ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
if (addr) {
dev->bus_ops->wr(mdev, addr, val);
return;
}
spin_lock_bh(&dev->reg_lock);
dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
@ -538,7 +565,14 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
if (addr)
return dev->bus_ops->rmw(mdev, addr, mask, val);
spin_lock_bh(&dev->reg_lock);
val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
spin_unlock_bh(&dev->reg_lock);
return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@ -672,13 +706,13 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
}
wed->wlan.init_buf = mt7915_wed_init_buf;
wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
wed->wlan.offload_enable = mt76_wed_offload_enable;
wed->wlan.offload_disable = mt76_wed_offload_disable;
wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
wed->wlan.reset = mt7915_mmio_wed_reset;
wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
wed->wlan.reset_complete = mt76_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
@ -707,6 +741,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7915:

View file

@ -287,6 +287,7 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head twt_list;
spinlock_t reg_lock;
u32 hw_pattern;

View file

@ -516,7 +516,8 @@ static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
if (ret)
return ret;
if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
if (version == 0x8a00 || version == 0x8a10 ||
version == 0x8b00 || version == 0x8c10) {
rg_xo_01 = 0x1d59080f;
rg_xo_03 = 0x34c00fe0;
} else {

View file

@ -138,9 +138,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
if (pm->suspended)
return;
dev->regd_in_progress = true;
mt792x_mutex_acquire(dev);
mt7921_regd_update(dev);
mt792x_mutex_release(dev);
dev->regd_in_progress = false;
wake_up(&dev->wait);
}
int mt7921_mac_init(struct mt792x_dev *dev)
@ -261,6 +266,7 @@ int mt7921_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
init_waitqueue_head(&dev->wait);
if (mt76_is_sdio(&dev->mt76))
init_waitqueue_head(&dev->mt76.sdio.wait);
spin_lock_init(&dev->pm.txq_lock);

View file

@ -325,6 +325,19 @@ static void mt7921_roc_iter(void *priv, u8 *mac,
mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
}
void mt7921_roc_abort_sync(struct mt792x_dev *dev)
{
struct mt792x_phy *phy = &dev->phy;
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_roc_iter, (void *)phy);
}
EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
void mt7921_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;

View file

@ -1272,7 +1272,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
};
int ret, valid_cnt = 0;
u16 buf_len = 0;
u32 buf_len = 0;
u8 *pos;
if (!clc)
@ -1283,7 +1283,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
if (mt76_find_power_limits_node(&dev->mt76))
req.cap |= CLC_CAP_DTS_EN;
buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;

View file

@ -322,4 +322,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
enum mt7921_roc_req type, u8 token_id);
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
void mt7921_roc_abort_sync(struct mt792x_dev *dev);
#endif

View file

@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
#include "mt7921.h"
#include "../mt76_connac2_mac.h"
@ -369,6 +370,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
device_init_wakeup(dev->mt76.dev, true);
return 0;
err_free_irq:
@ -386,7 +390,11 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
device_init_wakeup(dev->mt76.dev, false);
mt7921e_unregister_device(dev);
set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
@ -405,10 +413,15 @@ static int mt7921_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
mt7921_roc_abort_sync(dev);
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
wait_event_timeout(dev->wait,
!dev->regd_in_progress, 5 * HZ);
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;

View file

@ -216,6 +216,8 @@ static int mt7921s_suspend(struct device *__dev)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
mt7921_roc_abort_sync(dev);
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;

View file

@ -2,11 +2,61 @@
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/thermal.h>
#include <linux/firmware.h>
#include "mt7925.h"
#include "mac.h"
#include "mcu.h"
static ssize_t mt7925_thermal_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
switch (to_sensor_dev_attr(attr)->index) {
case 0: {
struct mt792x_phy *phy = dev_get_drvdata(dev);
struct mt792x_dev *mdev = phy->dev;
int temperature;
mt792x_mutex_acquire(mdev);
temperature = mt7925_mcu_get_temperature(phy);
mt792x_mutex_release(mdev);
if (temperature < 0)
return temperature;
/* display in millidegree Celsius */
return sprintf(buf, "%u\n", temperature * 1000);
}
default:
return -EINVAL;
}
}
static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7925_thermal_temp, 0);
static struct attribute *mt7925_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7925_hwmon);
static int mt7925_thermal_init(struct mt792x_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
struct device *hwmon;
const char *name;
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
wiphy_name(wiphy));
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7925_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
}
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@ -142,6 +192,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
ret = mt7925_thermal_init(&dev->phy);
if (ret) {
dev_err(dev->mt76.dev, "thermal init failed\n");
return;
}
/* we support chip reset now */
dev->hw_init_done = true;

View file

@ -359,6 +359,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mvif->sta.vif = mvif;
mt76_wcid_init(&mvif->sta.wcid);
mt7925_mac_wtbl_update(dev, idx,
@ -526,7 +527,7 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY && !mvif->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
}
@ -710,7 +711,7 @@ static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
mt792x_mac_set_timeing(phy);
mt7925_mcu_set_timing(phy, vif);
}
}
@ -1273,6 +1274,25 @@ mt7925_channel_switch_beacon(struct ieee80211_hw *hw,
mt792x_mutex_release(dev);
}
static int
mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
[IEEE80211_AC_BE] = 0,
[IEEE80211_AC_BK] = 1,
};
/* firmware uses access class index */
mvif->queue_params[mq_to_aci[queue]] = *params;
return 0;
}
static int
mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
@ -1396,7 +1416,7 @@ const struct ieee80211_ops mt7925_ops = {
.add_interface = mt7925_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7925_config,
.conf_tx = mt792x_conf_tx,
.conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
.bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,

View file

@ -656,6 +656,42 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
return ret;
}
int mt7925_mcu_get_temperature(struct mt792x_phy *phy)
{
struct {
u8 _rsv[4];
__le16 tag;
__le16 len;
u8 _rsv2[4];
} __packed req = {
.tag = cpu_to_le16(0x0),
.len = cpu_to_le16(sizeof(req) - 4),
};
struct mt7925_thermal_evt {
u8 rsv[4];
__le32 temperature;
} __packed * evt;
struct mt792x_dev *dev = phy->dev;
int temperature, ret;
struct sk_buff *skb;
ret = mt76_mcu_send_and_get_msg(&dev->mt76,
MCU_WM_UNI_CMD_QUERY(THERMAL),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
skb_pull(skb, 4 + sizeof(struct tlv));
evt = (struct mt7925_thermal_evt *)skb->data;
temperature = le32_to_cpu(evt->temperature);
dev_kfree_skb(skb);
return temperature;
}
static void
mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
{
@ -814,6 +850,7 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
struct mt76_wcid *wcid;
struct tlv *tlv;
@ -827,7 +864,11 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
if (sta)
wcid = (struct mt76_wcid *)sta->drv_priv;
else
wcid = &mvif->sta.wcid;
if (!wcid)
return;
@ -895,7 +936,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
e = (struct edca *)tlv;
e->set = WMM_PARAM_SET;
e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
e->queue = ac;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@ -921,61 +962,67 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
struct sta_rec_sec_uni *sec;
struct mt792x_vif *mvif = msta->vif;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sta = msta == &mvif->sta ?
NULL :
container_of((void *)msta, struct ieee80211_sta, drv_priv);
vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
sec->add = cmd;
sec->bss_idx = mvif->mt76.idx;
sec->is_authenticator = 0;
sec->mgmt_prot = 0;
sec->wlan_idx = (u8)wcid->idx;
if (sta) {
sec->tx_key = 1;
sec->key_type = 1;
memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
} else {
memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
}
if (cmd == SET_KEY) {
struct sec_key_uni *sec_key;
u8 cipher;
cipher = mt76_connac_mcu_get_cipher(key->cipher);
if (cipher == MCU_CIPHER_NONE)
sec->add = 1;
cipher = mt7925_mcu_get_cipher(key->cipher);
if (cipher == CONNAC3_CIPHER_NONE)
return -EOPNOTSUPP;
sec_key = &sec->key[0];
sec_key->cipher_len = sizeof(*sec_key);
if (cipher == MCU_CIPHER_BIP_CMAC_128) {
sec_key->wlan_idx = cpu_to_le16(wcid->idx);
sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
sec_key->key_id = sta_key_conf->keyidx;
sec_key->key_len = 16;
memcpy(sec_key->key, sta_key_conf->key, 16);
sec_key = &sec->key[1];
sec_key->wlan_idx = cpu_to_le16(wcid->idx);
sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
sec_key->cipher_len = sizeof(*sec_key);
sec_key->key_len = 16;
memcpy(sec_key->key, key->key, 16);
sec->n_cipher = 2;
if (cipher == CONNAC3_CIPHER_BIP_CMAC_128) {
sec->cipher_id = CONNAC3_CIPHER_BIP_CMAC_128;
sec->key_id = sta_key_conf->keyidx;
sec->key_len = 32;
memcpy(sec->key, sta_key_conf->key, 16);
memcpy(sec->key + 16, key->key, 16);
} else {
sec_key->wlan_idx = cpu_to_le16(wcid->idx);
sec_key->cipher_id = cipher;
sec_key->key_id = key->keyidx;
sec_key->key_len = key->keylen;
memcpy(sec_key->key, key->key, key->keylen);
sec->cipher_id = cipher;
sec->key_id = key->keyidx;
sec->key_len = key->keylen;
memcpy(sec->key, key->key, key->keylen);
if (cipher == MCU_CIPHER_TKIP) {
if (cipher == CONNAC3_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(sec_key->key + 16, key->key + 24, 8);
memcpy(sec_key->key + 24, key->key + 16, 8);
memcpy(sec->key + 16, key->key + 24, 8);
memcpy(sec->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
if (cipher == MCU_CIPHER_AES_CCMP) {
if (cipher == CONNAC3_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
sec->n_cipher = 1;
}
} else {
sec->n_cipher = 0;
sec->add = 0;
}
return 0;
@ -1460,12 +1507,10 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
struct tlv *tlv;
u8 af = 0, mm = 0;
if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
if (sta->deflink.ht_cap.ht_supported) {
af = sta->deflink.ht_cap.ampdu_factor;
mm = sta->deflink.ht_cap.ampdu_density;
@ -1573,8 +1618,6 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
@ -1598,30 +1641,11 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
}
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
sizeof(struct tlv));
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
WTBL_RESET_AND_SET,
sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
if (info->enable) {
mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
info->sta, sta_wtbl,
wtbl_hdr);
mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
sta_wtbl, wtbl_hdr,
true, true);
}
if (info->enable)
mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
@ -2049,9 +2073,9 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
u8 idx, basic_phy;
struct tlv *tlv;
int conn_type;
u8 idx;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
@ -2062,8 +2086,10 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta);
basic_req->nonht_basic_phy = cpu_to_le16(basic_phy);
if (band == NL80211_BAND_2GHZ)
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
else
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
@ -2122,21 +2148,21 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
sec = (struct bss_sec_tlv *)tlv;
switch (mvif->cipher) {
case MCU_CIPHER_GCMP_256:
case MCU_CIPHER_GCMP:
case CONNAC3_CIPHER_GCMP_256:
case CONNAC3_CIPHER_GCMP:
sec->mode = MODE_WPA3_SAE;
sec->status = 8;
break;
case MCU_CIPHER_AES_CCMP:
case CONNAC3_CIPHER_AES_CCMP:
sec->mode = MODE_WPA2_PSK;
sec->status = 6;
break;
case MCU_CIPHER_TKIP:
case CONNAC3_CIPHER_TKIP:
sec->mode = MODE_WPA2_PSK;
sec->status = 4;
break;
case MCU_CIPHER_WEP104:
case MCU_CIPHER_WEP40:
case CONNAC3_CIPHER_WEP104:
case CONNAC3_CIPHER_WEP40:
sec->mode = MODE_SHARED;
sec->status = 0;
break;
@ -2167,6 +2193,11 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
bmc = (struct bss_rate_tlv *)tlv;
if (band == NL80211_BAND_2GHZ)
bmc->basic_rate = cpu_to_le16(HR_DSSS_ERP_BASIC_RATE);
else
bmc->basic_rate = cpu_to_le16(OFDM_BASIC_RATE);
bmc->short_preamble = (band == NL80211_BAND_2GHZ);
bmc->bc_fixed_rate = idx;
bmc->mc_fixed_rate = idx;
@ -2249,6 +2280,38 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->bss_conf.he_bss_color.color : 0;
}
static void
mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_phy *phy = mvif->phy;
struct bss_ifs_time_tlv *ifs_time;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time));
ifs_time = (struct bss_ifs_time_tlv *)tlv;
ifs_time->slot_valid = true;
ifs_time->slot_time = cpu_to_le16(phy->slottime);
}
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = phy->dev;
struct sk_buff *skb;
skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
MT7925_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
mt7925_mcu_bss_ifs_tlv(skb, vif);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_vif *vif,
@ -2273,6 +2336,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
mt7925_mcu_bss_qos_tlv(skb, vif);
mt7925_mcu_bss_mld_tlv(skb, vif, sta);
mt7925_mcu_bss_ifs_tlv(skb, vif);
if (vif->bss_conf.he_support) {
mt7925_mcu_bss_he_tlv(skb, vif, phy);
@ -2845,12 +2909,16 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
uni_txd->cid = cpu_to_le16(mcu_cmd);
uni_txd->s2d_index = MCU_S2D_H2N;
uni_txd->pkt_type = MCU_PKT_ID;
uni_txd->seq = seq;
if (cmd & __MCU_CMD_FIELD_QUERY)
uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
goto exit;
}

View file

@ -159,6 +159,20 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
enum connac3_mcu_cipher_type {
CONNAC3_CIPHER_NONE = 0,
CONNAC3_CIPHER_WEP40 = 1,
CONNAC3_CIPHER_TKIP = 2,
CONNAC3_CIPHER_AES_CCMP = 4,
CONNAC3_CIPHER_WEP104 = 5,
CONNAC3_CIPHER_BIP_CMAC_128 = 6,
CONNAC3_CIPHER_WEP128 = 7,
CONNAC3_CIPHER_WAPI = 8,
CONNAC3_CIPHER_CCMP_256 = 10,
CONNAC3_CIPHER_GCMP = 11,
CONNAC3_CIPHER_GCMP_256 = 12,
};
struct mt7925_mcu_scan_chinfo_event {
u8 nr_chan;
u8 alpha2[3];
@ -208,7 +222,7 @@ struct scan_req_tlv {
__le16 channel_dwell_time; /* channel Dwell interval */
__le16 timeout_value;
__le16 probe_delay_time;
u8 func_mask_ext;
__le32 func_mask_ext;
};
struct scan_ssid_tlv {
@ -334,7 +348,8 @@ struct bss_req_hdr {
struct bss_rate_tlv {
__le16 tag;
__le16 len;
u8 __rsv1[4];
u8 __rsv1[2];
__le16 basic_rate;
__le16 bc_trans;
__le16 mc_trans;
u8 short_preamble;
@ -382,25 +397,22 @@ struct sta_rec_eht {
u8 _rsv2[3];
} __packed;
struct sec_key_uni {
__le16 wlan_idx;
u8 mgmt_prot;
u8 cipher_id;
u8 cipher_len;
u8 key_id;
u8 key_len;
u8 need_resp;
u8 key[32];
} __packed;
struct sta_rec_sec_uni {
__le16 tag;
__le16 len;
u8 add;
u8 n_cipher;
u8 rsv[2];
struct sec_key_uni key[2];
u8 tx_key;
u8 key_type;
u8 is_authenticator;
u8 peer_addr[6];
u8 bss_idx;
u8 cipher_id;
u8 key_id;
u8 key_len;
u8 wlan_idx;
u8 mgmt_prot;
u8 key[32];
u8 key_rsc[16];
} __packed;
struct sta_rec_hdr_trans {
@ -428,6 +440,22 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
struct bss_ifs_time_tlv {
__le16 tag;
__le16 len;
u8 slot_valid;
u8 sifs_valid;
u8 rifs_valid;
u8 eifs_valid;
__le16 slot_time;
__le16 sifs_time;
__le16 rifs_time;
__le16 eifs_time;
u8 eifs_cck_valid;
u8 rsv;
__le16 eifs_cck_time;
} __packed;
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@ -440,7 +468,7 @@ struct sta_rec_mld {
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_phy) + \
sizeof(struct sta_rec_ra) + \
sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_sec_uni) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
@ -455,6 +483,7 @@ struct sta_rec_mld {
sizeof(struct bss_mld_tlv) + \
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
sizeof(struct bss_ifs_time_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@ -509,6 +538,33 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[4];
} __packed;
static inline enum connac3_mcu_cipher_type
mt7925_mcu_get_cipher(int cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return CONNAC3_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return CONNAC3_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return CONNAC3_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_AES_CMAC:
return CONNAC3_CIPHER_BIP_CMAC_128;
case WLAN_CIPHER_SUITE_CCMP:
return CONNAC3_CIPHER_AES_CCMP;
case WLAN_CIPHER_SUITE_CCMP_256:
return CONNAC3_CIPHER_CCMP_256;
case WLAN_CIPHER_SUITE_GCMP:
return CONNAC3_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return CONNAC3_CIPHER_GCMP_256;
case WLAN_CIPHER_SUITE_SMS4:
return CONNAC3_CIPHER_WAPI;
default:
return CONNAC3_CIPHER_NONE;
}
}
int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@ -525,6 +581,8 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int enable);
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
struct ieee80211_vif *vif);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);

View file

@ -271,6 +271,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_get_temperature(struct mt792x_phy *phy);
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,

View file

@ -386,6 +386,8 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mt76_rmw_field(dev, MT_HW_EMI_CTL, MT_HW_EMI_CTL_SLPPROT_EN, 1);
ret = mt792x_wfsys_reset(dev);
if (ret)
goto err_free_dev;
@ -425,6 +427,7 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7925e_unregister_device(dev);
set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);

View file

@ -186,6 +186,8 @@ struct mt792x_dev {
bool hw_init_done:1;
bool fw_assert:1;
bool has_eht:1;
bool regd_in_progress:1;
wait_queue_head_t wait;
struct work_struct init_work;

View file

@ -66,13 +66,15 @@ mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
}
/* MTCL : Country List Table for 6G band */
static void
static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
*version = 1;
else
*version = 2;
int ret;
*version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
? 1 : 2;
return ret;
}
/* MTDS : Dynamic SAR Power Table */
@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
if (!asar)
return -ENOMEM;
mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->countrylist);
asar->countrylist = NULL;
}
/* MTDS is mandatory. Return error if table is invalid */
ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
devm_kfree(dev->mt76.dev, asar->countrylist);
devm_kfree(dev->mt76.dev, asar);
return ret;
asar->dyn = NULL;
}
/* MTGS is optional */
@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
if (!phy->acpisar)
if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
return 0;
/* When ACPI SAR enabled in HW, we should apply rules for .frp
@ -353,11 +355,15 @@ static u8
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
u8 config = 0;
u8 mode_6g, mode_5g9;
if (cl->cl6g[row] & BIT(column))
config |= (cl->mode_6g & 0x3) << 2;
mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
mode_5g9 = (cl->mode_5g9 > 0x01) ? 0 : cl->mode_5g9;
if ((cl->cl6g[row] & BIT(column)) || cl->mode_6g == 0x02)
config |= (mode_6g & 0x3) << 2;
if (cl->version > 1 && cl->cl5g9[row] & BIT(column))
config |= (cl->mode_5g9 & 0x3);
config |= (mode_5g9 & 0x3);
return config;
}
@ -374,7 +380,7 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
"AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
"FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
"LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
"PT", "RO", "MT", "SK", "SI", "ES", "CH",
"PT", "RO", "SK", "SI", "ES", "SE", "CH",
};
struct mt792x_acpi_sar *sar = phy->acpisar;
struct mt792x_asar_cl *cl;

View file

@ -354,6 +354,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
"v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",

View file

@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
struct mt792x_dev *dev = dev_instance;
if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
return IRQ_NONE;
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@ -123,14 +125,13 @@ static void mt792x_dma_prefetch(struct mt792x_dev *dev)
int mt792x_dma_enable(struct mt792x_dev *dev)
{
if (is_mt7925(&dev->mt76))
mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
/* configure perfetch settings */
mt792x_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
if (is_mt7925(&dev->mt76))
mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
@ -140,12 +141,20 @@ int mt792x_dma_enable(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
if (is_mt7925(&dev->mt76)) {
mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
}
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */

View file

@ -292,9 +292,12 @@
#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WFDMA0_GLO_CFG_DMA_SIZE GENMASK(5, 4)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
#define MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK BIT(11)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WFDMA0_GLO_CFG_RX_WB_DDONE BIT(13)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
@ -322,6 +325,8 @@
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280)
#define MT_WFDMA0_INT_RX_PRI MT_WFDMA0(0x298)
#define MT_WFDMA0_INT_TX_PRI MT_WFDMA0(0x29c)
#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
@ -389,6 +394,9 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
#define MT_HW_EMI_CTL 0x18011100
#define MT_HW_EMI_CTL_SLPPROT_EN BIT(1)
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)

View file

@ -121,44 +121,25 @@ static void mt792xu_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
static void mt792xu_dma_prefetch(struct mt792x_dev *dev)
{
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
MT_WPDMA0_BASE_PTR_MASK, 0x80);
#define DMA_PREFETCH_CONF(_idx_, _cnt_, _base_) \
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL((_idx_)), \
MT_WPDMA0_MAX_CNT_MASK | MT_WPDMA0_BASE_PTR_MASK, \
FIELD_PREP(MT_WPDMA0_MAX_CNT_MASK, (_cnt_)) | \
FIELD_PREP(MT_WPDMA0_BASE_PTR_MASK, (_base_)))
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
MT_WPDMA0_BASE_PTR_MASK, 0xc0);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
MT_WPDMA0_BASE_PTR_MASK, 0x100);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
MT_WPDMA0_BASE_PTR_MASK, 0x140);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
MT_WPDMA0_BASE_PTR_MASK, 0x180);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
MT_WPDMA0_BASE_PTR_MASK, 0x280);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
MT_WPDMA0_MAX_CNT_MASK, 4);
mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
DMA_PREFETCH_CONF(0, 4, 0x080);
DMA_PREFETCH_CONF(1, 4, 0x0c0);
DMA_PREFETCH_CONF(2, 4, 0x100);
DMA_PREFETCH_CONF(3, 4, 0x140);
DMA_PREFETCH_CONF(4, 4, 0x180);
DMA_PREFETCH_CONF(16, 4, 0x280);
DMA_PREFETCH_CONF(17, 4, 0x2c0);
}
static void mt792xu_wfdma_init(struct mt792x_dev *dev)
{
int i;
mt792xu_dma_prefetch(dev);
mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
@ -169,10 +150,27 @@ static void mt792xu_wfdma_init(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN);
/* disable dmashdl */
mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
mt76_rmw(dev, MT_DMASHDL_REFILL, MT_DMASHDL_REFILL_MASK, 0xffe00000);
mt76_clear(dev, MT_DMASHDL_PAGE, MT_DMASHDL_GROUP_SEQ_ORDER);
mt76_rmw(dev, MT_DMASHDL_PKT_MAX_SIZE,
MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 0));
for (i = 0; i < 5; i++)
mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0xfff));
for (i = 5; i < 16; i++)
mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x0) |
FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x0));
mt76_wr(dev, MT_DMASHDL_Q_MAP(0), 0x32013201);
mt76_wr(dev, MT_DMASHDL_Q_MAP(1), 0x32013201);
mt76_wr(dev, MT_DMASHDL_Q_MAP(2), 0x55555444);
mt76_wr(dev, MT_DMASHDL_Q_MAP(3), 0x55555444);
mt76_wr(dev, MT_DMASHDL_SCHED_SET(0), 0x76540132);
mt76_wr(dev, MT_DMASHDL_SCHED_SET(1), 0xFEDCBA98);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}

View file

@ -237,7 +237,8 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
MT_WFDMA0_GLO_CFG_EXT_EN);
if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@ -694,7 +695,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
mt7996_dma_disable(dev, force);
mt76_dma_wed_reset(&dev->mt76);
mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {

View file

@ -493,7 +493,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
void mt7996_mac_init(struct mt7996_dev *dev)
{
#define HIF_TXD_V2_1 4
#define HIF_TXD_V2_1 0x21
int i;
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
@ -507,11 +507,6 @@ void mt7996_mac_init(struct mt7996_dev *dev)
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
/* txs report queue */
mt76_rmw_field(dev, MT_DMA_TCRF1(0), MT_DMA_TCRF1_QIDX, 0);
mt76_rmw_field(dev, MT_DMA_TCRF1(1), MT_DMA_TCRF1_QIDX, 6);
mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
/* rro module init */
if (is_mt7996(&dev->mt76))
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
@ -1012,10 +1007,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
if (vif != NL80211_IFTYPE_AP)
if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
if (vif == NL80211_IFTYPE_AP)
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,

View file

@ -732,6 +732,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
if (wcid->amsdu)
txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
@ -862,8 +865,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
if (wcid->amsdu)
val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@ -1188,15 +1189,17 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
struct sk_buff *skb;
struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
/* only report MPDU TXS */
if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
if (skb) {
info = IEEE80211_SKB_CB(skb);
if (!(txs & MT_TXS0_ACK_ERROR_MASK))
@ -1208,6 +1211,7 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
info->status.rates[0].idx = -1;
}
}
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
struct ieee80211_sta *sta;
@ -2527,6 +2531,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
struct ieee80211_twt_params *twt_agrt)
{
u16 type = le16_to_cpu(twt_agrt->req_type);
u8 exp;
int i;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
struct mt7996_twt_flow *f;
if (!(msta->twt.flowid_mask & BIT(i)))
continue;
f = &msta->twt.flow[i];
if (f->duration == twt_agrt->min_twt_dur &&
f->mantissa == twt_agrt->mantissa &&
f->exp == exp &&
f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
return true;
}
return false;
}
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@ -2538,8 +2570,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
int flowid, table_id;
u8 exp;
u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@ -2552,8 +2583,18 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
setup_cmd = TWT_SETUP_CMD_DICTATE;
twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
goto unlock;
}
if (mt7996_mac_twt_param_equal(msta, twt_agrt))
goto unlock;
flowid = ffs(~msta->twt.flowid_mask) - 1;
le16p_replace_bits(&twt_agrt->req_type, flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type |= le16_encode_bits(flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
@ -2601,10 +2642,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
(twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt_agrt->req_type |=
le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
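Note on the req_type updates above: le16p_replace_bits() is replaced by an explicit clear of the field mask followed by le16_encode_bits(). A minimal user-space sketch of that clear-then-encode pattern, using a made-up 4-bit field rather than the real IEEE80211_TWT_REQTYPE_* masks:

#include <stdint.h>
#include <stdio.h>

/* hypothetical 4-bit field in bits 3..0 of a 16-bit request word */
#define FLOWID_MASK 0x000fu

static uint16_t set_flowid(uint16_t req_type, uint16_t flowid)
{
	req_type &= ~FLOWID_MASK;		/* clear the old field value */
	req_type |= flowid & FLOWID_MASK;	/* encode the new one */
	return req_type;
}

int main(void)
{
	/* 0xabcd with flowid 0x3 becomes 0xabc3 */
	printf("0x%04x\n", set_flowid(0xabcd, 0x3));
	return 0;
}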

View file

@ -350,9 +350,12 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (key->keyidx == 6 || key->keyidx == 7)
break;
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
default:
@ -1499,6 +1502,6 @@ const struct ieee80211_ops mt7996_ops = {
.set_radar_background = mt7996_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7996_net_fill_forward_path,
.net_setup_tc = mt76_net_setup_tc,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
};

View file

@ -732,13 +732,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
struct tlv *ptlv, tlv = {
.tag = cpu_to_le16(tag),
.len = cpu_to_le16(len),
};
struct tlv *ptlv = skb_put(skb, len);
ptlv = skb_put(skb, len);
memcpy(ptlv, &tlv, sizeof(tlv));
ptlv->tag = cpu_to_le16(tag);
ptlv->len = cpu_to_le16(len);
return ptlv;
}
@ -1240,6 +1237,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_vif *vif = container_of((void *)msta->vif,
struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
@ -1259,8 +1259,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
if (vif->type != NL80211_IFTYPE_STATION &&
(sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
sizeof(eht->mcs_map_bw20));
return;
}
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
@ -2510,7 +2519,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
bcn->enable = en;
@ -2579,8 +2588,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
info->band = band;
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
discov = (struct bss_inband_discovery_tlv *)tlv;
@ -3539,7 +3547,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
skb_pull(skb, 64);
skb_pull(skb, 48);
memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
}
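The beacon and inband discovery hunks above now round the TLV length up to a 4-byte boundary; the matching 4 bytes of head-room appear in MT7996_BEACON_UPDATE_SIZE in the next file. A minimal sketch of the rounding, equivalent to the kernel's ALIGN(len, 4):

#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

int main(void)
{
	unsigned int len;

	/* lengths 1337..1340 all round up to the next multiple of 4 */
	for (len = 1337; len <= 1340; len++)
		printf("%u -> %u\n", len, ALIGN4(len));
	return 0;
}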

View file

@ -800,10 +800,10 @@ enum {
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
#define MT7996_MAX_BEACON_SIZE 1342
#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
MT_TXD_SIZE + \
4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \

View file

@ -140,7 +140,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
@ -155,7 +154,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
@ -165,26 +163,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
return MT_HIF_REMAP_BASE_L2 + offset;
}
static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
{
/* remap to ori status */
if (unlikely(dev->reg_l1_backup)) {
dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
dev->reg_l1_backup = 0;
}
if (dev->reg_l2_backup) {
dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
dev->reg_l2_backup = 0;
}
}
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
mt7996_reg_remap_restore(dev);
if (addr < 0x100000)
return addr;
@ -201,6 +183,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
return dev->reg.map[i].mapped + ofs;
}
return 0;
}
static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@ -225,28 +212,60 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7996_reg_addr(dev, offset);
if (addr) {
memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
return;
}
spin_lock_bh(&dev->reg_lock);
memcpy_fromio(buf, dev->mt76.mmio.regs +
__mt7996_reg_remap_addr(dev, offset), len);
spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
u32 addr = __mt7996_reg_addr(dev, offset), val;
return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
if (addr)
return dev->bus_ops->rr(mdev, addr);
spin_lock_bh(&dev->reg_lock);
val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
spin_unlock_bh(&dev->reg_lock);
return val;
}
static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
u32 addr = __mt7996_reg_addr(dev, offset);
dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
if (addr) {
dev->bus_ops->wr(mdev, addr, val);
return;
}
spin_lock_bh(&dev->reg_lock);
dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
u32 addr = __mt7996_reg_addr(dev, offset);
return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
if (addr)
return dev->bus_ops->rmw(mdev, addr, mask, val);
spin_lock_bh(&dev->reg_lock);
val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
spin_unlock_bh(&dev->reg_lock);
return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@ -391,13 +410,13 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.amsdu_max_len = 1536;
wed->wlan.init_buf = mt7996_wed_init_buf;
wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
wed->wlan.offload_enable = mt76_wed_offload_enable;
wed->wlan.offload_disable = mt76_wed_offload_disable;
if (!hif2) {
wed->wlan.reset = mt7996_mmio_wed_reset;
wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
wed->wlan.reset_complete = mt76_wed_reset_complete;
}
if (mtk_wed_device_attach(wed))
@ -421,6 +440,7 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7996_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7990:
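Register accesses that miss the static map now fall back to the L1/L2 remap window, and since that window register is shared state, the remap-plus-access pair is serialized with the new reg_lock. A user-space model of the same banked-window pattern follows; names and sizes are invented for the example, only the locking structure mirrors the driver:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define WINDOW_SIZE 0x1000u
#define NUM_BANKS 16u

static pthread_mutex_t remap_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t backing[NUM_BANKS][WINDOW_SIZE / 4];	/* emulated device space */
static uint32_t aperture[WINDOW_SIZE / 4];		/* fixed CPU-visible window */

/* stands in for programming MT_HIF_REMAP_L1/L2 */
static void select_window(uint32_t bank)
{
	memcpy(aperture, backing[bank], sizeof(aperture));
}

static uint32_t remapped_read(uint32_t addr)
{
	uint32_t val;

	pthread_mutex_lock(&remap_lock);	/* plays the role of reg_lock */
	select_window(addr / WINDOW_SIZE % NUM_BANKS);
	val = aperture[(addr & (WINDOW_SIZE - 1)) / 4];
	pthread_mutex_unlock(&remap_lock);
	return val;
}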

View file

@ -53,6 +53,7 @@
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
@ -320,12 +321,11 @@ struct mt7996_dev {
struct rchan *relay_fwlog;
struct {
u8 table_mask;
u16 table_mask;
u8 n_agrt;
} twt;
u32 reg_l1_backup;
u32 reg_l2_backup;
spinlock_t reg_lock;
u8 wtbl_size_group;
};
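twt.table_mask grows from u8 to u16 so that all MT7996_MAX_TWT_AGRT (16) slots can be tracked; the TWT setup path picks a free slot with the ffs(~mask) - 1 idiom seen earlier. A minimal sketch of that allocation, assuming POSIX ffs():

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

#define MAX_TWT_AGRT 16

static int alloc_slot(uint16_t *mask)
{
	int slot = ffs(~*mask) - 1;	/* index of the lowest clear bit */

	if (slot < 0 || slot >= MAX_TWT_AGRT)
		return -1;		/* table full */
	*mask |= 1u << slot;
	return slot;
}

int main(void)
{
	uint16_t mask = 0x00ff;		/* slots 0..7 already in use */

	printf("next slot: %d\n", alloc_slot(&mask));	/* -> 8 */
	return 0;
}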

View file

@ -767,7 +767,7 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
return;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@ -872,9 +872,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (err < 0)
return err;
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
q->entry[idx].urb, mt76u_complete_tx,
&q->entry[idx]);
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
mt76u_complete_tx, &q->entry[idx]);
q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
@ -906,9 +905,13 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
static void
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
if (mt76_chip(dev) == 0x7663) {
u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
switch (mt76_chip(dev)) {
case 0x7663: {
static const u8 lmac_queue_map[] = {
/* ac to lmac mapping */
[IEEE80211_AC_BK] = 0,
@ -917,33 +920,36 @@ static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
[IEEE80211_AC_VO] = 4,
};
if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
return 1; /* BE */
return lmac_queue_map[ac];
q->hw_idx = lmac_queue_map[ac];
q->ep = q->hw_idx + 1;
break;
}
case 0x7961:
case 0x7925:
q->hw_idx = mt76_ac_to_hwq(ac);
q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
break;
default:
q->hw_idx = mt76_ac_to_hwq(ac);
q->ep = q->hw_idx + 1;
break;
}
return mt76_ac_to_hwq(ac);
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i, j, err;
int i;
for (i = 0; i <= MT_TXQ_PSD; i++) {
if (i >= IEEE80211_NUM_ACS) {
dev->phy.q_tx[i] = dev->phy.q_tx[0];
continue;
}
struct mt76_queue *q;
int j, err;
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
mt76u_ac_to_hwq(dev, q, i);
dev->phy.q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
@ -969,7 +975,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
mt76_worker_teardown(&dev->usb.status_worker);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
for (i = 0; i <= MT_TXQ_PSD; i++) {
struct mt76_queue *q;
int j;
@ -999,7 +1005,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@ -1013,7 +1019,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
/* On device removal we might queue skb's, but mt76u_tx_kick()
* will fail to submit urb, cleanup those skb's manually.
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;

View file

@ -0,0 +1,213 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include "mt76.h"
#include "dma.h"
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
int i;
for (i = 0; i < dev->rx_token_size; i++) {
struct mt76_txwi_cache *t;
t = mt76_rx_token_release(dev, i);
if (!t || !t->ptr)
continue;
mt76_put_page_pool_buf(t->ptr, false);
t->ptr = NULL;
mt76_put_rxwi(dev, t);
}
mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i, len = SKB_WITH_OVERHEAD(q->buf_size);
struct mt76_txwi_cache *t = NULL;
for (i = 0; i < size; i++) {
enum dma_data_direction dir;
dma_addr_t addr;
u32 offset;
int token;
void *buf;
t = mt76_get_rxwi(dev);
if (!t)
goto unmap;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!buf)
goto unmap;
addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
dir = page_pool_get_dma_dir(q->page_pool);
dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
desc->buf0 = cpu_to_le32(addr);
token = mt76_rx_token_consume(dev, buf, t, addr);
if (token < 0) {
mt76_put_page_pool_buf(buf, false);
goto unmap;
}
token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
desc->token |= cpu_to_le32(token);
desc++;
}
return 0;
unmap:
if (t)
mt76_put_rxwi(dev, t);
mt76_wed_release_rx_buf(wed);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);
int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = wed->wlan.token_start;
spin_unlock_bh(&dev->token_lock);
return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
int ret = 0, type, ring;
u16 flags;
if (!q || !q->ndesc)
return -EINVAL;
flags = q->flags;
if (!q->wed || !mtk_wed_device_active(q->wed))
q->flags &= ~MT_QFLAG_WED;
if (!(q->flags & MT_QFLAG_WED))
return 0;
type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
switch (type) {
case MT76_WED_Q_TX:
ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret)
q->wed_regs = q->wed->tx_ring[ring].reg_base;
break;
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
if (!ret)
q->wed_regs = q->wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret)
q->wed_regs = q->wed->rx_ring[ring].reg_base;
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
__mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
__mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
default:
ret = -EINVAL;
break;
}
q->flags = flags;
return ret;
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
void mt76_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
spin_lock_bh(&dev->token_lock);
dev->token_size = dev->drv->token_size;
spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);
void mt76_wed_reset_complete(struct mtk_wed_device *wed)
{
struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct mt76_phy *phy = hw->priv;
struct mtk_wed_device *wed = &phy->dev->mmio.wed;
if (!mtk_wed_device_active(wed))
return -EOPNOTSUPP;
return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
}
EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);
void mt76_wed_dma_reset(struct mt76_dev *dev)
{
struct mt76_mmio *mmio = &dev->mmio;
if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
return;
complete(&mmio->wed_reset);
if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);
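For reference, mt76_wed_init_rx_buf() above packs a token id and, on 64-bit DMA, the upper address bits into one descriptor word. A minimal sketch of that packing; the bit layout used here (token in bits 31..16, high address bits in 3..0) is an assumption for the example, not quoted from the MT_DMA_CTL_* definitions:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_token(uint16_t token, uint64_t dma_addr)
{
	uint32_t val = (uint32_t)token << 16;		/* MT_DMA_CTL_TOKEN-like field */

	val |= (uint32_t)(dma_addr >> 32) & 0xf;	/* MT_DMA_CTL_SDP0_H-like field */
	return val;
}

int main(void)
{
	/* token 0x1234 with a buffer just above 4 GiB -> 0x12340001 */
	printf("0x%08x\n", pack_token(0x1234, 0x100000000ULL));
	return 0;
}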