ath11k: update driver from upstream

This is a set of updates to the ath11k driver based on wireless-testing
(wt-2023-05-11) 711dca0ca3d77414f8f346e564e9c8640147f40d (after v6.4-rc1),
(wt-2023-06-09) 7bd20e011626ccc3ad53e57873452b1716fcfaaa (after v6.4-rc5),
(wt-2023-07-24) 62e409149b62a285e89018e49b2e115757fb9022 (after v6.5-rc3),
(wt-2023-08-06) 2a220a15be657a24868368892e3e2caba2115283 (after v6.5-rc4).

MFC after:	20 days
Bjoern A. Zeeb 2023-05-20 00:36:03 +00:00
parent 07724ba62b
commit 28348caeee
59 changed files with 13537 additions and 3160 deletions


@@ -17,13 +17,14 @@ ath11k-y += core.o \
peer.o \
dbring.o \
hw.o \
wow.o
pcic.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -8,10 +9,16 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include "qmi.h"
#include <linux/remoteproc.h>
#include "pcic.h"
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
@@ -23,18 +30,17 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,ipq6018-wifi",
.data = (void *)ATH11K_HW_IPQ6018_HW10,
},
{ .compatible = "qcom,wcn6750-wifi",
.data = (void *)ATH11K_HW_WCN6750_HW10,
},
{ .compatible = "qcom,ipq5018-wifi",
.data = (void *)ATH11K_HW_IPQ5018_HW10,
},
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
static const struct ath11k_bus_params ath11k_ahb_bus_params = {
.mhi_support = false,
.m3_fw_support = false,
.fixed_bdf_addr = true,
.fixed_mem_region = true,
};
#define ATH11K_IRQ_CE0_OFFSET 4
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
@@ -134,6 +140,61 @@ enum ext_irq_num {
tcl2host_status_ring,
};
static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
return ab->pci.msi.irqs[vector];
}
static inline u32
ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
{
u32 window_start = 0;
/* If offset lies within DP register range, use 1st window */
if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
window_start = ATH11K_PCI_WINDOW_START;
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
ATH11K_PCI_WINDOW_RANGE_MASK)
window_start = 2 * ATH11K_PCI_WINDOW_START;
return window_start;
}
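Aside: the XOR tests above work because each register window base is aligned to the window size, so offset ^ base clears the base bits exactly when the offset lies inside that window, leaving a value below the range mask. A self-contained sketch of the check, with illustrative constants assumed to mirror ATH11K_PCI_WINDOW_RANGE_MASK and HAL_SEQ_WCSS_UMAC_OFFSET (not driver code):

#include <assert.h>
#include <stdint.h>

#define WINDOW_RANGE_MASK 0x0007ffffu   /* assumed GENMASK(18, 0): 512 KiB window */
#define UMAC_BASE         0x00a00000u   /* assumed base, a multiple of the window size */

/* offset ^ base keeps only the low offset bits when offset lies in
 * [base, base + WINDOW_RANGE_MASK], so the result compares below the mask.
 */
static int in_window(uint32_t offset, uint32_t base)
{
	return (offset ^ base) < WINDOW_RANGE_MASK;
}

int main(void)
{
	assert(in_window(UMAC_BASE + 0x10, UMAC_BASE));      /* inside the window */
	assert(!in_window(UMAC_BASE + 0x100000, UMAC_BASE)); /* past the window */
	return 0;
}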
static void
ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
{
u32 window_start;
/* WCN6750 uses static window based register access */
window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
iowrite32(value, ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}
static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
{
u32 window_start;
u32 val;
/* WCN6750 uses static window based register access */
window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
val = ioread32(ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
return val;
}
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
.wakeup = NULL,
.release = NULL,
.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
.window_write32 = ath11k_ahb_window_write32_wcn6750,
.window_read32 = ath11k_ahb_window_read32_wcn6750,
};
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
return ioread32(ab->mem + offset);
@@ -210,30 +271,42 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);
ie3_reg_addr);
}
}
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
CE_HOST_IE_3_ADDRESS);
ie3_reg_addr);
}
}
@@ -346,31 +419,6 @@ static void ath11k_ahb_power_down(struct ath11k_base *ab)
rproc_shutdown(ab_ahb->tgt_rproc);
}
static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
{
int timeout;
if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
ab->hw_params.cold_boot_calib == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
(ab->qmi.cal_done == 1),
ATH11K_COLD_BOOT_FW_RESET_DELAY);
if (timeout <= 0) {
ath11k_cold_boot_cal = 0;
ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
}
/* reset the firmware */
ath11k_ahb_power_down(ab);
ath11k_ahb_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
return 0;
}
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
@@ -391,6 +439,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
@@ -399,6 +449,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
int irq_idx;
int i;
if (ab->hw_params.hybrid_bus_type)
return ath11k_pcic_free_irq(ab);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -466,7 +519,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
@@ -553,6 +606,9 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
int irq, irq_idx, i;
int ret;
if (ab->hw_params.hybrid_bus_type)
return ath11k_pcic_config_irq(ab);
/* Configure CE irqs */
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
@@ -574,7 +630,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
}
/* Configure external interrupts */
ret = ath11k_ahb_ext_irq_config(ab);
ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}
@@ -622,11 +678,90 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
return 0;
}
static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
u32 wake_irq;
u32 value = 0;
int ret;
if (!device_may_wakeup(ab->dev))
return -EPERM;
wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
ret = enable_irq_wake(wake_irq);
if (ret) {
ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
return ret;
}
value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
ATH11K_AHB_SMP2P_SMEM_MSG);
ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
if (ret) {
ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
return ret;
}
ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
return ret;
}
static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
u32 wake_irq;
u32 value = 0;
int ret;
if (!device_may_wakeup(ab->dev))
return -EPERM;
wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
ret = disable_irq_wake(wake_irq);
if (ret) {
ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
return ret;
}
reinit_completion(&ab->wow.wakeup_completed);
value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
ATH11K_AHB_SMP2P_SMEM_MSG);
ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
if (ret) {
ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
if (ret == 0) {
ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
return -ETIMEDOUT;
}
ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
return 0;
}
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
.write32 = ath11k_ahb_write32,
.read = NULL,
.irq_enable = ath11k_ahb_ext_irq_enable,
.irq_disable = ath11k_ahb_ext_irq_disable,
.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
@@ -634,6 +769,25 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
.power_up = ath11k_ahb_power_up,
};
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.start = ath11k_pcic_start,
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
.read = NULL,
.irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pcic_get_msi_address,
.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.power_down = ath11k_ahb_power_down,
.power_up = ath11k_ahb_power_up,
.suspend = ath11k_ahb_hif_suspend,
.resume = ath11k_ahb_hif_resume,
.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -656,12 +810,284 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
return 0;
}
static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
phys_addr_t msi_addr_pa;
dma_addr_t msi_addr_iova;
struct resource *res;
int int_prop;
int ret;
int i;
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ath11k_err(ab, "failed to fetch msi_addr\n");
return -ENOENT;
}
msi_addr_pa = res->start;
msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
DMA_FROM_DEVICE, 0);
if (dma_mapping_error(ab->dev, msi_addr_iova))
return -ENOMEM;
ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
if (ret)
return ret;
ab->pci.msi.ep_base_data = int_prop + 32;
for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
ret = platform_get_irq(pdev, i);
if (ret < 0)
return ret;
ab->pci.msi.irqs[i] = ret;
}
set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
return 0;
}
static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
if (!ab->hw_params.smp2p_wow_exit)
return 0;
ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
&ab_ahb->smp2p_info.smem_bit);
if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
ath11k_err(ab, "failed to fetch smem state: %ld\n",
PTR_ERR(ab_ahb->smp2p_info.smem_state));
return PTR_ERR(ab_ahb->smp2p_info.smem_state);
}
return 0;
}
static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
if (!ab->hw_params.smp2p_wow_exit)
return;
qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
}
static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
struct resource *mem_res;
void __iomem *mem;
if (ab->hw_params.hybrid_bus_type)
return ath11k_ahb_setup_msi_resources(ab);
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap error\n");
return PTR_ERR(mem);
}
ab->mem = mem;
ab->mem_len = resource_size(mem_res);
return 0;
}
static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
node = of_parse_phandle(dev->of_node, "memory-region", 1);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;
}
ab_ahb->fw.ce_paddr = r.start;
ab_ahb->fw.ce_size = resource_size(&r);
return 0;
}
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *host_dev = ab->dev;
struct platform_device_info info = {0};
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
int ret;
/* Chipsets not requiring MSA need not initialize
* MSA resources, return success in such cases.
*/
if (!ab->hw_params.fixed_fw_mem)
return 0;
ret = ath11k_ahb_setup_msa_resources(ab);
if (ret) {
ath11k_err(ab, "failed to setup msa resources\n");
return ret;
}
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ab_ahb->fw.use_tz = true;
return 0;
}
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
info.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&info);
if (IS_ERR(pdev)) {
of_node_put(node);
return PTR_ERR(pdev);
}
ret = of_dma_configure(&pdev->dev, node, true);
if (ret) {
ath11k_err(ab, "dma configure fail: %d\n", ret);
goto err_unregister;
}
ab_ahb->fw.dev = &pdev->dev;
iommu_dom = iommu_domain_alloc(&platform_bus_type);
if (!iommu_dom) {
ath11k_err(ab, "failed to allocate iommu domain\n");
ret = -ENOMEM;
goto err_unregister;
}
ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
if (ret) {
ath11k_err(ab, "could not attach device: %d\n", ret);
goto err_iommu_free;
}
ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
}
ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
goto err_iommu_unmap;
}
ab_ahb->fw.use_tz = false;
ab_ahb->fw.iommu_domain = iommu_dom;
of_node_put(node);
return 0;
err_iommu_unmap:
iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
err_iommu_detach:
iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
err_iommu_free:
iommu_domain_free(iommu_dom);
err_unregister:
platform_device_unregister(pdev);
of_node_put(node);
return ret;
}
static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct iommu_domain *iommu;
size_t unmapped_size;
/* Chipsets not requiring MSA would have not initialized
* MSA resources, return success in such cases.
*/
if (!ab->hw_params.fixed_fw_mem)
return 0;
if (ab_ahb->fw.use_tz)
return 0;
iommu = ab_ahb->fw.iommu_domain;
unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
if (unmapped_size != ab_ahb->fw.msa_size)
ath11k_err(ab, "failed to unmap firmware: %zu\n",
unmapped_size);
unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
if (unmapped_size != ab_ahb->fw.ce_size)
ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
unmapped_size);
iommu_detach_device(iommu, ab_ahb->fw.dev);
iommu_domain_free(iommu);
platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
return 0;
}
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
const struct of_device_id *of_id;
struct resource *mem_res;
void __iomem *mem;
const struct ath11k_hif_ops *hif_ops;
const struct ath11k_pci_ops *pci_ops;
enum ath11k_hw_rev hw_rev;
int ret;
of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
@@ -670,10 +1096,22 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap error\n");
return PTR_ERR(mem);
hw_rev = (enum ath11k_hw_rev)of_id->data;
switch (hw_rev) {
case ATH11K_HW_IPQ8074:
case ATH11K_HW_IPQ6018_HW10:
case ATH11K_HW_IPQ5018_HW10:
hif_ops = &ath11k_ahb_hif_ops_ipq8074;
pci_ops = NULL;
break;
case ATH11K_HW_WCN6750_HW10:
hif_ops = &ath11k_ahb_hif_ops_wcn6750;
pci_ops = &ath11k_ahb_pci_ops_wcn6750;
break;
default:
dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
return -EOPNOTSUPP;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -683,28 +1121,60 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
}
ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
ATH11K_BUS_AHB,
&ath11k_ahb_bus_params);
ATH11K_BUS_AHB);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
}
ab->hif.ops = &ath11k_ahb_hif_ops;
ab->hif.ops = hif_ops;
ab->pdev = pdev;
ab->hw_rev = (enum ath11k_hw_rev)of_id->data;
ab->mem = mem;
ab->mem_len = resource_size(mem_res);
ab->hw_rev = hw_rev;
ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
platform_set_drvdata(pdev, ab);
ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
if (ret) {
ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
goto err_core_free;
}
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
ret = ath11k_hal_srng_init(ab);
ret = ath11k_ahb_setup_resources(ab);
if (ret)
goto err_core_free;
ab->mem_ce = ab->mem;
if (ab->hw_params.ce_remap) {
const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
* and the space is not contiguous, hence remapping the CE registers
* to a new space for accessing them.
*/
ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
if (!ab->mem_ce) {
dev_err(&pdev->dev, "ce ioremap error\n");
ret = -ENOMEM;
goto err_core_free;
}
}
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
goto err_fw_deinit;
ret = ath11k_hal_srng_init(ab);
if (ret)
goto err_release_smp2p_handle;
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
@@ -731,7 +1201,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
goto err_ce_free;
}
ath11k_ahb_fwreset_from_cold_boot(ab);
ath11k_qmi_fwreset_from_cold_boot(ab);
return 0;
@@ -741,6 +1211,12 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
err_release_smp2p_handle:
ath11k_ahb_release_smp2p_handle(ab);
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -748,20 +1224,10 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return ret;
}
static int ath11k_ahb_remove(struct platform_device *pdev)
static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
unsigned long left;
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_ahb_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
}
reinit_completion(&ab->driver_recovery);
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
left = wait_for_completion_timeout(&ab->driver_recovery,
ATH11K_AHB_RECOVERY_TIMEOUT);
@@ -771,18 +1237,65 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
cancel_work_sync(&ab->qmi.event_work);
}
static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
ath11k_core_deinit(ab);
qmi_fail:
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
if (ab->hw_params.ce_remap)
iounmap(ab->mem_ce);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
}
static int ath11k_ahb_remove(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath11k_ahb_power_down(ab);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
}
ath11k_ahb_remove_prepare(ab);
ath11k_core_deinit(ab);
qmi_fail:
ath11k_ahb_free_resources(ab);
return 0;
}
static void ath11k_ahb_shutdown(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
/* platform shutdown() & remove() are mutually exclusive.
* remove() is invoked during rmmod & shutdown() during
* system reboot/shutdown.
*/
ath11k_ahb_remove_prepare(ab);
if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
goto free_resources;
ath11k_core_deinit(ab);
free_resources:
ath11k_ahb_free_resources(ab);
}
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
@@ -790,6 +1303,7 @@ static struct platform_driver ath11k_ahb_driver = {
},
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
.shutdown = ath11k_ahb_shutdown,
};
static int ath11k_ahb_init(void)


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_AHB_H
#define ATH11K_AHB_H
@@ -8,10 +9,34 @@
#include "core.h"
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
enum ath11k_ahb_smp2p_msg_id {
ATH11K_AHB_POWER_SAVE_ENTER = 1,
ATH11K_AHB_POWER_SAVE_EXIT,
};
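Aside: these two GENMASK fields define a packed 32-bit word: the message ID in bits 15..0 and a rolling sequence number in bits 31..16. A minimal userspace sketch (hypothetical names) of the packing that the suspend/resume paths in ahb.c perform with u32_encode_bits() before calling qcom_smem_state_update_bits():

#include <stdint.h>
#include <stdio.h>

#define SMP2P_SMEM_MSG_MASK    0x0000ffffu /* GENMASK(15, 0) */
#define SMP2P_SMEM_SEQ_NO_MASK 0xffff0000u /* GENMASK(31, 16) */

static uint32_t smp2p_pack(uint16_t seq_no, uint16_t msg)
{
	return (((uint32_t)seq_no << 16) & SMP2P_SMEM_SEQ_NO_MASK) |
	       ((uint32_t)msg & SMP2P_SMEM_MSG_MASK);
}

int main(void)
{
	/* seq_no 5 with ATH11K_AHB_POWER_SAVE_ENTER (1) -> 0x00050001 */
	printf("0x%08x\n", smp2p_pack(5, 1));
	return 0;
}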
struct ath11k_base;
struct ath11k_ahb {
struct rproc *tgt_rproc;
struct {
struct device *dev;
struct iommu_domain *iommu_domain;
dma_addr_t msa_paddr;
u32 msa_size;
dma_addr_t ce_paddr;
u32 ce_size;
bool use_tz;
} fw;
struct {
unsigned short seq_no;
unsigned int smem_bit;
struct qcom_smem_state *smem_state;
} smp2p_info;
};
static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -249,7 +250,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
/* only ce4 needs shadow workaroud*/
/* only ce4 needs shadow workaround */
if (ce_id == 4)
return true;
return false;
@@ -441,7 +442,7 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
@@ -519,7 +520,7 @@ static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n",
ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->send_cb(ab, skb);
}
@@ -918,9 +919,6 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;
ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
&ab->qmi.ce_cfg.shadow_reg_v2_len);
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
@@ -1044,7 +1042,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
/* Free any parial successful allocation */
/* Free any partial successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}


@@ -49,6 +49,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len);
#define CE_HOST_IE_2_ADDRESS 0x00A18040
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
/* CE IE registers are different for IPQ5018 */
#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C
#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050
#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS
#define CE_HOST_IE_3_SHIFT 0xC
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
@@ -84,6 +89,17 @@ struct ce_pipe_config {
__le32 reserved;
};
struct ce_ie_addr {
u32 ie1_reg_addr;
u32 ie2_reg_addr;
u32 ie3_reg_addr;
};
struct ce_remap {
u32 base;
u32 size;
};
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
@@ -145,7 +161,7 @@ struct ath11k_ce_ring {
u32 hal_ring_id;
/* keep last */
struct sk_buff *skb[0];
struct sk_buff *skb[];
};
struct ath11k_ce_pipe {

File diff suppressed because it is too large.


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -10,6 +11,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/rhashtable.h>
#include <linux/average.h>
#if defined(__FreeBSD__)
#include <linux/wait.h>
#endif
@@ -26,6 +31,7 @@
#include "thermal.h"
#include "dbring.h"
#include "spectral.h"
#include "wow.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -39,9 +45,27 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF
#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
/* SMBIOS type containing Board Data File Name Extension */
#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
/* SMBIOS type structure length (excluding strings-set) */
#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
/* The magic used by QCA spec */
#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
extern unsigned int ath11k_frame_mode;
extern bool ath11k_ftm_mode;
#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
#define ATH11K_MON_TIMER_INTERVAL 10
#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
@@ -139,6 +163,8 @@ enum ath11k_hw_rev {
ATH11K_HW_QCN9074_HW10,
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
};
enum ath11k_firmware_mode {
@@ -168,6 +194,39 @@ struct ath11k_ext_irq_grp {
struct net_device napi_ndev;
};
enum ath11k_smbios_cc_type {
/* disable country code setting from SMBIOS */
ATH11K_SMBIOS_CC_DISABLE = 0,
/* set country code by ANSI country name, based on ISO3166-1 alpha2 */
ATH11K_SMBIOS_CC_ISO = 1,
/* worldwide regdomain */
ATH11K_SMBIOS_CC_WW = 2,
};
struct ath11k_smbios_bdf {
struct dmi_header hdr;
u8 features_disabled;
/* enum ath11k_smbios_cc_type */
u8 country_code_flag;
/* To set specific country, you need to set country code
* flag=ATH11K_SMBIOS_CC_ISO first, then if country is United
* States, then country code value = 0x5553 ("US",'U' = 0x55, 'S'=
* 0x53). To set country to INDONESIA, then country code value =
* 0x4944 ("IN", 'I'=0x49, 'D'=0x44). If country code flag =
* ATH11K_SMBIOS_CC_WW, then you can use worldwide regulatory
* setting.
*/
u16 cc_code;
u8 bdf_enabled;
u8 bdf_ext[];
} __packed;
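Aside: a worked example of the cc_code packing described in the comment above, as a stand-alone sketch; the driver only reads this value from the SMBIOS table, it never constructs it:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* "US": 'U' = 0x55 in the high byte, 'S' = 0x53 in the low byte */
	uint16_t cc_us = ((uint16_t)'U' << 8) | (uint16_t)'S';
	/* 0x4944 decodes as 'I' = 0x49, 'D' = 0x44 ("ID", Indonesia's alpha2) */
	uint16_t cc_id = ((uint16_t)'I' << 8) | (uint16_t)'D';

	assert(cc_us == 0x5553);
	assert(cc_id == 0x4944);
	return 0;
}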
#define HEHANDLE_CAP_PHYINFO_SIZE 3
#define HECAP_PHYINFO_SIZE 9
#define HECAP_MACINFO_SIZE 5
@@ -194,6 +253,13 @@ struct ath11k_he {
#define MAX_RADIOS 3
/* ipq5018 hw param macros */
#define MAX_RADIOS_5018 1
#define CE_CNT_5018 6
#define TARGET_CE_CNT_5018 9
#define SVC_CE_MAP_LEN_5018 17
#define RXDMA_PER_PDEV_5018 1
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -210,6 +276,12 @@ enum ath11k_scan_state {
ATH11K_SCAN_ABORTING,
};
enum ath11k_11d_state {
ATH11K_11D_IDLE,
ATH11K_11D_PREPARING,
ATH11K_11D_RUNNING,
};
enum ath11k_dev_flags {
ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED,
@@ -225,6 +297,9 @@ enum ath11k_dev_flags {
ATH11K_FLAG_CE_IRQ_ENABLED,
ATH11K_FLAG_EXT_IRQ_ENABLED,
ATH11K_FLAG_FIXED_MEM_RGN,
ATH11K_FLAG_DEVICE_INIT_DONE,
ATH11K_FLAG_MULTI_MSI_VECTORS,
ATH11K_FLAG_FTM_SEGMENTED,
};
enum ath11k_monitor_flags {
@@ -233,6 +308,30 @@ enum ath11k_monitor_flags {
ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
#define ATH11K_IPV6_UC_TYPE 0
#define ATH11K_IPV6_AC_TYPE 1
#define ATH11K_IPV6_MAX_COUNT 16
#define ATH11K_IPV4_MAX_COUNT 2
struct ath11k_arp_ns_offload {
u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
u32 ipv4_count;
u32 ipv6_count;
u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
u8 mac_addr[ETH_ALEN];
};
struct ath11k_rekey_data {
u8 kck[NL80211_KCK_LEN];
u8 kek[NL80211_KCK_LEN];
u64 replay_ctr;
bool enable_offload;
};
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -270,6 +369,7 @@ struct ath11k_vif {
bool is_started;
bool is_up;
bool ftm_responder;
bool spectral_enabled;
bool ps;
u32 aid;
@@ -284,6 +384,12 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
@@ -391,6 +497,8 @@ struct ath11k_per_ppdu_tx_stats {
u32 retry_bytes;
};
DECLARE_EWMA(avg_rssi, 10, 8)
struct ath11k_sta {
struct ath11k_vif *arvif;
@@ -409,6 +517,7 @@ struct ath11k_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
struct ewma_avg_rssi avg_rssi;
s8 rssi_beacon;
s8 chain_signal[IEEE80211_MAX_CHAINS];
struct ath11k_htt_tx_stats *tx_stats;
@@ -421,13 +530,22 @@ struct ath11k_sta {
bool use_4addr_set;
u16 tcl_metadata;
/* Protected with ar->data_lock */
enum ath11k_wmi_peer_ps_state peer_ps_state;
u64 ps_start_time;
u64 ps_start_jiffies;
u64 ps_total_duration;
bool peer_current_ps_valid;
u32 bw_prev;
};
#define ATH11K_MIN_5G_FREQ 4150
#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
#define ATH11K_NUM_CHANS 101
#define ATH11K_MAX_5G_CHAN 173
#define ATH11K_NUM_CHANS 102
#define ATH11K_MAX_5G_CHAN 177
enum ath11k_state {
ATH11K_STATE_OFF,
@@ -435,6 +553,7 @@ enum ath11k_state {
ATH11K_STATE_RESTARTING,
ATH11K_STATE_RESTARTED,
ATH11K_STATE_WEDGED,
ATH11K_STATE_FTM,
/* Add other states as required */
};
@@ -462,19 +581,21 @@ struct ath11k_dbg_htt_stats {
spinlock_t lock;
};
#define MAX_MODULE_ID_BITMAP_WORDS 16
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
u32 extd_tx_stats;
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
bool fw_stats_done;
u32 extd_rx_stats;
u32 pktlog_filter;
u32 pktlog_mode;
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
u32 mem_offset;
u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
};
struct ath11k_per_peer_tx_stats {
@@ -500,8 +621,6 @@ struct ath11k {
struct ath11k_pdev_wmi *wmi;
struct ath11k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
u32 ht_cap_info;
u32 vht_cap_info;
struct ath11k_he ar_he;
enum ath11k_state state;
bool supports_6ghz;
@@ -603,6 +722,9 @@ struct ath11k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath11k_wow wow;
struct completion target_suspend;
bool target_suspend_ack;
struct ath11k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -611,6 +733,8 @@ struct ath11k {
u32 last_ppdu_id;
u32 cached_ppdu_id;
int monitor_vdev_id;
struct completion fw_mode_reset;
u8 ftm_msgref;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
@@ -620,11 +744,20 @@ struct ath11k {
bool dfs_block_radar_events;
struct ath11k_thermal thermal;
u32 vdev_id_11d_scan;
struct completion finish_11d_scan;
struct completion finish_11d_ch_list;
bool pending_11d;
struct completion completed_11d_scan;
enum ath11k_11d_state state_11d;
bool regdom_set_by_user;
int hw_rate_code;
u8 twt_enabled;
bool nlo_enabled;
u8 alpha2[REG_ALPHA2_LEN + 1];
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
bool fw_stats_done;
/* protected by conf_mutex */
bool ps_state_enable;
bool ps_timekeeper_enable;
};
struct ath11k_band_cap {
@@ -666,12 +799,12 @@ struct ath11k_board_data {
size_t len;
};
struct ath11k_bus_params {
bool mhi_support;
bool m3_fw_support;
bool fixed_bdf_addr;
bool fixed_mem_region;
bool static_window_map;
struct ath11k_pci_ops {
int (*wakeup)(struct ath11k_base *ab);
void (*release)(struct ath11k_base *ab);
int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
};
/* IPQ8074 HW channel counters frequency value in hertz */
@@ -715,9 +848,23 @@ struct ath11k_soc_dp_stats {
struct ath11k_dp_ring_bp_stats bp_stats;
};
struct ath11k_msi_user {
char *name;
int num_vectors;
u32 base_vector;
};
struct ath11k_msi_config {
int total_vectors;
int total_users;
struct ath11k_msi_user *users;
u16 hw_rev;
};
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
enum ath11k_firmware_mode fw_mode;
struct platform_device *pdev;
struct device *dev;
struct ath11k_qmi qmi;
@@ -732,6 +879,7 @@ struct ath11k_base {
struct ath11k_dp dp;
void __iomem *mem;
void __iomem *mem_ce;
unsigned long mem_len;
struct {
@@ -759,6 +907,18 @@ struct ath11k_base {
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
/* To synchronize rhash tbl write operation */
struct mutex tbl_mtx_lock;
/* The rhashtable containing struct ath11k_peer keyed by mac addr */
struct rhashtable *rhead_peer_addr;
struct rhashtable_params rhash_peer_addr_param;
/* The rhashtable containing struct ath11k_peer keyed by id */
struct rhashtable *rhead_peer_id;
struct rhashtable_params rhash_peer_id_param;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN];
@@ -772,13 +932,12 @@ struct ath11k_base {
int bd_api;
struct ath11k_hw_params hw_params;
struct ath11k_bus_params bus_params;
const struct firmware *cal_file;
/* Below regd's are protected by ab->data_lock */
/* This is the regd set for every radio
* by the firmware during initializatin
* by the firmware during initialization
*/
struct ieee80211_regdomain *default_regd[MAX_RADIOS];
/* This regd is set during dynamic country setting
@@ -790,7 +949,6 @@ struct ath11k_base {
enum ath11k_dfs_region dfs_region;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_soc;
struct dentry *debugfs_ath11k;
#endif
struct ath11k_soc_dp_stats soc_stats;
@@ -800,6 +958,18 @@ struct ath11k_base {
struct work_struct restart_work;
struct work_struct update_11d_work;
u8 new_alpha2[3];
struct workqueue_struct *workqueue_aux;
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
struct completion reconfigure_complete;
struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -808,10 +978,6 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
struct work_struct rfkill_work;
/* true means radio is on */
bool rfkill_radio_on;
/* To synchronize 11d scan vdev id */
struct mutex vdev_id_11d_lock;
@@ -827,8 +993,28 @@ struct ath11k_base {
u32 subsystem_device;
} id;
struct {
struct {
const struct ath11k_msi_config *config;
u32 ep_base_data;
u32 irqs[32];
u32 addr_lo;
u32 addr_hi;
} msi;
const struct ath11k_pci_ops *ops;
} pci;
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
u32 expected_seq;
u8 *eventdata;
} testmode;
#endif
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
u8 drv_priv[] __aligned(sizeof(void *));
};
struct ath11k_fw_stats_pdev {
@@ -983,6 +1169,12 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
void ath11k_fw_stats_init(struct ath11k *ar);
void ath11k_fw_stats_pdevs_free(struct list_head *head);
void ath11k_fw_stats_vdevs_free(struct list_head *head);
void ath11k_fw_stats_bcn_free(struct list_head *head);
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
@@ -990,6 +1182,9 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[];
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
@@ -997,8 +1192,7 @@ int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
enum ath11k_bus bus,
const struct ath11k_bus_params *bus_params);
enum ath11k_bus bus);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
@@ -1008,10 +1202,12 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
const char *name);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *filename);


@@ -26,18 +26,19 @@ int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
void *buffer, u32 size)
{
u32 *temp;
int idx;
/* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE
* and the variable size is expected to be the number of u32 values
* to be stored, not the number of bytes.
*/
size = size / sizeof(u32);
size = size >> 2;
for (idx = 0, temp = buffer; idx < size; idx++, temp++)
*temp++ = ATH11K_DB_MAGIC_VALUE;
memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}
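Aside: memset32() counts 32-bit words rather than bytes, which is why the byte size is divided by sizeof(u32) first. A minimal userspace sketch with a local stand-in for the kernel helper (names hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MAGIC 0xdeadbeefu /* placeholder for ATH11K_DB_MAGIC_VALUE */

/* Stand-in for the kernel's memset32(): fills n 32-bit words, not n bytes. */
static void memset32_local(uint32_t *s, uint32_t v, size_t n)
{
	while (n--)
		*s++ = v;
}

int main(void)
{
	uint32_t buf[4] = { 0 };
	size_t size = sizeof(buf); /* 16 bytes */

	memset32_local(buf, MAGIC, size / sizeof(uint32_t)); /* 4 words */
	assert(buf[0] == MAGIC && buf[3] == MAGIC);
	return 0;
}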
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
struct ath11k_dbring_element *buff)
struct ath11k_dbring_element *buff,
enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
@@ -101,7 +103,8 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
struct ath11k_dbring *ring)
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
kfree(buff);
break;
}
ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
ret = ath11k_dbring_fill_bufs(ar, ring);
ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
@@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
u8 pdev_idx, rbm;
u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
@@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
ath11k_debugfs_add_dbring_entry(ar, module_id,
ATH11K_DBG_DBR_EVENT_RX, srng);
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
@@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
buff->paddr = 0;
memset(buff->payload, 0, size);
ath11k_dbring_bufs_replenish(ar, ring, buff);
ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);


@@ -98,12 +98,12 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
if (ath11k_debug_mask & mask)
#if defined(__linux__)
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
dev_printk(KERN_DEBUG, ab->dev, "%s %pV", ath11k_dbg_str(mask), &vaf);
#elif defined(__FreeBSD__)
{
char *str;
vasprintf(&str, M_KMALLOC, fmt, args);
dev_printk(KERN_DEBUG, ab->dev, "%s", str);
dev_printk(KERN_DEBUG, ab->dev, "%s %s", ath11k_dbg_str(mask), str);
free(str, M_KMALLOC);
}
#endif


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUG_H_
@@ -21,13 +22,57 @@ enum ath11k_debug_mask {
ATH11K_DBG_MGMT = 0x00000100,
ATH11K_DBG_REG = 0x00000200,
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
ATH11K_DBG_HAL = 0x00000800,
ATH11K_DBG_PCI = 0x00001000,
ATH11K_DBG_DP_TX = 0x00001000,
ATH11K_DBG_DP_RX = 0x00002000,
ATH11K_DBG_ANY = 0xffffffff,
ATH11K_DBG_DP_TX = 0x00002000,
ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_CE = 0x00008000,
};
static inline const char *ath11k_dbg_str(enum ath11k_debug_mask mask)
{
switch (mask) {
case ATH11K_DBG_AHB:
return "ahb";
case ATH11K_DBG_WMI:
return "wmi";
case ATH11K_DBG_HTC:
return "htc";
case ATH11K_DBG_DP_HTT:
return "dp_htt";
case ATH11K_DBG_MAC:
return "mac";
case ATH11K_DBG_BOOT:
return "boot";
case ATH11K_DBG_QMI:
return "qmi";
case ATH11K_DBG_DATA:
return "data";
case ATH11K_DBG_MGMT:
return "mgmt";
case ATH11K_DBG_REG:
return "reg";
case ATH11K_DBG_TESTMODE:
return "testmode";
case ATH11K_DBG_HAL:
return "hal";
case ATH11K_DBG_PCI:
return "pci";
case ATH11K_DBG_DP_TX:
return "dp_tx";
case ATH11K_DBG_DP_RX:
return "dp_rx";
case ATH11K_DBG_CE:
return "ce";
/* no default handler to allow compiler to check that the
* enum is fully handled
*/
}
return "<?>";
}
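Aside: leaving out the default case is the usual -Wswitch idiom: the compiler then warns whenever an enumerator of ath11k_debug_mask is added but not handled in the switch (the hunk above also appears to drop ATH11K_DBG_ANY, keeping the enum exhaustively handled). A hypothetical minimal illustration:

/* With no default case, GCC/Clang -Wswitch (part of -Wall) warns if an
 * enumerator is missing from the switch; a default case would hide that.
 */
enum mask_id { MASK_A, MASK_B };

static const char *mask_str(enum mask_id m)
{
	switch (m) {
	case MASK_A:
		return "a";
	case MASK_B:
		return "b";
	/* adding MASK_C to the enum now triggers a warning here */
	}
	return "<?>";
}

int main(void)
{
	return mask_str(MASK_A)[0] == 'a' ? 0 : 1;
}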
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);

File diff suppressed because it is too large.


@@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
enum ath11k_dbg_dbr_event {
ATH11K_DBG_DBR_EVENT_INVALID,
ATH11K_DBG_DBR_EVENT_RX,
ATH11K_DBG_DBR_EVENT_REPLENISH,
ATH11K_DBG_DBR_EVENT_MAX,
};
struct ath11k_dbg_dbr_entry {
u32 hp;
u32 tp;
u64 timestamp;
enum ath11k_dbg_dbr_event event;
};
struct ath11k_dbg_dbr_data {
/* protects ath11k_db_ring_debug data */
spinlock_t lock;
struct ath11k_dbg_dbr_entry *entries;
u32 dbr_debug_idx;
u32 num_ring_debug_entries;
};
struct ath11k_debug_dbr {
struct ath11k_dbg_dbr_data dbr_dbg_data;
struct dentry *dbr_debugfs;
bool dbr_debug_enabled;
};
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
@@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
};
enum ath11k_pktlog_enum {
ATH11K_PKTLOG_TYPE_INVALID = 0,
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
@@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_MAX,
};
enum fw_dbglog_wlan_module_id {
WLAN_MODULE_ID_MIN = 0,
WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
WLAN_MODULE_WMI,
WLAN_MODULE_STA_PWRSAVE,
WLAN_MODULE_WHAL,
WLAN_MODULE_COEX,
WLAN_MODULE_ROAM,
WLAN_MODULE_RESMGR_CHAN_MANAGER,
WLAN_MODULE_RESMGR,
WLAN_MODULE_VDEV_MGR,
WLAN_MODULE_SCAN,
WLAN_MODULE_RATECTRL,
WLAN_MODULE_AP_PWRSAVE,
WLAN_MODULE_BLOCKACK,
WLAN_MODULE_MGMT_TXRX,
WLAN_MODULE_DATA_TXRX,
WLAN_MODULE_HTT,
WLAN_MODULE_HOST,
WLAN_MODULE_BEACON,
WLAN_MODULE_OFFLOAD,
WLAN_MODULE_WAL,
WLAN_WAL_MODULE_DE,
WLAN_MODULE_PCIELP,
WLAN_MODULE_RTT,
WLAN_MODULE_RESOURCE,
WLAN_MODULE_DCS,
WLAN_MODULE_CACHEMGR,
WLAN_MODULE_ANI,
WLAN_MODULE_P2P,
WLAN_MODULE_CSA,
WLAN_MODULE_NLO,
WLAN_MODULE_CHATTER,
WLAN_MODULE_WOW,
WLAN_MODULE_WAL_VDEV,
WLAN_MODULE_WAL_PDEV,
WLAN_MODULE_TEST,
WLAN_MODULE_STA_SMPS,
WLAN_MODULE_SWBMISS,
WLAN_MODULE_WMMAC,
WLAN_MODULE_TDLS,
WLAN_MODULE_HB,
WLAN_MODULE_TXBF,
WLAN_MODULE_BATCH_SCAN,
WLAN_MODULE_THERMAL_MGR,
WLAN_MODULE_PHYERR_DFS,
WLAN_MODULE_RMC,
WLAN_MODULE_STATS,
WLAN_MODULE_NAN,
WLAN_MODULE_IBSS_PWRSAVE,
WLAN_MODULE_HIF_UART,
WLAN_MODULE_LPI,
WLAN_MODULE_EXTSCAN,
WLAN_MODULE_UNIT_TEST,
WLAN_MODULE_MLME,
WLAN_MODULE_SUPPL,
WLAN_MODULE_ERE,
WLAN_MODULE_OCB,
WLAN_MODULE_RSSI_MONITOR,
WLAN_MODULE_WPM,
WLAN_MODULE_CSS,
WLAN_MODULE_PPS,
WLAN_MODULE_SCAN_CH_PREDICT,
WLAN_MODULE_MAWC,
WLAN_MODULE_CMC_QMIC,
WLAN_MODULE_EGAP,
WLAN_MODULE_NAN20,
WLAN_MODULE_QBOOST,
WLAN_MODULE_P2P_LISTEN_OFFLOAD,
WLAN_MODULE_HALPHY,
WLAN_WAL_MODULE_ENQ,
WLAN_MODULE_GNSS,
WLAN_MODULE_WAL_MEM,
WLAN_MODULE_SCHED_ALGO,
WLAN_MODULE_TX,
WLAN_MODULE_RX,
WLAN_MODULE_WLM,
WLAN_MODULE_RU_ALLOCATOR,
WLAN_MODULE_11K_OFFLOAD,
WLAN_MODULE_STA_TWT,
WLAN_MODULE_AP_TWT,
WLAN_MODULE_UL_OFDMA,
WLAN_MODULE_HPCS_PULSE,
WLAN_MODULE_DTF,
WLAN_MODULE_QUIET_IE,
WLAN_MODULE_SHMEM_MGR,
WLAN_MODULE_CFIR,
WLAN_MODULE_CODE_COVER,
WLAN_MODULE_SHO,
WLAN_MODULE_MLO_MGR,
WLAN_MODULE_PEER_INIT,
WLAN_MODULE_STA_MLO_PS,
WLAN_MODULE_ID_MAX,
WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
};
enum fw_dbglog_log_level {
ATH11K_FW_DBGLOG_ML = 0,
ATH11K_FW_DBGLOG_VERBOSE = 0,
ATH11K_FW_DBGLOG_INFO,
ATH11K_FW_DBGLOG_INFO_LVL_1,
ATH11K_FW_DBGLOG_INFO_LVL_2,
ATH11K_FW_DBGLOG_WARN,
ATH11K_FW_DBGLOG_ERR,
ATH11K_FW_DBGLOG_LVL_MAX
};
struct ath11k_fw_dbglog {
enum wmi_debug_log_param param;
union {
struct {
/* log_level values are given in enum fw_dbglog_log_level */
u16 log_level;
/* module_id values are given in enum fw_dbglog_wlan_module_id */
u16 module_id;
};
/* value is either log_level&module_id/vdev_id/vdev_id_bitmap/log_level
* according to param
*/
u32 value;
};
};
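Aside: the union overlays the two u16 fields on the single u32 value, matching the "log_level&module_id" reading of value described in the comment; on a little-endian host, log_level lands in bits 15..0 and module_id in bits 31..16. A hypothetical stand-alone sketch of that overlay (endianness assumption stated in the comment):

#include <assert.h>
#include <stdint.h>

/* Simplified copy of the union layout; assumes a little-endian host, so
 * log_level occupies bits 15..0 of value and module_id bits 31..16.
 */
struct dbglog_param {
	union {
		struct {
			uint16_t log_level;
			uint16_t module_id;
		};
		uint32_t value;
	};
};

int main(void)
{
	struct dbglog_param p = { .value = 0 };

	p.log_level = 2; /* e.g. ATH11K_FW_DBGLOG_INFO_LVL_1 */
	p.module_id = 1; /* e.g. WLAN_MODULE_WMI */
	assert(p.value == (((uint32_t)1 << 16) | 2)); /* little-endian */
	return 0;
}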
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@@ -114,7 +269,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
int ath11k_debugfs_register(struct ath11k *ar);
void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
@@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng);
#else
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
@@ -179,8 +341,8 @@ static inline void ath11k_debugfs_unregister(struct ath11k *ar)
{
}
static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
struct sk_buff *skb)
static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
struct ath11k_fw_stats *stats)
{
}
@@ -224,6 +386,21 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
#endif /* CONFIG_MAC80211_DEBUGFS*/
static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
}
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
struct hal_srng *srng)
{
}
#endif /* CONFIG_ATH11K_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */


@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -1403,6 +1404,8 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brpoll_7);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
htt_stats_buf->ax_basic_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
htt_stats_buf->ax_ulmumimo_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
htt_stats_buf->ax_bsr_trigger);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
@@ -1485,6 +1488,8 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
htt_stats_buf->ax_mu_mimo_brp7_err);
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
htt_stats_buf->ax_basic_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
htt_stats_buf->ax_ulmumimo_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
htt_stats_buf->ax_bsr_trigger_err);
len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
@@ -1519,6 +1524,16 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
htt_stats_buf->mu_mimo_ppdu_posted);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
len += scnprintf(buf + len, buf_len - len,
"ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
@@ -1535,10 +1550,34 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ofdma_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_sch_bar_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
}
len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
len += scnprintf(buf + len, buf_len - len,
"ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
len += scnprintf(buf + len, buf_len - len,
"ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
}
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2933,6 +2972,21 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
htt_stats_buf->txbf);
len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
htt_stats_buf->rx_su_ndpa);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
"rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
htt_stats_buf->rx_mu_ndpa);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
"rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
htt_stats_buf->rx_br_poll);
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
"rx_legacy_cck_rate",
HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
@@ -2995,6 +3049,38 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
len += scnprintf(buf + len, buf_len - len, "\n");
}
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
"rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
"rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
"rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
"rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
"rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
"rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
"rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
"rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
"\n");
len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
htt_stats_buf->per_chain_rssi_pkt_type);
@@ -3925,6 +4011,114 @@ void htt_print_phy_stats_tlv(const void *tag_buf,
stats_req->buf_len = len;
}
static inline void
htt_print_phy_reset_counters_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
if (tag_len < sizeof(*htt_stats_buf))
return;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
htt_stats_buf->cf_active_low_fail_cnt);
len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
htt_stats_buf->cf_active_low_pass_cnt);
len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
htt_stats_buf->phy_off_through_vreg_cnt);
len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
htt_stats_buf->force_calibration_cnt);
len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
htt_stats_buf->rf_mode_switch_phy_off_cnt);
stats_req->buf_len = len;
}
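Both new printers take tag_len and bail out when the TLV is shorter than the struct they are about to dereference, so a truncated or short TLV from older firmware is dropped rather than read out of bounds. A minimal sketch of the idiom, with a hypothetical TLV type (names are illustrative, not from the driver):

struct example_tlv {
	u32 counter;
};

static void example_print_tlv(const void *tag_buf, u16 tag_len)
{
	const struct example_tlv *tlv = tag_buf;

	/* never dereference past what the firmware actually sent */
	if (tag_len < sizeof(*tlv))
		return;

	pr_info("counter = %u\n", tlv->counter);
}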
static inline void
htt_print_phy_reset_stats_tlv(const void *tag_buf,
u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
if (tag_len < sizeof(*htt_stats_buf))
return;
len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
htt_stats_buf->pdev_id);
len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
htt_stats_buf->chan_mhz);
len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
htt_stats_buf->chan_band_center_freq1);
len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
htt_stats_buf->chan_band_center_freq2);
len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
htt_stats_buf->chan_phy_mode);
len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
htt_stats_buf->chan_flags);
len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
htt_stats_buf->chan_num);
len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
htt_stats_buf->reset_cause);
len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
htt_stats_buf->prev_reset_cause);
len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
htt_stats_buf->phy_warm_reset_src);
len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
htt_stats_buf->rx_gain_tbl_mode);
len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
htt_stats_buf->xbar_val);
len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
htt_stats_buf->force_calibration);
len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
htt_stats_buf->phyrf_mode);
len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
htt_stats_buf->phy_homechan);
len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
htt_stats_buf->phy_tx_ch_mask);
len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
htt_stats_buf->phy_rx_ch_mask);
len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
htt_stats_buf->phybb_ini_mask);
len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
htt_stats_buf->phyrf_ini_mask);
len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
htt_stats_buf->phy_dfs_en_mask);
len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
htt_stats_buf->phy_sscan_en_mask);
len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
htt_stats_buf->phy_synth_sel_mask);
len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
htt_stats_buf->phy_adfs_freq);
len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
htt_stats_buf->cck_fir_settings);
len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
htt_stats_buf->phy_dyn_pri_chan);
len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
htt_stats_buf->cca_thresh);
len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
htt_stats_buf->dyn_cca_status);
len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
htt_stats_buf->rxdesense_thresh_hw);
len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n",
htt_stats_buf->rxdesense_thresh_sw);
stats_req->buf_len = len;
}
static inline
void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
struct debug_htt_stats_req *stats_req)
@ -4339,6 +4533,12 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_PHY_STATS_TAG:
htt_print_phy_stats_tlv(tag_buf, stats_req);
break;
case HTT_STATS_PHY_RESET_COUNTERS_TAG:
htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PHY_RESET_STATS_TAG:
htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
break;

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@ -110,6 +111,8 @@ enum htt_tlv_tag_t {
HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
HTT_STATS_PHY_COUNTERS_TAG = 121,
HTT_STATS_PHY_STATS_TAG = 122,
HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
HTT_STATS_PHY_RESET_STATS_TAG = 124,
HTT_STATS_MAX_TAG,
};
@ -142,7 +145,8 @@ enum htt_tx_pdev_underrun_enum {
/* Bytes stored in little endian order */
/* Length should be multiple of DWORD */
struct htt_stats_string_tlv {
u32 data[0]; /* Can be variable length */
/* Can be variable length */
DECLARE_FLEX_ARRAY(u32, data);
} __packed;
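The recurring change in this header swaps GNU zero-length arrays (u32 data[0];) for DECLARE_FLEX_ARRAY(). A C99 flexible array member (u32 data[];) cannot legally be the only member of a struct, which is exactly the shape of these variable-length TLVs; the macro from linux/stddef.h wraps the member so the declaration stays standard-conforming while the compiler and CONFIG_FORTIFY_SOURCE see a true flexible array. Roughly, as a sketch of the expansion:

/* Before: GNU extension, invisible to array bounds checking. */
struct old_tlv {
	u32 data[0];
} __packed;

/* After: DECLARE_FLEX_ARRAY(u32, data) expands to approximately this,
 * making "data" a standard flexible array even as the sole member.
 */
struct new_tlv {
	struct {
		struct { } __empty_data;
		u32 data[];
	};
} __packed;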
#define HTT_STATS_MAC_ID GENMASK(7, 0)
@ -204,27 +208,32 @@ struct htt_tx_pdev_stats_cmn_tlv {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_pdev_stats_urrn_tlv_v {
u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */
/* HTT_TX_PDEV_MAX_URRN_STATS */
DECLARE_FLEX_ARRAY(u32, urrn_stats);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_pdev_stats_flush_tlv_v {
u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
/* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
DECLARE_FLEX_ARRAY(u32, flush_errs);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_pdev_stats_sifs_tlv_v {
u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
/* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
DECLARE_FLEX_ARRAY(u32, sifs_status);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_pdev_stats_phy_err_tlv_v {
u32 phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
/* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
DECLARE_FLEX_ARRAY(u32, phy_errs);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_pdev_stats_sifs_hist_tlv_v {
u32 sifs_hist_status[0]; /* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */
/* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */
DECLARE_FLEX_ARRAY(u32, sifs_hist_status);
};
struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
@ -589,20 +598,20 @@ struct htt_tx_hwq_difs_latency_stats_tlv_v {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_hwq_cmd_result_stats_tlv_v {
/* Histogram of sched cmd result */
u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
/* Histogram of sched cmd result, HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
DECLARE_FLEX_ARRAY(u32, cmd_result);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_hwq_cmd_stall_stats_tlv_v {
/* Histogram of various pause conditions */
u32 cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */
/* Histogram of various pause conditions, HTT_TX_HWQ_MAX_CMD_STALL_STATS */
DECLARE_FLEX_ARRAY(u32, cmd_stall_status);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_hwq_fes_result_stats_tlv_v {
/* Histogram of number of user fes result */
u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */
/* Histogram of number of user fes result, HTT_TX_HWQ_MAX_FES_RESULT_STATS */
DECLARE_FLEX_ARRAY(u32, fes_result);
};
/* NOTE: Variable length TLV, use length spec to infer array size
@ -629,13 +638,13 @@ struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
* completing the burst, we identify the txop used in the burst and
* incr the corresponding bin.
* Each bin represents 1ms & we have 10 bins in this histogram.
* they are deined in FW using the following macros
* they are defined in FW using the following macros
* #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
* #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
*/
struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
/* Histogram of txop used cnt */
u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */
/* Histogram of txop used cnt, HTT_TX_HWQ_TXOP_USED_CNT_HIST */
DECLARE_FLEX_ARRAY(u32, txop_used_cnt_hist);
};
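Per the comment above, the firmware keeps ten bins of 1 ms each; a hedged sketch of the bin selection it describes (only the two WAL_* macros come from the comment, the helper is illustrative):

#define WAL_MAX_TXOP_USED_CNT_HISTOGRAM  10
#define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000	/* 1 ms, in microseconds */

/* Illustrative: map a burst's txop usage in microseconds to its bin. */
static u32 txop_used_bin(u32 txop_used_us)
{
	u32 bin = txop_used_us / WAL_TXOP_USED_HISTOGRAM_INTERVAL;

	/* clamp overlong bursts into the last bin */
	return min_t(u32, bin, WAL_MAX_TXOP_USED_CNT_HISTOGRAM - 1);
}

A 2.3 ms burst, for example, increments bin 2.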
/* == TX SELFGEN STATS == */
@ -682,6 +691,7 @@ struct htt_tx_selfgen_ax_stats_tlv {
u32 ax_bsr_trigger;
u32 ax_mu_bar_trigger;
u32 ax_mu_rts_trigger;
u32 ax_ulmumimo_trigger;
};
struct htt_tx_selfgen_ac_err_stats_tlv {
@ -712,12 +722,14 @@ struct htt_tx_selfgen_ax_err_stats_tlv {
u32 ax_bsr_trigger_err;
u32 ax_mu_bar_trigger_err;
u32 ax_mu_rts_trigger_err;
u32 ax_ulmumimo_trigger_err;
};
/* == TX MU STATS == */
#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
/* mu-mimo sw sched cmd stats */
@ -734,6 +746,24 @@ struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
/* UL MU-MIMO */
/* ax_ul_mumimo_basic_sch_nusers[i] is the number of basic triggers sent
* for (i+1) users
*/
u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
/* ax_ul_mumimo_brp_sch_nusers[i] is the number of brp triggers sent
* for (i+1) users
*/
u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
};
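Note the encoding spelled out in the comments: index i holds the count of triggers sent for (i + 1) users, so deriving per-user totals from the histogram needs a re-weight. A small hedged example of consuming the array, with stats pointing at a struct htt_tx_pdev_mu_mimo_sch_stats_tlv:

/* Illustrative only: average users per basic UL MU-MIMO trigger. */
u32 i, triggers = 0, users = 0;

for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
	triggers += stats->ax_ul_mumimo_basic_sch_nusers[i];
	users += (i + 1) * stats->ax_ul_mumimo_basic_sch_nusers[i];
}
/* avg users per trigger = users / triggers (guard triggers == 0) */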
struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
@ -782,17 +812,20 @@ struct htt_tx_pdev_mpdu_stats_tlv {
/* == TX SCHED STATS == */
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_sched_txq_cmd_posted_tlv_v {
u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
/* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
DECLARE_FLEX_ARRAY(u32, sched_cmd_posted);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_sched_txq_cmd_reaped_tlv_v {
u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
/* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
DECLARE_FLEX_ARRAY(u32, sched_cmd_reaped);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_sched_txq_sched_order_su_tlv_v {
u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
/* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
DECLARE_FLEX_ARRAY(u32, sched_order_su);
};
enum htt_sched_txq_sched_ineligibility_tlv_enum {
@ -820,7 +853,7 @@ enum htt_sched_txq_sched_ineligibility_tlv_enum {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_sched_txq_sched_ineligibility_tlv_v {
/* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
u32 sched_ineligibility[0];
DECLARE_FLEX_ARRAY(u32, sched_ineligibility);
};
#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
@ -866,18 +899,20 @@ struct htt_stats_tx_sched_cmn_tlv {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
/* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
DECLARE_FLEX_ARRAY(u32, gen_mpdu_end_reason);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_tqm_list_mpdu_stats_tlv_v {
u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
/* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
DECLARE_FLEX_ARRAY(u32, list_mpdu_end_reason);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
u32 list_mpdu_cnt_hist[0];
/* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
/* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
DECLARE_FLEX_ARRAY(u32, list_mpdu_cnt_hist);
};
struct htt_tx_tqm_pdev_stats_tlv_v {
@ -1076,7 +1111,7 @@ struct htt_tx_de_compl_stats_tlv {
* ENTRIES_PER_BIN_COUNT)
*/
struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
u32 fw2wbm_ring_full_hist[0];
DECLARE_FLEX_ARRAY(u32, fw2wbm_ring_full_hist);
};
struct htt_tx_de_cmn_stats_tlv {
@ -1129,7 +1164,7 @@ struct htt_ring_if_cmn_tlv {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_sfm_client_user_tlv_v {
/* Number of DWORDS used per user and per client */
u32 dwords_used_by_user_n[0];
DECLARE_FLEX_ARRAY(u32, dwords_used_by_user_n);
};
struct htt_sfm_client_tlv {
@ -1297,6 +1332,8 @@ struct htt_tx_pdev_rate_stats_tlv {
#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
struct htt_rx_pdev_rate_stats_tlv {
u32 mac_id__word;
@ -1375,6 +1412,21 @@ struct htt_rx_pdev_rate_stats_tlv {
u32 per_chain_rssi_pkt_type;
s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
u32 rx_su_ndpa;
u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
u32 rx_mu_ndpa;
u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
u32 rx_br_poll;
u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
};
/* == RX PDEV/SOC STATS == */
@ -1397,12 +1449,14 @@ struct htt_rx_soc_fw_stats_tlv {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
/* HTT_RX_STATS_REFILL_MAX_RING */
DECLARE_FLEX_ARRAY(u32, refill_ring_empty_cnt);
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */
/* HTT_RX_STATS_REFILL_MAX_RING */
DECLARE_FLEX_ARRAY(u32, refill_ring_num_refill);
};
/* RXDMA error code from WBM released packets */
@ -1434,7 +1488,7 @@ enum htt_rx_rxdma_error_code_enum {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
DECLARE_FLEX_ARRAY(u32, rxdma_err); /* HTT_RX_RXDMA_MAX_ERR_CODE */
};
/* REO error code from WBM released packets */
@ -1466,7 +1520,7 @@ enum htt_rx_reo_error_code_enum {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */
DECLARE_FLEX_ARRAY(u32, reo_err); /* HTT_RX_REO_MAX_ERR_CODE */
};
/* == RX PDEV STATS == */
@ -1583,13 +1637,13 @@ struct htt_rx_pdev_fw_stats_phy_err_tlv {
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
/* Num error MPDU for each RxDMA error type */
u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */
DECLARE_FLEX_ARRAY(u32, fw_ring_mpdu_err); /* HTT_RX_STATS_RXDMA_MAX_ERR */
};
/* NOTE: Variable length TLV, use length spec to infer array size */
struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
/* Num MPDU dropped */
u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */
DECLARE_FLEX_ARRAY(u32, fw_mpdu_drop); /* HTT_RX_STATS_FW_DROP_REASON_MAX */
};
#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
@ -1858,7 +1912,7 @@ struct htt_phy_counters_tlv {
u32 phytx_abort_cnt;
/* number of times rx abort initiated by phy */
u32 phyrx_abort_cnt;
/* number of rx defered count initiated by phy */
/* number of rx deferred count initiated by phy */
u32 phyrx_defer_abort_cnt;
/* number of sizing events generated at LSTF */
u32 rx_gain_adj_lstf_event_cnt;
@ -1912,6 +1966,47 @@ struct htt_phy_stats_tlv {
u32 fw_run_time;
};
struct htt_phy_reset_counters_tlv {
u32 pdev_id;
u32 cf_active_low_fail_cnt;
u32 cf_active_low_pass_cnt;
u32 phy_off_through_vreg_cnt;
u32 force_calibration_cnt;
u32 rf_mode_switch_phy_off_cnt;
};
struct htt_phy_reset_stats_tlv {
u32 pdev_id;
u32 chan_mhz;
u32 chan_band_center_freq1;
u32 chan_band_center_freq2;
u32 chan_phy_mode;
u32 chan_flags;
u32 chan_num;
u32 reset_cause;
u32 prev_reset_cause;
u32 phy_warm_reset_src;
u32 rx_gain_tbl_mode;
u32 xbar_val;
u32 force_calibration;
u32 phyrf_mode;
u32 phy_homechan;
u32 phy_tx_ch_mask;
u32 phy_rx_ch_mask;
u32 phybb_ini_mask;
u32 phyrf_ini_mask;
u32 phy_dfs_en_mask;
u32 phy_sscan_en_mask;
u32 phy_synth_sel_mask;
u32 phy_adfs_freq;
u32 cck_fir_settings;
u32 phy_dyn_pri_chan;
u32 cca_thresh;
u32 dyn_cca_status;
u32 rxdesense_thresh_hw;
u32 rxdesense_thresh_sw;
};
struct htt_peer_ctrl_path_txrx_stats_tlv {
/* peer mac address */
u8 peer_mac_addr[ETH_ALEN];

View file

@ -751,6 +751,102 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[20];
int len;
spin_lock_bh(&ar->data_lock);
len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_ps_state = {
.open = simple_open,
.read = ath11k_dbg_sta_read_peer_ps_state,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
u64 time_since_station_in_power_save;
char buf[20];
int len;
spin_lock_bh(&ar->data_lock);
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
arsta->peer_current_ps_valid)
time_since_station_in_power_save = jiffies_to_msecs(jiffies
- arsta->ps_start_jiffies);
else
time_since_station_in_power_save = 0;
len = scnprintf(buf, sizeof(buf), "%llu\n",
time_since_station_in_power_save);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_current_ps_duration = {
.open = simple_open,
.read = ath11k_dbg_sta_read_current_ps_duration,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
char buf[20];
u64 power_save_duration;
int len;
spin_lock_bh(&ar->data_lock);
if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
arsta->peer_current_ps_valid)
power_save_duration = jiffies_to_msecs(jiffies
- arsta->ps_start_jiffies)
+ arsta->ps_total_duration;
else
power_save_duration = arsta->ps_total_duration;
len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_total_ps_duration = {
.open = simple_open,
.read = ath11k_dbg_sta_read_total_ps_duration,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@ -778,4 +874,15 @@ void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vi
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
ar->ab->wmi_ab.svc_map)) {
debugfs_create_file("current_ps_duration", 0440, dir, sta,
&fops_current_ps_duration);
debugfs_create_file("total_ps_duration", 0440, dir, sta,
&fops_total_ps_duration);
}
}

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if defined(__FreeBSD__)
@ -38,6 +39,7 @@ void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
}
ath11k_peer_rx_tid_cleanup(ar, peer);
peer->dp_setup_done = false;
crypto_free_shash(peer->tfm_mmic);
spin_unlock_bh(&ab->base_lock);
}
@ -74,7 +76,8 @@ int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ab, "failed to setup rx defrag context\n");
return ret;
tid--;
goto peer_clean;
}
/* TODO: Setup other peer specific resource used in data path */
@ -134,13 +137,11 @@ static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
switch (type) {
case HAL_WBM2SW_RELEASE:
if (ring_num < 3) {
grp_mask = &ab->hw_params.ring_mask->tx[0];
} else if (ring_num == 3) {
if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
return -ENOENT;
grp_mask = &ab->hw_params.ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
@ -374,6 +375,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@ -399,9 +401,12 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, i, 0,
DP_TCL_DATA_RING_SIZE);
HAL_TCL_DATA, tcl_num, 0,
ab->hw_params.tx_ring_size);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
@ -409,7 +414,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, i, 0,
HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
@ -434,7 +439,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
3, 0, DP_RX_RELEASE_RING_SIZE);
DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
@ -787,9 +792,10 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int i, j;
int tot_work_done = 0;
if (ab->hw_params.ring_mask->tx[grp_id]) {
i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
ath11k_dp_tx_completion_handler(ab, i);
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
ab->hw_params.ring_mask->tx[grp_id])
ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
@ -976,7 +982,7 @@ static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* When v2_map_support is true:for STA mode, enable address
* search index, tcl uses ast_hash value in the descriptor.
* When v2_map_support is false: for STA mode, dont' enable
* When v2_map_support is false: for STA mode, don't enable
* address search index.
*/
switch (arvif->vdev_type) {

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@ -115,6 +116,8 @@ struct ath11k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
u32 dest_mon_stuck;
u32 dest_mon_not_reaped;
};
struct dp_full_mon_mpdu {
@ -167,6 +170,7 @@ struct ath11k_mon_data {
struct ath11k_pdev_dp {
u32 mac_id;
u32 mon_dest_ring_stuck_cnt;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
@ -200,6 +204,7 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
#define DP_TX_COMP_RING_SIZE 32768
#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
@ -209,7 +214,7 @@ struct ath11k_pdev_dp {
#define DP_REO_REINJECT_RING_SIZE 32
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 256
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RXDMA_REFILL_RING_SIZE 2048
@ -219,6 +224,8 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_RX_RELEASE_RING_NUM 3
#define DP_RX_BUFFER_SIZE 2048
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
@ -296,12 +303,16 @@ struct ath11k_dp {
#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
/* HTT tx completion is overlayed in wbm_release_ring */
#define HTT_INVALID_PEER_ID 0xffff
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID GENMASK(15, 0)
#define HTT_TX_WBM_COMP_INFO2_VALID BIT(21)
struct htt_tx_wbm_completion {
u32 info0;
@ -463,7 +474,7 @@ enum htt_srng_ring_id {
* 3'b010: 4 usec
* 3'b011: 8 usec (default)
* 3'b100: 16 usec
* Others: Reserverd
* Others: Reserved
* b'19 - response_required:
* Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
* b'20:31 - reserved: reserved for future use
@ -990,8 +1001,7 @@ struct htt_rx_ring_tlv_filter {
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
/**
* Enumeration for full monitor mode destination ring select
/* Enumeration for full monitor mode destination ring select
* 0 - REO destination ring select
* 1 - FW destination ring select
* 2 - SW destination ring select
@ -1170,12 +1180,16 @@ struct ath11k_htt_ppdu_stats_msg {
u32 ppdu_id;
u32 timestamp;
u32 rsvd;
u8 data[0];
u8 data[];
} __packed;
struct htt_tlv {
u32 header;
#if defined(__linux__)
u8 value[];
#elif defined(__FreeBSD__)
u8 value[0];
#endif
} __packed;
#define HTT_TLV_TAG GENMASK(11, 0)
@ -1362,7 +1376,7 @@ struct htt_ppdu_stats_usr_cmn_array {
* tx_ppdu_stats_info is variable length, with length =
* number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
*/
struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
struct htt_tx_ppdu_stats_info tx_ppdu_info[];
} __packed;
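Given the sizing rule in the comment above (length = number_of_ppdu_stats * sizeof(struct htt_tx_ppdu_stats_info)), a consumer can recover the entry count from the TLV payload length; a one-line sketch where payload_len is a hypothetical input in bytes:

u16 num_ppdu_stats = payload_len / sizeof(struct htt_tx_ppdu_stats_info);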
struct htt_ppdu_user_stats {
@ -1388,8 +1402,7 @@ struct htt_ppdu_stats_info {
struct list_head list;
};
/**
* @brief target -> host packet log message
/* @brief target -> host packet log message
*
* @details
* The following field definitions describe the format of the packet log
@ -1424,11 +1437,10 @@ struct htt_ppdu_stats_info {
*/
struct htt_pktlog_msg {
u32 hdr;
u8 payload[0];
u8 payload[];
};
/**
* @brief host -> target FW extended statistics retrieve
/* @brief host -> target FW extended statistics retrieve
*
* @details
* The following field definitions describe the format of the HTT host
@ -1563,8 +1575,7 @@ struct htt_ext_stats_cfg_params {
u32 cfg3;
};
/**
* @brief target -> host extended statistics upload
/* @brief target -> host extended statistics upload
*
* @details
* The following field definitions describe the format of the HTT target
@ -1645,7 +1656,7 @@ struct ath11k_htt_extd_stats_msg {
u32 info0;
u64 cookie;
u32 info1;
u8 data[0];
u8 data[];
} __packed;
#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)

View file

@ -389,10 +389,10 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
goto fail_free_skb;
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
rx_ring->bufs_max * 3, GFP_ATOMIC);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
(rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id < 0)
if (buf_id <= 0)
goto fail_dma_unmap;
desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
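The replenish path now carves buffer ID 0 out of the IDR space (the idr_alloc() start moves from 0 to 1, with the range end bumped to match), which is what lets the REO destination loop later in this change treat a cookie that decodes to buf_id 0 as stale and skip it rather than feed it to idr_find(). The producer/consumer pairing in miniature:

/* Producer: IDs start at 1, so 0 is never a valid cookie. */
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
		   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
if (buf_id <= 0)	/* negative errno, or the reserved 0 */
	goto fail_dma_unmap;

/* Consumer: a descriptor whose cookie holds buf_id 0 is stale. */
if (unlikely(buf_id == 0))
	continue;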
@ -435,7 +435,6 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
struct dp_rxdma_ring *rx_ring)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *skb;
int buf_id;
@ -453,28 +452,6 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
/* if rxdma1_enable is false, mon_status_refill_ring
* isn't setup, so don't clean.
*/
if (!ar->ab->hw_params.rxdma1_enable)
return 0;
rx_ring = &dp->rx_mon_status_refill_ring[0];
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
idr_remove(&rx_ring->bufs_idr, buf_id);
/* XXX: Understand where internal driver does this dma_unmap
* of rxdma_buffer.
*/
dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
dev_kfree_skb_any(skb);
}
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
return 0;
}
@ -691,13 +668,18 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct dp_reo_cmd *cmd, *tmp;
struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
struct dp_rx_tid *rx_tid;
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
dma_unmap_single(ab->dev, cmd->data.paddr,
cmd->data.size, DMA_BIDIRECTIONAL);
kfree(cmd->data.vaddr);
rx_tid = &cmd->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
kfree(cmd);
}
@ -705,9 +687,13 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.paddr,
cmd_cache->data.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.vaddr);
rx_tid = &cmd_cache->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
@ -721,10 +707,12 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
if (rx_tid->vaddr) {
dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
}
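Every REO cleanup path in this change now guards on rx_tid->vaddr and NULLs it after freeing, making the paths safe to run in any order: the command callback, the cache-flush list teardown, and the tid delete can each get there first without a double dma_unmap_single()/kfree(). The driver open-codes it; condensed into a hypothetical helper for illustration:

/* Hypothetical helper, not in the patch: free-once semantics. */
static void rx_tid_mem_release(struct ath11k_base *ab,
			       struct dp_rx_tid *rx_tid)
{
	if (!rx_tid->vaddr)
		return;		/* another path already released it */

	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->vaddr);
	rx_tid->vaddr = NULL;	/* later calls become no-ops */
}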
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
@ -763,6 +751,7 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
}
@ -815,6 +804,7 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@ -827,6 +817,8 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (!rx_tid->active)
return;
rx_tid->active = false;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
cmd.addr_hi = upper_32_bits(rx_tid->paddr);
@ -835,14 +827,17 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath11k_dp_rx_tid_del_func);
if (ret) {
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
}
rx_tid->active = false;
rx_tid->paddr = 0;
rx_tid->size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@ -989,6 +984,7 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
rx_tid->active = false;
@ -1013,7 +1009,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
peer = ath11k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
peer_mac);
spin_unlock_bh(&ab->base_lock);
return -ENOENT;
}
@ -1026,7 +1023,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
if (ret) {
ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
peer_mac, tid, ret);
return ret;
}
@ -1034,8 +1032,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
peer_mac, paddr,
tid, 1, ba_win_sz);
if (ret)
ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
tid, ret);
ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
return ret;
}
@ -1068,6 +1066,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
ret = dma_mapping_error(ab->dev, paddr);
if (ret) {
spin_unlock_bh(&ab->base_lock);
ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
peer_mac, tid, ret);
goto err_mem_free;
}
@ -1081,15 +1081,16 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
paddr, tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
tid, ret);
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
}
return ret;
err_mem_free:
kfree(vaddr);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
return ret;
}
@ -1558,13 +1559,12 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
{
struct htt_ppdu_stats_info *ppdu_info;
spin_lock_bh(&ar->data_lock);
lockdep_assert_held(&ar->data_lock);
if (!list_empty(&ar->ppdu_stats_info)) {
list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
if (ppdu_info->ppdu_id == ppdu_id) {
spin_unlock_bh(&ar->data_lock);
if (ppdu_info->ppdu_id == ppdu_id)
return ppdu_info;
}
}
if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
@ -1576,16 +1576,13 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
kfree(ppdu_info);
}
}
spin_unlock_bh(&ar->data_lock);
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
spin_lock_bh(&ar->data_lock);
list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
ar->ppdu_stat_list_depth++;
spin_unlock_bh(&ar->data_lock);
return ppdu_info;
}
@ -1609,16 +1606,17 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ret = -EINVAL;
goto exit;
goto out;
}
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
spin_lock_bh(&ar->data_lock);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
if (!ppdu_info) {
ret = -EINVAL;
goto exit;
goto out_unlock_data;
}
ppdu_info->ppdu_id = ppdu_id;
@ -1627,10 +1625,13 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
(void *)ppdu_info);
if (ret) {
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
goto exit;
goto out_unlock_data;
}
exit:
out_unlock_data:
spin_unlock_bh(&ar->data_lock);
out:
rcu_read_unlock();
return ret;
@ -1674,7 +1675,7 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
backpressure_time = *data;
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
@ -2452,7 +2453,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
} else if (channel_num >= 36 && channel_num <= 177) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
@ -2510,7 +2511,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
spin_unlock_bh(&ar->ab->base_lock);
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@ -2543,7 +2544,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
/* PN for multicast packets are not validated in HW,
* so skip 802.3 rx path
* Also, fast_rx expectes the STA to be authorized, hence
* Also, fast_rx expects the STA to be authorized, hence
* eapol packets are sent in slow path.
*/
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@ -2697,9 +2698,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
spin_lock_bh(&srng->lock);
try_again:
ath11k_hal_srng_access_begin(ab, srng);
try_again:
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
@ -2709,6 +2710,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
if (unlikely(buf_id == 0))
continue;
ar = ab->pdevs[mac_id].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
@ -2810,6 +2814,9 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
if (!rx_stats)
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@ -3070,39 +3077,51 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
spin_lock_bh(&rx_ring->idr_lock);
skb = idr_find(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (!skb) {
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
rxcb = ATH11K_SKB_RXCB(skb);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
tlv = (struct hal_tlv_hdr *)skb->data;
if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
HAL_RX_STATUS_BUFFER_DONE) {
ath11k_warn(ab, "mon status DONE not set %lx\n",
ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl));
dev_kfree_skb_any(skb);
tlv->tl), buf_id);
/* If done status is missing, hold onto status
* ring until status is done for this status
* ring buffer.
* Keep HP in mon_status_ring unchanged,
* and break from here.
* Check status for same buffer for next time
*/
pmon->buf_state = DP_MON_STATUS_NO_DMA;
goto move_next;
break;
}
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (ab->hw_params.full_monitor_mode) {
ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
if (paddr == pmon->mon_status_paddr)
pmon->buf_state = DP_MON_STATUS_MATCH;
}
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
__skb_queue_tail(skb_list, skb);
} else {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
@ -3158,8 +3177,11 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
int i;
tfm = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(tfm))
if (IS_ERR(tfm)) {
ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
PTR_ERR(tfm));
return PTR_ERR(tfm);
}
spin_lock_bh(&ab->base_lock);
@ -3167,6 +3189,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
if (!peer) {
ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
spin_unlock_bh(&ab->base_lock);
crypto_free_shash(tfm);
return -ENOENT;
}
@ -3178,6 +3201,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
}
peer->tfm_mmic = tfm;
peer->dp_setup_done = true;
spin_unlock_bh(&ab->base_lock);
return 0;
@ -3632,6 +3656,13 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
ret = -ENOENT;
goto out_unlock;
}
if (!peer->dp_setup_done) {
ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
peer->addr, peer_id);
ret = -ENOENT;
goto out_unlock;
}
rx_tid = &peer->rx_tid[tid];
if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
@ -3647,7 +3678,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
goto out_unlock;
}
if (frag_no > __fls(rx_tid->rx_frag_bitmap))
if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
@ -4877,7 +4908,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
@ -4919,38 +4949,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
__le16 qos_field;
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
if (ieee80211_is_data_qos(wh->frame_control)) {
struct ieee80211_qos_hdr *qwh =
(struct ieee80211_qos_hdr *)hdr_desc;
qos_field = qwh->qos_ctrl;
if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
}
msdu = head_msdu;
while (msdu) {
rx_desc = (struct hal_rx_desc *)msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
memcpy(dest, hdr_desc, wifi_hdr_len);
memcpy(dest + wifi_hdr_len,
(u8 *)&qos_field, sizeof(__le16));
memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@ -4959,7 +4978,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
goto err_merge_fail;
ath11k_dbg(ab, ATH11K_DBG_DATA,
"mpdu_buf %pK mpdu_buf->len %u",
"mpdu_buf %p mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath11k_dbg(ab, ATH11K_DBG_DATA,
@ -4974,8 +4993,98 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
return NULL;
}
static void
ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
}
static void
ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
u8 *rtap_buf)
{
u32 rtap_len = 0;
put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
rtap_len += 2;
put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
rtap_len += 2;
rtap_buf[rtap_len] = rx_status->he_RU[0];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[1];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[2];
rtap_len += 1;
rtap_buf[rtap_len] = rx_status->he_RU[3];
}
static void ath11k_update_radiotap(struct ath11k *ar,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *mon_skb,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
if (ppduinfo->nss)
rxs->nss = ppduinfo->nss;
if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
} else if (ppduinfo->he_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->vht_flags) {
rxs->encoding = RX_ENC_VHT;
rxs->rate_idx = ppduinfo->rate;
} else if (ppduinfo->ht_flags) {
rxs->encoding = RX_ENC_HT;
rxs->rate_idx = ppduinfo->rate;
} else {
rxs->encoding = RX_ENC_LEGACY;
sband = &ar->mac.sbands[rxs->band];
rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
ppduinfo->cck_flag);
}
rxs->mactime = ppduinfo->tsft;
}
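The skb_push() + put_unaligned_le16() sequences above hand mac80211 a buffer it will interpret as struct ieee80211_radiotap_he (six little-endian 16-bit words) or struct ieee80211_radiotap_he_mu (two flag words plus the RU channel bytes) once RX_FLAG_RADIOTAP_HE/RX_FLAG_RADIOTAP_HE_MU is set, so the write order must track those structs from <net/ieee80211_radiotap.h>. A compile-time cross-check along these lines (an addition of ours, not in the patch) would pin the HE case down:

#include <linux/build_bug.h>
#include <net/ieee80211_radiotap.h>

/* he_data1..he_data6 are emitted back to back: exactly the struct. */
static_assert(sizeof(struct ieee80211_radiotap_he) == 6 * sizeof(__le16));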
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
@ -5011,6 +5120,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
@ -5029,6 +5139,12 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
return -EINVAL;
}
/* The destination ring processing is stuck if the destination is not
* moving while status ring moves 16 PPDU. The destination ring processing
* skips this destination ring PPDU as a workaround.
*/
#define MON_DEST_RING_STUCK_MAX_CNT 16
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
@ -5042,6 +5158,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
@ -5052,7 +5169,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
if (!mon_dst_srng) {
ath11k_warn(ar->ab,
"HAL Monitor Destination Ring Init Failed -- %pK",
"HAL Monitor Destination Ring Init Failed -- %p",
mon_dst_srng);
return;
}
@ -5071,20 +5188,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
head_msdu = NULL;
tail_msdu = NULL;
rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
rx_bufs_used += mpdu_rx_bufs_used;
if (mpdu_rx_bufs_used) {
dp->mon_dest_ring_stuck_cnt = 0;
} else {
dp->mon_dest_ring_stuck_cnt++;
rx_mon_stats->dest_mon_not_reaped++;
}
if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
rx_mon_stats->dest_mon_stuck++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
pmon->mon_ppdu_info.ppdu_id, ppdu_id,
dp->mon_dest_ring_stuck_cnt,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
pmon->mon_ppdu_info.ppdu_id = ppdu_id;
continue;
}
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"dest_rx: new ppdu_id %x != status ppdu_id %x",
ppdu_id, pmon->mon_ppdu_info.ppdu_id);
"dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
ppdu_id, pmon->mon_ppdu_info.ppdu_id,
rx_mon_stats->dest_mon_not_reaped,
rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
@ -5124,7 +5265,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
u32 rx_buf_sz;
u16 log_type = 0;
u16 log_type;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
@ -5146,11 +5287,16 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
rx_buf_sz = DP_RX_BUFFER_SIZE;
} else {
log_type = ATH11K_PKTLOG_TYPE_INVALID;
rx_buf_sz = 0;
}
if (log_type)
if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
@ -5411,6 +5557,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
&pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@ -101,7 +102,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
u8 ring_selector = 0, ring_map = 0;
u32 ring_selector = 0;
u8 ring_map = 0;
bool tcl_ring_retry;
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
@ -113,19 +115,13 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
/* Let the default ring selection be based on current processor
* number, where one of the 3 tcl rings are selected based on
* the smp_processor_id(). In case that ring
* is full/busy, we resort to other available rings.
* If all rings are full, we drop the packet.
* //TODO Add throttling logic when all rings are full
*/
ring_selector = smp_processor_id();
ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
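Replacing the hard-coded smp_processor_id() with hw_ops->get_ring_selector(skb), and widening ring_selector to u32 above, lets each chip pick its own spreading policy. A hedged sketch of the two obvious ops, with illustrative names (upstream wires the legacy chips to the CPU id and WCN6750 to a flow hash):

/* Illustrative selector ops; names are ours, not the driver's. */
static u32 get_ring_selector_cpu(struct sk_buff *skb)
{
	/* legacy policy: spread tx across TCL rings by current CPU */
	return smp_processor_id();
}

static u32 get_ring_selector_hash(struct sk_buff *skb)
{
	/* flow hash keeps one flow on one ring, avoiding reordering */
	return skb_get_hash(skb);
}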
@ -137,7 +133,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
!ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
@ -255,7 +252,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
* Restart ring selection if some rings are not checked yet.
*/
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
ab->hw_params.max_tx_ring > 1) {
ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
@ -327,10 +324,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct dp_tx_ring *tx_ring,
struct ath11k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k *ar;
struct ath11k_peer *peer;
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
@ -352,6 +351,11 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (!skb_cb->vif) {
dev_kfree_skb_any(msdu);
return;
}
memset(&info->status, 0, sizeof(info->status));
if (ts->acked) {
@ -359,13 +363,30 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.is_valid_ack_signal = true;
info->status.flags |=
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
ieee80211_tx_status(ar->hw, msdu);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n",
ts->peer_id);
spin_unlock_bh(&ab->base_lock);
dev_kfree_skb_any(msdu);
return;
}
spin_unlock_bh(&ab->base_lock);
status.sta = peer->sta;
status.info = info;
status.skb = msdu;
ieee80211_tx_status_ext(ar->hw, &status);
}
static void
@ -381,7 +402,11 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
struct ath11k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
#if defined(__linux__)
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
#elif defined(__FreeBSD__)
status_desc = (void *)(desc + HTT_TX_WBM_COMP_STATUS_OFFSET);
#endif
wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
status_desc->info0);
@ -393,7 +418,15 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
ts.msdu_id = msdu_id;
ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
status_desc->info1);
if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
status_desc->info2);
else
ts.peer_id = HTT_INVALID_PEER_ID;
ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
@ -438,7 +471,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
u8 mcs, rate_idx, ofdma;
u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
@ -530,9 +563,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
struct ieee80211_rate_status status_rate = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@ -564,7 +602,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
info->status.is_valid_ack_signal = true;
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
@ -595,12 +633,31 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
* Might end up reporting it out-of-band from HTT stats.
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n",
ts->peer_id);
spin_unlock_bh(&ab->base_lock);
dev_kfree_skb_any(msdu);
return;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
status.sta = peer->sta;
status.skb = msdu;
status.info = info;
rate = arsta->last_txrate;
ieee80211_tx_status(ar->hw, msdu);
status_rate.rate_idx = rate;
status_rate.try_count = 1;
status.rates = &status_rate;
status.n_rates = 1;
spin_unlock_bh(&ab->base_lock);
ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
@ -742,7 +799,7 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
return 0;
/* Can this be optimized so that we keep the pending command list only
* for tid delete command to free up the resoruce on the command status
* for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
@ -923,14 +980,10 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
params.low_threshold);
}
ath11k_dbg(ab, ATH11k_DBG_HAL,
"%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
__func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
cmd->msi_data);
ath11k_dbg(ab, ATH11k_DBG_HAL,
"ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
ring_id, ring_type, cmd->intr_info, cmd->info2);
ath11k_dbg(ab, ATH11K_DBG_DP_TX,
"htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)


@ -13,6 +13,7 @@ struct ath11k_dp_htt_wbm_tx_status {
u32 msdu_id;
bool acked;
int ack_rssi;
u16 peer_id;
};
void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);


@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if defined(__FreeBSD__)
#include <asm/io.h>
@ -128,7 +129,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
},
{ /* WBM2SW_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
.max_rings = 4,
.max_rings = 5,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
@ -1020,8 +1021,8 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
srng->u.src_ring.hp_addr =
(u32 *)((unsigned long)ab->mem + reg_base);
else
ath11k_dbg(ab, ATH11k_DBG_HAL,
"hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
ath11k_dbg(ab, ATH11K_DBG_HAL,
"type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base,
(unsigned long)srng->u.src_ring.hp_addr -
@ -1054,7 +1055,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
(u32 *)((unsigned long)ab->mem + reg_base +
(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
else
ath11k_dbg(ab, ATH11k_DBG_HAL,
ath11k_dbg(ab, ATH11K_DBG_HAL,
"type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
type, ring_num,
reg_base + (HAL_REO1_RING_TP(ab) -
@ -1094,10 +1095,10 @@ static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
srng = &hal->srng_list[ring_id];
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
else
srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
(unsigned long)ab->mem);
}
@ -1129,10 +1130,10 @@ int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
ring_num);
ath11k_dbg(ab, ATH11k_DBG_HAL,
"target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
ath11k_dbg(ab, ATH11K_DBG_HAL,
"update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
target_reg,
HAL_SHADOW_REG(shadow_cfg_idx),
HAL_SHADOW_REG(ab, shadow_cfg_idx),
shadow_cfg_idx,
ring_type, ring_num);
@ -1175,8 +1176,8 @@ void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
{
lockdep_assert_held(&srng->lock);
/* check whether the ring is emptry. Update the shadow
* HP only when then ring isn't' empty.
/* check whether the ring is empty. Update the shadow
* HP only when the ring isn't empty.
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
@ -1205,12 +1206,12 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
s = &hal->srng_config[HAL_REO_REINJECT];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
s = &hal->srng_config[HAL_REO_CMD];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
s = &hal->srng_config[HAL_REO_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
@ -1231,16 +1232,20 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
@ -1248,8 +1253,9 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB;
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -


@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@ -31,12 +32,12 @@ struct ath11k_base;
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the register address from bar0 of shadow register x */
#define HAL_SHADOW_BASE_ADDR 0x000008fc
#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr
#define HAL_SHADOW_NUM_REGS 36
#define HAL_HP_OFFSET_IN_REG_START 1
#define HAL_OFFSET_FROM_HP_TO_TP 4
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x)))
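A quick arithmetic check of the reworked macro, assuming the old fixed base 0x000008fc above as the per-chip hal_shadow_base_addr; consecutive shadow registers sit 4 bytes apart.

#include <assert.h>

#define HAL_SHADOW_BASE_ADDR 0x000008fcU /* illustrative per-chip value */
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))

int main(void)
{
	assert(HAL_SHADOW_REG(0) == 0x000008fc);
	assert(HAL_SHADOW_REG(3) == 0x00000908); /* 0x8fc + 4 * 3 */
	return 0;
}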
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
@ -120,7 +121,7 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
#define HAL_REO1_MISC_CTL 0x00000630
#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl
#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
@ -180,16 +181,18 @@ struct ath11k_base;
#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
/* REO CMD R0 address */
#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
#define HAL_REO_CMD_RING_BASE_LSB(ab) \
ab->hw_params.regs->hal_reo_cmd_ring_base_lsb
/* REO CMD R2 address */
#define HAL_REO_CMD_HP 0x00003020
#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp
/* SW2REO R0 address */
#define HAL_SW2REO_RING_BASE_LSB 0x000001ec
#define HAL_SW2REO_RING_BASE_LSB(ab) \
ab->hw_params.regs->hal_sw2reo_ring_base_lsb
/* SW2REO R2 address */
#define HAL_SW2REO_RING_HP 0x00003028
#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp
/* CE ring R0 address */
#define HAL_CE_DST_RING_BASE_LSB 0x00000000
@ -240,7 +243,7 @@ struct ath11k_base;
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
/* TCL ring feild mask and offset */
/* TCL ring field mask and offset */
#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
@ -265,7 +268,7 @@ struct ath11k_base;
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
/* REO ring feild mask and offset */
/* REO ring field mask and offset */
#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
@ -318,6 +321,10 @@ struct ath11k_base;
#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
/* IPQ5018 ce registers */
#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000
#define HAL_IPQ5018_CE_SIZE 0x200000
/* Add any other errors here and return them in
* ath11k_hal_rx_desc_get_err().
*/
@ -386,6 +393,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
HAL_SRNG_RING_ID_UMAC_ID_END = 127,
HAL_SRNG_RING_ID_LMAC1_ID_START,
@ -447,13 +455,13 @@ enum hal_ring_type {
/**
* enum hal_reo_cmd_type: Enum for REO command type
* @CMD_GET_QUEUE_STATS: Get REO queue status/stats
* @CMD_FLUSH_QUEUE: Flush all frames in REO queue
* @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
* @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
* @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
* @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
* @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* earlier with a 'REO_FLUSH_CACHE' command
* @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
* @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
* @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
* @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
*/
enum hal_reo_cmd_type {
HAL_REO_CMD_GET_QUEUE_STATS = 0,
@ -515,6 +523,7 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
@ -632,7 +641,7 @@ struct hal_srng {
} u;
};
/* Interrupt mitigation - Batch threshold in terms of numer of frames */
/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
@ -675,6 +684,7 @@ enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_SW1_BM,
HAL_RX_BUF_RBM_SW2_BM,
HAL_RX_BUF_RBM_SW3_BM,
HAL_RX_BUF_RBM_SW4_BM,
};
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
@ -870,8 +880,7 @@ struct hal_reo_status {
} u;
};
/**
* HAL context to be used to access SRNG APIs (currently used by data path
/* HAL context to be used to access SRNG APIs (currently used by data path
* and transport (CE) modules)
*/
struct ath11k_hal {


@ -474,6 +474,7 @@ enum hal_tlv_tag {
#define HAL_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_HDR_LEN GENMASK(25, 10)
#define HAL_TLV_USR_ID GENMASK(31, 26)
#define HAL_TLV_ALIGN 4
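The HAL_TLV_USR_ID field is what the monitor-status parser later uses to key per-user stats. A self-contained sketch of decoding one TLV header word with these masks; field_get() is a userspace stand-in for the kernel's FIELD_GET().

#include <stdio.h>

#define HAL_TLV_HDR_TAG 0x000003feU /* GENMASK(9, 1) */
#define HAL_TLV_HDR_LEN 0x03fffc00U /* GENMASK(25, 10) */
#define HAL_TLV_USR_ID  0xfc000000U /* GENMASK(31, 26) */

/* Userspace stand-in for the kernel's FIELD_GET(). */
static unsigned int field_get(unsigned int mask, unsigned int val)
{
	return (val & mask) / (mask & -mask); /* shift by lowest set bit */
}

int main(void)
{
	unsigned int tl = 0x040000a2U; /* example word: tag 0x51, len 0, userid 1 */

	printf("tag 0x%x len %u userid %u\n",
	       field_get(HAL_TLV_HDR_TAG, tl),
	       field_get(HAL_TLV_HDR_LEN, tl),
	       field_get(HAL_TLV_USR_ID, tl));
	return 0;
}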
@ -606,7 +607,7 @@ struct rx_msdu_desc {
*
* msdu_continuation
* When set, this MSDU buffer was not able to hold the entire MSDU.
* The next buffer will therefor contain additional information
* The next buffer will therefore contain additional information
* related to this MSDU.
*
* msdu_length
@ -642,7 +643,7 @@ struct rx_msdu_desc {
*
* da_idx_timeout
* Indicates, an unsuccessful MAC destination address search due
* to the expiration of search timer fot this MSDU.
* to the expiration of search timer for this MSDU.
*/
enum hal_reo_dest_ring_buffer_type {
@ -1677,7 +1678,7 @@ struct hal_wbm_release_ring {
* Producer: SW/TQM/RXDMA/REO/SWITCH
* Consumer: WBM/SW/FW
*
* HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
* HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
* for software based completions.
*
* buf_addr_info
@ -2158,7 +2159,7 @@ struct hal_reo_status_hdr {
* commands.
*
* execution_time (in us)
* The amount of time REO took to excecute the command. Note that
* The amount of time REO took to execute the command. Note that
* this time does not include the duration of the command waiting
* in the command ring, before the execution started.
*


@ -442,52 +442,54 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
desc->hdr.info0);
ath11k_dbg(ab, ATH11k_DBG_HAL, "Queue stats status:\n");
ath11k_dbg(ab, ATH11k_DBG_HAL, "header: cmd_num %d status %d\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
status->uniform_hdr.cmd_num,
status->uniform_hdr.cmd_status);
ath11k_dbg(ab, ATH11k_DBG_HAL, "ssn %ld cur_idx %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
desc->info0),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
desc->info0));
ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
ath11k_dbg(ab, ATH11K_DBG_HAL,
"last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
ath11k_dbg(ab, ATH11K_DBG_HAL,
"rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
ath11k_dbg(ab, ATH11k_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
desc->info1),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
desc->info1));
ath11k_dbg(ab, ATH11k_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
desc->info2),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
desc->info2),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
desc->info2));
ath11k_dbg(ab, ATH11k_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
desc->info3),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
desc->info3));
ath11k_dbg(ab, ATH11k_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
desc->num_mpdu_frames, desc->num_msdu_frames,
desc->total_bytes);
ath11k_dbg(ab, ATH11k_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
desc->info4),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
desc->info4),
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
desc->info4));
ath11k_dbg(ab, ATH11k_DBG_HAL, "looping count %ld\n",
ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
desc->info5));
}
@ -755,7 +757,7 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
/* TODO: HW queue descriptors are currently allocated for max BA
* window size for all QOS TIDs so that same descriptor can be used
* later when ADDBA request is recevied. This should be changed to
* later when ADDBA request is received. This should be changed to
* allocate HW queue descriptors based on BA window size being
* negotiated (0 for non BA cases), and reallocate when BA window
* size changes and also send WMI message to FW to change the REO
@ -802,12 +804,81 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
#define HAL_MAX_UL_MU_USERS 37
static inline void
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
}
static inline void
ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
(struct hal_rx_ppdu_end_user_stats *)rx_tlv;
rx_user_status->mpdu_ok_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[6]));
rx_user_status->mpdu_err_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->rsvd2[8]));
}
static inline void
ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *rx_user_status)
{
rx_user_status->ast_index = ppdu_info->ast_index;
rx_user_status->tid = ppdu_info->tid;
rx_user_status->tcp_msdu_count =
ppdu_info->tcp_msdu_count;
rx_user_status->udp_msdu_count =
ppdu_info->udp_msdu_count;
rx_user_status->other_msdu_count =
ppdu_info->other_msdu_count;
rx_user_status->frame_control = ppdu_info->frame_control;
rx_user_status->frame_control_info_valid =
ppdu_info->frame_control_info_valid;
rx_user_status->data_sequence_control_info_valid =
ppdu_info->data_sequence_control_info_valid;
rx_user_status->first_data_seq_ctrl =
ppdu_info->first_data_seq_ctrl;
rx_user_status->preamble_type = ppdu_info->preamble_type;
rx_user_status->ht_flags = ppdu_info->ht_flags;
rx_user_status->vht_flags = ppdu_info->vht_flags;
rx_user_status->he_flags = ppdu_info->he_flags;
rx_user_status->rs_flags = ppdu_info->rs_flags;
rx_user_status->mpdu_cnt_fcs_ok =
ppdu_info->num_mpdu_fcs_ok;
rx_user_status->mpdu_cnt_fcs_err =
ppdu_info->num_mpdu_fcs_err;
ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
static u16 ath11k_hal_rx_mpduinfo_get_peerid(struct ath11k_base *ab,
struct hal_rx_mpdu_info *mpdu_info)
{
return ab->hw_params.hw_ops->mpdu_info_get_peerid(mpdu_info);
}
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 tlv_tag, u8 *tlv_data)
u32 tlv_tag, u8 *tlv_data, u32 userid)
{
u32 info0, info1;
u32 info0, info1, value;
u8 he_dcm = 0, he_stbc = 0;
u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@ -828,6 +899,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
ppdu_info->ast_index =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
__le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
@ -851,6 +925,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AC:
ppdu_info->vht_flags = 1;
break;
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
default:
break;
}
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats;
ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
rxuser_stats);
}
ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
__le32_to_cpu(eu_stats->rsvd1[0]);
ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
__le32_to_cpu(eu_stats->rsvd1[1]);
break;
}
case HAL_RX_PPDU_END_USER_STATS_EXT: {
struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
@ -917,7 +1029,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info1 = __le32_to_cpu(vht_sig->info1);
ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
info0);
info1);
ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
info1);
gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
@ -949,50 +1061,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
ppdu_info->vht_flag_values5 = group_id;
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
ppdu_info->vht_flag_values4 =
FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
u32 nsts, cp_ltf, dcm;
ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
info0);
ppdu_info->bw =
FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
info0);
ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
ppdu_info->beamformed = info1 &
HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
info0);
nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
switch (cp_ltf) {
if (value == 0)
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
else
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 |=
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
ppdu_info->mcs = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
ppdu_info->dcm = he_dcm;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
ppdu_info->is_stbc = he_stbc;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
/* data4 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/* data5 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_1_X;
break;
case 1:
ppdu_info->gi = HAL_RX_GI_0_8_US;
break;
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
ppdu_info->gi = HAL_RX_GI_1_6_US;
break;
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
if (dcm && ppdu_info->is_stbc)
ppdu_info->gi = HAL_RX_GI_0_8_US;
else
ppdu_info->gi = HAL_RX_GI_3_2_US;
break;
if (he_dcm && he_stbc) {
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
} else {
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
}
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
ppdu_info->beamformed = value;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/* data6 */
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
value++;
ppdu_info->nss = value;
ppdu_info->he_data6 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
ppdu_info->nss = nsts + 1;
ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@ -1000,29 +1213,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
u32 cp_ltf;
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
ppdu_info->bw =
FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
info0);
cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
info0);
ppdu_info->he_mu_flags = 1;
switch (cp_ltf) {
ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
ppdu_info->he_data2 =
IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
/*data3*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
ppdu_info->he_data3 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
he_stbc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
/*data4*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
ppdu_info->he_data4 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
/*data5*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->bw = value;
ppdu_info->he_data5 =
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
switch (value) {
case 0:
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_4_X;
break;
case 1:
ppdu_info->gi = HAL_RX_GI_0_8_US;
he_gi = HE_GI_0_8;
he_ltf = HE_LTF_2_X;
break;
case 2:
ppdu_info->gi = HAL_RX_GI_1_6_US;
he_gi = HE_GI_1_6;
he_ltf = HE_LTF_2_X;
break;
case 3:
ppdu_info->gi = HAL_RX_GI_3_2_US;
he_gi = HE_GI_3_2;
he_ltf = HE_LTF_4_X;
break;
}
ppdu_info->gi = he_gi;
he_gi = (he_gi != 0) ? he_gi - 1 : 0;
ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
ppdu_info->ltf_size = he_ltf;
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
(he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
info1);
ppdu_info->he_data5 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
/*data6*/
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
info0);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
ppdu_info->he_data6 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
/* HE-MU Flags */
/* HE-MU-flags1 */
ppdu_info->he_flags1 =
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
ppdu_info->he_flags1 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
value);
/* HE-MU-flags2 */
ppdu_info->he_flags2 =
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
value = value - 1;
ppdu_info->he_flags2 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@ -1040,7 +1366,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@ -1050,14 +1376,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
info0);
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
info0);
FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@ -1066,17 +1403,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
ppdu_info->he_data1 |=
IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
/* HE-data2 */
ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
he_dcm = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
ppdu_info->ldpc = value;
ppdu_info->he_data3 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
/* HE-data4 */
value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
ppdu_info->he_data4 |=
FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
info0 &
HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
info0);
info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
@ -1092,7 +1452,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB,
FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB,
__le32_to_cpu(rssi->info0));
if (db2dbm) {
@ -1105,9 +1465,11 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
struct hal_rx_mpdu_info *mpdu_info =
(struct hal_rx_mpdu_info *)tlv_data;
u16 peer_id;
peer_id = ab->hw_params.hw_ops->mpdu_info_get_peerid(tlv_data);
peer_id = ath11k_hal_rx_mpduinfo_get_peerid(ab, mpdu_info);
if (peer_id)
ppdu_info->peer_id = peer_id;
break;
@ -1118,6 +1480,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
ppdu_info->tsft = (ppdu_info->tsft << 32) |
__le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
@ -1141,12 +1506,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@ -1158,7 +1525,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
tlv_tag, ptr);
tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);


@ -73,6 +73,36 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
struct hal_rx_user_status {
u32 mcs:4,
nss:3,
ofdma_info_valid:1,
dl_ofdma_ru_start_index:7,
dl_ofdma_ru_width:7,
dl_ofdma_ru_size:8;
u32 ul_ofdma_user_v0_word0;
u32 ul_ofdma_user_v0_word1;
u32 ast_index;
u32 tid;
u16 tcp_msdu_count;
u16 udp_msdu_count;
u16 other_msdu_count;
u16 frame_control;
u8 frame_control_info_valid;
u8 data_sequence_control_info_valid;
u16 first_data_seq_ctrl;
u32 preamble_type;
u16 ht_flags;
u16 vht_flags;
u16 he_flags;
u8 rs_flags;
u32 mpdu_cnt_fcs_ok;
u32 mpdu_cnt_fcs_err;
u32 mpdu_fcs_ok_bitmap[8];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
};
#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
u8 mcs;
u8 nss;
u8 bw;
u8 vht_flag_values1;
u8 vht_flag_values2;
u8 vht_flag_values3[4];
u8 vht_flag_values4;
u8 vht_flag_values5;
u16 vht_flag_values6;
u8 is_stbc;
u8 gi;
u8 ldpc;
@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
u8 rssi_comb;
u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
u16 ht_flags;
u16 vht_flags;
u16 he_flags;
u16 he_mu_flags;
u8 dcm;
u8 ru_alloc;
u8 reception_type;
u64 tsft;
u64 rx_duration;
u16 frame_control;
u32 ast_index;
u8 rs_fcs_err;
u8 rs_flags;
u8 cck_flag;
u8 ofdm_flag;
u8 ulofdma_flag;
u8 frame_control_info_valid;
u16 he_per_user_1;
u16 he_per_user_2;
u8 he_per_user_position;
u8 he_per_user_known;
u16 he_flags1;
u16 he_flags2;
u8 he_RU[4];
u16 he_data1;
u16 he_data2;
u16 he_data3;
u16 he_data4;
u16 he_data5;
u16 he_data6;
u32 ppdu_len;
u32 prev_ppdu_id;
u32 device_id;
u16 first_data_seq_ctrl;
u8 monitor_direct_used;
u8 data_sequence_control_info_valid;
u8 ltf_size;
u8 rxpcu_filter_pass;
char rssi_chain[8][8];
struct hal_rx_user_status userstats;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
__le32 rsvd2[11];
} __packed;
struct hal_rx_ppdu_end_user_stats_ext {
u32 info0;
u32 info1;
u32 info2;
u32 info3;
u32 info4;
u32 info5;
u32 info6;
} __packed;
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
#define HAL_RX_SU_MU_CODING_LDPC 0x01
#define HE_GI_0_8 0
#define HE_GI_0_4 1
#define HE_GI_1_6 2
#define HE_GI_3_2 3
#define HE_LTF_1_X 0
#define HE_LTF_2_X 1
#define HE_LTF_4_X 2
#define HE_LTF_UNKNOWN 3
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
@ -261,7 +385,7 @@ struct hal_rx_he_sig_b2_ofdma_info {
__le32 info0;
} __packed;
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
#define HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20 GENMASK(7, 0)
@ -279,11 +403,22 @@ struct hal_rx_phyrx_rssi_legacy_info {
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_info {
struct hal_rx_mpdu_info_ipq8074 {
__le32 rsvd0;
__le32 info0;
__le32 rsvd1[21];
__le32 rsvd1[11];
__le32 info1;
__le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_qcn9074 {
__le32 rsvd0[10];
__le32 info0;
__le32 rsvd1[2];
__le32 info1;
__le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_wcn6855 {
@ -292,6 +427,14 @@ struct hal_rx_mpdu_info_wcn6855 {
__le32 rsvd1[14];
} __packed;
struct hal_rx_mpdu_info {
union {
struct hal_rx_mpdu_info_ipq8074 ipq8074;
struct hal_rx_mpdu_info_qcn9074 qcn9074;
struct hal_rx_mpdu_info_wcn6855 wcn6855;
} u;
} __packed;
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];


@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
@ -44,8 +45,7 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
(ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =


@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@ -35,6 +36,7 @@ struct hal_tx_info {
u8 lmac_id;
u8 dscp_tid_tbl_idx;
bool enable_mesh;
u8 rbm_id;
};
/* TODO: Check if the actual desc macros can be used instead */


@ -11,6 +11,7 @@
struct ath11k_hif_ops {
u32 (*read32)(struct ath11k_base *sc, u32 address);
void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
void (*irq_enable)(struct ath11k_base *sc);
void (*irq_disable)(struct ath11k_base *sc);
int (*start)(struct ath11k_base *sc);
@ -99,6 +100,15 @@ static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 d
sc->hif.ops->write32(sc, address, data);
}
static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
u32 start, u32 end)
{
if (!ab->hif.ops->read)
return -EOPNOTSUPP;
return ab->hif.ops->read(ab, buf, start, end);
}
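Because ->read stays optional per bus, every caller has to tolerate -EOPNOTSUPP (e.g. a bus that never wires it up). A standalone model of that pattern follows; the names below are illustrative, not driver API.

#include <errno.h>
#include <stdio.h>

struct hif_ops {
	int (*read)(void *buf, unsigned int start, unsigned int end);
};

static int hif_read(const struct hif_ops *ops, void *buf,
		    unsigned int start, unsigned int end)
{
	if (!ops->read)
		return -EOPNOTSUPP; /* this bus does not provide the op */
	return ops->read(buf, start, end);
}

int main(void)
{
	struct hif_ops ahb_like = { .read = NULL };
	char buf[16];

	if (hif_read(&ahb_like, buf, 0, sizeof(buf)) == -EOPNOTSUPP)
		printf("range read skipped: op not wired up on this bus\n");
	return 0;
}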
static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
@ -134,4 +144,5 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
else
*msi_data_idx = ce_id;
}
#endif /* _HIF_H_ */


@ -46,7 +46,6 @@ static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
skb_cb = ATH11K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath11k_dbg(ab, ATH11K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
@ -96,7 +95,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d\n",
"ep %d insufficient credits required %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
@ -104,7 +103,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
}
ep->tx_credits -= credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
"ep %d credits consumed %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
@ -119,6 +118,9 @@ int ath11k_htc_send(struct ath11k_htc *htc,
goto err_credits;
}
ath11k_dbg(ab, ATH11K_DBG_HTC, "tx skb %p eid %d paddr %pad\n",
skb, skb_cb->eid, &skb_cb->paddr);
ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
if (ret)
goto err_unmap;
@ -132,7 +134,7 @@ int ath11k_htc_send(struct ath11k_htc *htc,
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
"ep %d credits reverted %d total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
@ -167,7 +169,7 @@ ath11k_htc_process_credit_report(struct ath11k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath11k_dbg(ab, ATH11K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits got %d total %d\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
@ -239,7 +241,7 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
{
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot suspend complete %d\n", ack);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "suspend complete %d\n", ack);
if (ack)
set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
@ -258,8 +260,10 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
u8 eid;
eid = ATH11K_SKB_CB(skb)->eid;
if (eid >= ATH11K_HTC_EP_COUNT)
if (eid >= ATH11K_HTC_EP_COUNT) {
dev_kfree_skb_any(skb);
return;
}
ep = &htc->endpoint[eid];
spin_lock_bh(&htc->tx_lock);
@ -272,6 +276,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
ep_tx_complete(htc->ab, skb);
}
static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
{
ath11k_dbg(ab, ATH11K_DBG_BOOT, "wakeup from suspend is received\n");
}
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb)
{
@ -280,7 +289,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct ath11k_htc_hdr *hdr;
struct ath11k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
u32 message_id, trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
@ -315,6 +324,9 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
ATH11K_HTC_FLAG_TRAILER_PRESENT;
ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p trailer_present %d\n",
eid, skb, trailer_present);
if (trailer_present) {
u8 *trailer;
@ -347,7 +359,12 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
if (eid == ATH11K_HTC_EP_0) {
struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
switch (FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)) {
message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);
ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p message_id %d\n",
eid, skb, message_id);
switch (message_id) {
case ATH11K_HTC_MSG_READY_ID:
case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
@ -376,6 +393,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
ath11k_htc_suspend_complete(ab, false);
break;
case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
ath11k_htc_wakeup_from_suspend(ab);
break;
default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
@ -385,8 +403,6 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
goto out;
}
ath11k_dbg(ab, ATH11K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
/* poll tx completion for interrupt disabled CE's */
@ -556,7 +572,7 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
htc->target_credit_size = credit_size;
ath11k_dbg(ab, ATH11K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
"target ready total_transmit_credits %d target_credit_size %d\n",
htc->total_transmit_credits, htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
@ -607,7 +623,7 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc,
conn_req->service_id);
if (!tx_alloc)
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
"htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
@ -672,7 +688,7 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc,
}
ath11k_dbg(ab, ATH11K_DBG_HTC,
"HTC Service %s connect response: status: 0x%lx, assigned ep: 0x%lx\n",
"service %s connect response status 0x%lx assigned ep 0x%lx\n",
htc_service_name(service_id),
FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
@ -732,14 +748,14 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc,
return status;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
"htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
"htc service '%s' eid %d tx flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@ -765,7 +781,7 @@ int ath11k_htc_start(struct ath11k_htc *htc)
ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
if (ab->hw_params.credit_flow)
ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n");
ath11k_dbg(ab, ATH11K_DBG_HTC, "using tx credit flow control\n");
else
msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;


@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@ -200,6 +201,10 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
config->ema_max_vap_cnt = ab->num_radios;
config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
}
static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
@ -770,10 +775,10 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL);
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
@ -790,29 +795,111 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
ring_hash_map);
}
static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data)
static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
/* Each hash entry uses three bits to map to a particular ring. */
u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
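The hash map above packs eight REO destination entries into a single 32-bit register, one 4-bit field per hash bucket. A minimal sketch (not driver code) of how such a packed word resolves a hash value to a destination ring, assuming the 4-bit field spacing implied by the shifts:
/* Sketch: extract the destination ring for a REO hash bucket. */
static inline u32 reo_hash_to_dest_ring(u32 ring_hash_map, u32 hash)
{
	return (ring_hash_map >> ((hash & 0x7) * 4)) & 0xf;
}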
static u16
ath11k_hw_ipq8074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
u16 peer_id = 0;
struct hal_rx_mpdu_info *mpdu_info =
(struct hal_rx_mpdu_info *)tlv_data;
peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
__le32_to_cpu(mpdu_info->info0));
__le32_to_cpu(mpdu_info->u.ipq8074.info0));
return peer_id;
}
static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
static u16
ath11k_hw_qcn9074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
u16 peer_id = 0;
peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
__le32_to_cpu(mpdu_info->u.qcn9074.info0));
return peer_id;
}
static u16
ath11k_hw_wcn6855_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
{
u16 peer_id = 0;
struct hal_rx_mpdu_info_wcn6855 *mpdu_info =
(struct hal_rx_mpdu_info_wcn6855 *)tlv_data;
peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
__le32_to_cpu(mpdu_info->info0));
__le32_to_cpu(mpdu_info->u.wcn6855.info0));
return peer_id;
}
static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
{
/* Let the default ring selection be based on current processor
* number, where one of the 3 tcl rings is selected based on
* the smp_processor_id(). In case that ring
* is full/busy, we resort to other available rings.
* If all rings are full, we drop the packet.
*
* TODO: Add throttling logic when all rings are full
*/
return smp_processor_id();
}
static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
{
/* Select the TCL ring based on the flow hash of the SKB instead
* of CPU ID. Since applications pumping the traffic can be scheduled
* on multiple CPUs, there is a chance that packets of the same flow
* could end up on different TCL rings; this could sometimes result in
* out-of-order arrival of the packets at the receiver.
*/
return skb_get_hash(skb);
}
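Both selectors return an unbounded value. A plausible consumer in the transmit path (a sketch under that assumption, not the exact dp_tx code) folds the selector onto the set of usable TCL rings:
/* Sketch: reduce the opaque selector to a valid TCL ring index.
 * num_tcl_rings is assumed to be the number of usable TX rings;
 * on a full ring the caller would retry with the next index.
 */
static inline u8 ath11k_tcl_ring_from_selector(u32 selector, u8 num_tcl_rings)
{
	return selector % num_tcl_rings;
}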
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@ -850,6 +937,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops ipq6018_ops = {
@ -889,6 +977,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qca6390_ops = {
@ -928,6 +1017,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qcn9074_ops = {
@ -964,9 +1054,10 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.mpdu_info_get_peerid = ath11k_hw_qcn9074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6855_ops = {
@ -983,6 +1074,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
@ -1005,11 +1097,95 @@ const struct ath11k_hw_ops wcn6855_ops = {
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
#define ATH11K_TX_RING_MASK_0 0x1
#define ATH11K_TX_RING_MASK_1 0x2
#define ATH11K_TX_RING_MASK_2 0x4
const struct ath11k_hw_ops wcn6750_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_qca6390,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
/* IPQ5018 hw ops is similar to QCN9074 except for the dest ring remap */
const struct ath11k_hw_ops ipq5018_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
.mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
.mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
.tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
.rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
.rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
.rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
.rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
.rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
.rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
.rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
.rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
.rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
.rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
.rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
.rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
.rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
.rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
.rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.reo_setup = ath11k_hw_ipq5018_reo_setup,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
.get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
#define ATH11K_TX_RING_MASK_3 BIT(3)
#define ATH11K_TX_RING_MASK_4 BIT(4)
#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
@ -1060,6 +1236,7 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, 0, 0,
ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
@ -1856,6 +2033,251 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
},
};
const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
.tx = {
ATH11K_TX_RING_MASK_0,
0,
ATH11K_TX_RING_MASK_2,
0,
ATH11K_TX_RING_MASK_4,
},
.rx_mon_status = {
0, 0, 0, 0, 0, 0,
ATH11K_RX_MON_STATUS_RING_MASK_0,
},
.rx = {
0, 0, 0, 0, 0, 0, 0,
ATH11K_RX_RING_MASK_0,
ATH11K_RX_RING_MASK_1,
ATH11K_RX_RING_MASK_2,
ATH11K_RX_RING_MASK_3,
},
.rx_err = {
0, ATH11K_RX_ERR_RING_MASK_0,
},
.rx_wbm_rel = {
0, ATH11K_RX_WBM_REL_RING_MASK_0,
},
.reo_status = {
0, ATH11K_REO_STATUS_RING_MASK_0,
},
.rxdma2host = {
ATH11K_RXDMA2HOST_RING_MASK_0,
ATH11K_RXDMA2HOST_RING_MASK_1,
ATH11K_RXDMA2HOST_RING_MASK_2,
},
.host2rxdma = {
},
};
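Each array in a ring mask is indexed by external IRQ group, and the bitmask at index i names the rings serviced by group i; in the WCN6750 table above, TX rings 0, 2 and 4 land in groups 0, 2 and 4, while RX rings 0-3 land in groups 7-10. A hedged sketch of the test an interrupt handler would perform:
/* Sketch: does ext IRQ group 'grp' service TX ring 'ring'?
 * 'tx' is one of the per-ring-type arrays in ath11k_hw_ring_mask.
 */
static inline bool grp_services_tx_ring(const u8 *tx, int grp, int ring)
{
	return tx[grp] & BIT(ring);
}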
/* Target firmware's Copy Engine configuration for IPQ5018 */
const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host Pktlog */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(0x2000),
.reserved = __cpu_to_le32(0),
},
/* CE8 target->host used only by IPA */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
};
/* Map from service/endpoint to Copy Engine for IPQ5018.
* This table is derived from the CE TABLE, above.
* It is passed to the Target at startup for use by firmware.
*/
const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(3),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(2),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(0),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
.pipenum = __cpu_to_le32(4),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(1),
},
{
.service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
.pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
.pipenum = __cpu_to_le32(5),
},
/* (Additions here) */
{ /* terminator entry */ }
};
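A lookup over such a table resolves one service ID to an UL (host->target) and a DL (target->host) pipe. This is an illustrative sketch only, assuming the zeroed terminator entry above marks the end of the table; the driver itself also knows the table length and handles PIPEDIR_INOUT:
static int svc_to_pipes(const struct service_to_pipe *map, u32 svc_id,
			u8 *ul_pipe, u8 *dl_pipe)
{
	bool found = false;

	for (; __le32_to_cpu(map->service_id); map++) {
		if (__le32_to_cpu(map->service_id) != svc_id)
			continue;
		found = true;
		if (__le32_to_cpu(map->pipedir) == PIPEDIR_OUT)
			*ul_pipe = __le32_to_cpu(map->pipenum);
		else if (__le32_to_cpu(map->pipedir) == PIPEDIR_IN)
			*dl_pipe = __le32_to_cpu(map->pipenum);
	}

	return found ? 0 : -ENOENT;
}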
const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
.ie1_reg_addr = CE_HOST_IE_ADDRESS,
.ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
.ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
};
const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
.ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
.ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
.ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
};
const struct ce_remap ath11k_ce_remap_ipq5018 = {
.base = HAL_IPQ5018_CE_WFSS_REG_BASE,
.size = HAL_IPQ5018_CE_SIZE,
};
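On IPQ5018 the CE register block lives outside the main WCSS mapping, so the interrupt-enable addresses above are pre-rebased as offsets from HAL_IPQ5018_CE_WFSS_REG_BASE and the remap entry tells the driver what to map separately. A hedged sketch of the resulting access pattern, assuming ab->mem_ce is an ioremap of remap->base spanning remap->size bytes:
/* Sketch: CE register access through the separate CE mapping;
 * 'offset' is already expressed relative to the remap base.
 */
static inline u32 ce_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem_ce + offset);
}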
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@ -1901,10 +2323,18 @@ const struct ath11k_hw_regs ipq8074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@ -1925,6 +2355,12 @@ const struct ath11k_hw_regs ipq8074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
/* REO misc control register, not used in IPQ8074 */
.hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qca6390_regs = {
@ -1972,10 +2408,18 @@ const struct ath11k_hw_regs qca6390_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003a4,
.hal_reo_tcl_ring_hp = 0x00003050,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x000004ac,
.hal_reo_status_hp = 0x00003068,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
@ -1996,6 +2440,12 @@ const struct ath11k_hw_regs qca6390_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
/* REO misc control register, not used in QCA6390 */
.hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qcn9074_regs = {
@ -2043,10 +2493,18 @@ const struct ath11k_hw_regs qcn9074_regs = {
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
@ -2067,6 +2525,12 @@ const struct ath11k_hw_regs qcn9074_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
/* REO misc control register, not used in QCN9074 */
.hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs wcn6855_regs = {
@ -2114,10 +2578,18 @@ const struct ath11k_hw_regs wcn6855_regs = {
.hal_reo_tcl_ring_base_lsb = 0x00000454,
.hal_reo_tcl_ring_hp = 0x00003060,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x00000194,
.hal_reo_cmd_ring_hp = 0x00003020,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x0000055c,
.hal_reo_status_hp = 0x00003078,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x000001ec,
.hal_sw2reo_ring_hp = 0x00003028,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
@ -2138,12 +2610,249 @@ const struct ath11k_hw_regs wcn6855_regs = {
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
/* REO misc control register, used for fragment
* destination ring config in WCN6855.
*/
.hal_reo1_misc_ctl = 0x00000630,
};
const struct ath11k_hw_regs wcn6750_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000694,
.hal_tcl1_ring_base_msb = 0x00000698,
.hal_tcl1_ring_id = 0x0000069c,
.hal_tcl1_ring_misc = 0x000006a4,
.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
.hal_tcl1_ring_msi1_data = 0x000006e4,
.hal_tcl2_ring_base_lsb = 0x000006ec,
.hal_tcl_ring_base_lsb = 0x0000079c,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x000008a4,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x000001ec,
.hal_reo1_ring_base_msb = 0x000001f0,
.hal_reo1_ring_id = 0x000001f4,
.hal_reo1_ring_misc = 0x000001fc,
.hal_reo1_ring_hp_addr_lsb = 0x00000200,
.hal_reo1_ring_hp_addr_msb = 0x00000204,
.hal_reo1_ring_producer_int_setup = 0x00000210,
.hal_reo1_ring_msi1_base_lsb = 0x00000234,
.hal_reo1_ring_msi1_base_msb = 0x00000238,
.hal_reo1_ring_msi1_data = 0x0000023c,
.hal_reo2_ring_base_lsb = 0x00000244,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003028,
.hal_reo1_ring_tp = 0x0000302c,
.hal_reo2_ring_hp = 0x00003030,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x000000e4,
.hal_reo_cmd_ring_hp = 0x00003010,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x0000013c,
.hal_sw2reo_ring_hp = 0x00003018,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
.hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
.hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
.hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
.hal_wbm_idle_link_ring_misc = 0x00000884,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001ec,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000924,
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
/* PCIe base address */
.pcie_qserdes_sysclk_en_sel = 0x0,
.pcie_pcs_osc_dtct_config_base = 0x0,
/* Shadow register area */
.hal_shadow_base_addr = 0x00000504,
/* REO misc control register, used for fragment
* destination ring config in WCN6750.
*/
.hal_reo1_misc_ctl = 0x000005d8,
};
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
{
.tcl_ring_num = 0,
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 1,
.wbm_ring_num = 1,
.rbm_id = HAL_RX_BUF_RBM_SW1_BM,
},
{
.tcl_ring_num = 2,
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
};
static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
{
.tcl_ring_num = 0,
.wbm_ring_num = 0,
.rbm_id = HAL_RX_BUF_RBM_SW0_BM,
},
{
.tcl_ring_num = 1,
.wbm_ring_num = 4,
.rbm_id = HAL_RX_BUF_RBM_SW4_BM,
},
{
.tcl_ring_num = 2,
.wbm_ring_num = 2,
.rbm_id = HAL_RX_BUF_RBM_SW2_BM,
},
};
const struct ath11k_hw_regs ipq5018_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000694,
.hal_tcl1_ring_base_msb = 0x00000698,
.hal_tcl1_ring_id = 0x0000069c,
.hal_tcl1_ring_misc = 0x000006a4,
.hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
.hal_tcl1_ring_tp_addr_msb = 0x000006b4,
.hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
.hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
.hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
.hal_tcl1_ring_msi1_base_msb = 0x000006e0,
.hal_tcl1_ring_msi1_data = 0x000006e4,
.hal_tcl2_ring_base_lsb = 0x000006ec,
.hal_tcl_ring_base_lsb = 0x0000079c,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x000008a4,
/* REO2SW(x) R0 ring configuration address */
.hal_reo1_ring_base_lsb = 0x000001ec,
.hal_reo1_ring_base_msb = 0x000001f0,
.hal_reo1_ring_id = 0x000001f4,
.hal_reo1_ring_misc = 0x000001fc,
.hal_reo1_ring_hp_addr_lsb = 0x00000200,
.hal_reo1_ring_hp_addr_msb = 0x00000204,
.hal_reo1_ring_producer_int_setup = 0x00000210,
.hal_reo1_ring_msi1_base_lsb = 0x00000234,
.hal_reo1_ring_msi1_base_msb = 0x00000238,
.hal_reo1_ring_msi1_data = 0x0000023c,
.hal_reo2_ring_base_lsb = 0x00000244,
.hal_reo1_aging_thresh_ix_0 = 0x00000564,
.hal_reo1_aging_thresh_ix_1 = 0x00000568,
.hal_reo1_aging_thresh_ix_2 = 0x0000056c,
.hal_reo1_aging_thresh_ix_3 = 0x00000570,
/* REO2SW(x) R2 ring pointers (head/tail) address */
.hal_reo1_ring_hp = 0x00003028,
.hal_reo1_ring_tp = 0x0000302c,
.hal_reo2_ring_hp = 0x00003030,
/* REO2TCL R0 ring configuration address */
.hal_reo_tcl_ring_base_lsb = 0x000003fc,
.hal_reo_tcl_ring_hp = 0x00003058,
/* SW2REO ring address */
.hal_sw2reo_ring_base_lsb = 0x0000013c,
.hal_sw2reo_ring_hp = 0x00003018,
/* REO CMD ring address */
.hal_reo_cmd_ring_base_lsb = 0x000000e4,
.hal_reo_cmd_ring_hp = 0x00003010,
/* REO status address */
.hal_reo_status_ring_base_lsb = 0x00000504,
.hal_reo_status_hp = 0x00003070,
/* WCSS relative address */
.hal_seq_wcss_umac_ce0_src_reg = 0x08400000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce1_src_reg = 0x08402000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
.hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
- HAL_IPQ5018_CE_WFSS_REG_BASE,
/* WBM Idle address */
.hal_wbm_idle_link_ring_base_lsb = 0x00000874,
.hal_wbm_idle_link_ring_misc = 0x00000884,
/* SW2WBM release address */
.hal_wbm_release_ring_base_lsb = 0x000001ec,
/* WBM2SW release address */
.hal_wbm0_release_ring_base_lsb = 0x00000924,
.hal_wbm1_release_ring_base_lsb = 0x0000097c,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
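The tcl2wbm map gives, per TCL TX ring, the WBM completion ring and the return buffer manager (RBM) to program into TX descriptors; note that WCN6750 routes TCL ring 1 to WBM ring 4 instead of the identity mapping used by IPQ8074. A minimal lookup sketch:
/* Sketch: RBM ID to use when queueing a frame on a given TCL ring. */
static inline u8 tcl_ring_rbm_id(const struct ath11k_hw_hal_params *hal,
				 int tcl_ring_num)
{
	return hal->tcl2wbm_rbm_map[tcl_ring_num].rbm_id;
}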
static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
{.start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
{.start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
{.start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
{.start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
{.start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
{.start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 Ch1, Ch2 ~ Ch41 */
{.start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
{.start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
{.start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
{.start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
{.start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
};
const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = (ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855)),
.freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
};
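SAR power limits are supplied per frequency-range index, so a channel's center frequency has to be mapped back onto one of the ranges above. A sketch of that mapping (an assumption about usage, not the driver's code path):
/* Sketch: find the SAR range index covering 'freq' (MHz);
 * returns -1 when the frequency is outside every range.
 */
static int sar_range_index(const struct cfg80211_sar_capa *capa, u32 freq)
{
	int i;

	for (i = 0; i < capa->num_freq_ranges; i++)
		if (freq >= capa->freq_ranges[i].start_freq &&
		    freq < capa->freq_ranges[i].end_freq)
			return i;

	return -1;
}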

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@ -63,6 +64,7 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
#define TARGET_EMA_MAX_PROFILE_PERIOD 8
#define ATH11K_HW_MAX_QUEUES 4
#define ATH11K_QUEUE_LEN 4096
@ -79,6 +81,12 @@
#define ATH11K_M3_FILE "m3.bin"
#define ATH11K_REGDB_FILE_NAME "regdb.bin"
#if defined(__linux__)
#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem)
#elif defined(__FreeBSD__)
#define ATH11K_CE_OFFSET(ab) ((char *)ab->mem_ce - (char *)ab->mem)
#endif
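Both variants compute the same byte offset; the FreeBSD form casts to char * because arithmetic on void pointers is a GNU C extension that the FreeBSD build avoids.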
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
ATH11K_HW_RATE_CCK_LP_5_5M,
@ -121,8 +129,15 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
struct ath11k_hw_tcl2wbm_rbm_map {
u8 tcl_ring_num;
u8 wbm_ring_num;
u8 rbm_id;
};
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
struct ath11k_hw_params {
@ -150,11 +165,10 @@ struct ath11k_hw_params {
u32 target_ce_count;
const struct service_to_pipe *svc_to_ce_map;
u32 svc_to_ce_map_len;
const struct ce_ie_addr *ce_ie_addr;
const struct ce_remap *ce_remap;
bool single_pdev_only;
u32 rfkill_pin;
u32 rfkill_cfg;
u32 rfkill_on_level;
bool rxdma1_enable;
int num_rxmda_per_pdev;
@ -168,6 +182,7 @@ struct ath11k_hw_params {
u8 summary_pad_sz;
u8 fft_hdr_len;
u16 max_fft_bins;
bool fragment_160mhz;
} spectral;
u16 interface_modes;
@ -176,7 +191,9 @@ struct ath11k_hw_params {
bool supports_shadow_regs;
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
bool coldboot_cal_mm;
bool coldboot_cal_ftm;
bool cbcal_restart_fw;
int fw_mem_mode;
u32 num_vdevs;
u32 num_peers;
@ -189,9 +206,30 @@ struct ath11k_hw_params {
const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory;
bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
bool current_cc_support;
bool dbr_debug_support;
bool global_reset;
const struct cfg80211_sar_capa *bios_sar_capa;
bool m3_fw_support;
bool fixed_bdf_addr;
bool fixed_mem_region;
bool static_window_map;
bool hybrid_bus_type;
bool fixed_fw_mem;
bool support_off_channel_tx;
bool supports_multi_bssid;
struct {
u32 start;
u32 end;
} sram_dump;
bool tcl_ring_retry;
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
};
struct ath11k_hw_ops {
@ -231,9 +269,10 @@ struct ath11k_hw_ops {
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
void (*reo_setup)(struct ath11k_base *ab);
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
u16 (*mpdu_info_get_peerid)(struct hal_rx_mpdu_info *mpdu_info);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
u32 (*get_ring_selector)(struct sk_buff *skb);
};
extern const struct ath11k_hw_ops ipq8074_ops;
@ -241,13 +280,22 @@ extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ops wcn6750_ops;
extern const struct ath11k_hw_ops ipq5018_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074;
extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
extern const struct ce_remap ath11k_ce_remap_ipq5018;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
@ -288,10 +336,16 @@ enum ath11k_bd_ie_board_type {
ATH11K_BD_IE_BOARD_DATA = 1,
};
enum ath11k_bd_ie_regdb_type {
ATH11K_BD_IE_REGDB_NAME = 0,
ATH11K_BD_IE_REGDB_DATA = 1,
};
enum ath11k_bd_ie_type {
/* contains sub IEs of enum ath11k_bd_ie_board_type */
ATH11K_BD_IE_BOARD = 0,
ATH11K_BD_IE_BOARD_EXT = 1,
/* contains sub IEs of enum ath11k_bd_ie_regdb_type */
ATH11K_BD_IE_REGDB = 1,
};
struct ath11k_hw_regs {
@ -337,6 +391,12 @@ struct ath11k_hw_regs {
u32 hal_reo_status_ring_base_lsb;
u32 hal_reo_status_hp;
u32 hal_reo_cmd_ring_base_lsb;
u32 hal_reo_cmd_ring_hp;
u32 hal_sw2reo_ring_base_lsb;
u32 hal_sw2reo_ring_hp;
u32 hal_seq_wcss_umac_ce0_src_reg;
u32 hal_seq_wcss_umac_ce0_dst_reg;
u32 hal_seq_wcss_umac_ce1_src_reg;
@ -352,11 +412,30 @@ struct ath11k_hw_regs {
u32 pcie_qserdes_sysclk_en_sel;
u32 pcie_pcs_osc_dtct_config_base;
u32 hal_shadow_base_addr;
u32 hal_reo1_misc_ctl;
};
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
extern const struct ath11k_hw_regs wcn6750_regs;
extern const struct ath11k_hw_regs ipq5018_regs;
static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
switch (type) {
case ATH11K_BD_IE_BOARD:
return "board data";
case ATH11K_BD_IE_REGDB:
return "regdb data";
}
return "unknown";
}
extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
#endif

File diff suppressed because it is too large

View file

@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "wmi.h"
struct ath11k;
struct ath11k_base;
@ -130,7 +131,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
@ -147,8 +148,6 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
int ath11k_mac_rfkill_config(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
@ -164,7 +163,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
@ -172,4 +171,8 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
int ath11k_mac_wait_tx_complete(struct ath11k *ar);
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
#endif

View file

@ -1,5 +1,8 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
@ -16,8 +19,10 @@
#include "debug.h"
#include "mhi.h"
#include "pci.h"
#include "pcic.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@ -209,37 +214,37 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{
u32 val;
val = ath11k_pci_read32(ab, MHISTATUS);
val = ath11k_pcic_read32(ab, MHISTATUS);
ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
/* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
* has SYSERR bit set and thus need to set MHICTRL_RESET
* to clear SYSERR.
*/
ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10);
}
static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{
ath11k_pci_write32(ab, PCIE_TXVECDB, 0);
ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
}
static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{
ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
}
static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{
ath11k_pci_write32(ab, PCIE_RXVECDB, 0);
ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
}
static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{
ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
}
void ath11k_mhi_clear_vector(struct ath11k_base *ab)
@ -258,13 +263,12 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
int *irq;
unsigned int msi_data;
ret = ath11k_pci_get_user_msi_assignment(ab_pci,
"MHI", &num_vectors,
&user_base_data, &base_vector);
ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
&user_base_data, &base_vector);
if (ret)
return ret;
ath11k_dbg(ab, ATH11K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
ath11k_dbg(ab, ATH11K_DBG_PCI, "num_vectors %d base_vector %d\n",
num_vectors, base_vector);
irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
@ -274,11 +278,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
for (i = 0; i < num_vectors; i++) {
msi_data = base_vector;
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
msi_data += i;
irq[i] = ath11k_pci_get_msi_irq(ab->dev,
msi_data);
irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
}
ab_pci->mhi_ctrl->irq = irq;
@ -296,15 +299,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}
static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
{
switch (reason) {
case MHI_CB_IDLE:
return "MHI_CB_IDLE";
case MHI_CB_PENDING_DATA:
return "MHI_CB_PENDING_DATA";
case MHI_CB_LPM_ENTER:
return "MHI_CB_LPM_ENTER";
case MHI_CB_LPM_EXIT:
return "MHI_CB_LPM_EXIT";
case MHI_CB_EE_RDDM:
return "MHI_CB_EE_RDDM";
case MHI_CB_EE_MISSION_MODE:
return "MHI_CB_EE_MISSION_MODE";
case MHI_CB_SYS_ERROR:
return "MHI_CB_SYS_ERROR";
case MHI_CB_FATAL_ERROR:
return "MHI_CB_FATAL_ERROR";
case MHI_CB_BW_REQ:
return "MHI_CB_BW_REQ";
default:
return "UNKNOWN";
}
};
static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
ath11k_mhi_op_callback_to_str(cb));
switch (cb) {
case MHI_CB_SYS_ERROR:
ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
@ -375,22 +411,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ret = ath11k_mhi_get_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to get msi for mhi\n");
mhi_free_controller(mhi_ctrl);
return ret;
goto free_controller;
}
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
return ret;
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@ -412,18 +448,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
default:
ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
ab->hw_rev);
mhi_free_controller(mhi_ctrl);
return -EINVAL;
ret = -EINVAL;
goto free_controller;
}
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
mhi_free_controller(mhi_ctrl);
return ret;
goto free_controller;
}
return 0;
free_controller:
mhi_free_controller(mhi_ctrl);
ab_pci->mhi_ctrl = NULL;
return ret;
}
void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
@ -435,216 +475,62 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
mhi_free_controller(mhi_ctrl);
}
static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state)
{
switch (mhi_state) {
case ATH11K_MHI_INIT:
return "INIT";
case ATH11K_MHI_DEINIT:
return "DEINIT";
case ATH11K_MHI_POWER_ON:
return "POWER_ON";
case ATH11K_MHI_POWER_OFF:
return "POWER_OFF";
case ATH11K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH11K_MHI_SUSPEND:
return "SUSPEND";
case ATH11K_MHI_RESUME:
return "RESUME";
case ATH11K_MHI_TRIGGER_RDDM:
return "TRIGGER_RDDM";
case ATH11K_MHI_RDDM_DONE:
return "RDDM_DONE";
default:
return "UNKNOWN";
}
};
static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
enum ath11k_mhi_state mhi_state)
{
struct ath11k_base *ab = ab_pci->ab;
switch (mhi_state) {
case ATH11K_MHI_INIT:
set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH11K_MHI_DEINIT:
clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH11K_MHI_POWER_ON:
set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH11K_MHI_POWER_OFF:
case ATH11K_MHI_FORCE_POWER_OFF:
clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
case ATH11K_MHI_SUSPEND:
set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH11K_MHI_RESUME:
clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH11K_MHI_TRIGGER_RDDM:
set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
break;
case ATH11K_MHI_RDDM_DONE:
set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
default:
ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
}
}
static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci,
enum ath11k_mhi_state mhi_state)
{
struct ath11k_base *ab = ab_pci->ab;
switch (mhi_state) {
case ATH11K_MHI_INIT:
if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_DEINIT:
case ATH11K_MHI_POWER_ON:
if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_FORCE_POWER_OFF:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_POWER_OFF:
case ATH11K_MHI_SUSPEND:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_RESUME:
if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_TRIGGER_RDDM:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_RDDM_DONE:
return 0;
default:
ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
}
ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state,
ab_pci->mhi_state);
return -EINVAL;
}
static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
enum ath11k_mhi_state mhi_state)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state);
if (ret)
goto out;
ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
switch (mhi_state) {
case ATH11K_MHI_INIT:
ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_DEINIT:
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
ret = mhi_async_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
case ATH11K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
break;
case ATH11K_MHI_SUSPEND:
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RESUME:
/* Do force MHI resume as some devices like QCA6390, WCN6855
* are not in M3 state but they are functional. So just ignore
* the MHI state while resuming.
*/
ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RDDM_DONE:
break;
default:
ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
ret = -EINVAL;
}
if (ret)
goto out;
ath11k_mhi_set_state_bit(ab_pci, mhi_state);
return 0;
out:
ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
return ret;
}
int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
int ret;
ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT);
if (ret)
goto out;
ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to prepare mhi: %d", ret);
return ret;
}
ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON);
if (ret)
goto out;
ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to power up mhi: %d", ret);
return ret;
}
return 0;
out:
return ret;
}
void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
mhi_power_down(ab_pci->mhi_ctrl, true);
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
void ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND);
struct ath11k_base *ab = ab_pci->ab;
int ret;
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to suspend mhi: %d", ret);
return ret;
}
return 0;
}
void ath11k_mhi_resume(struct ath11k_pci *ab_pci)
int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME);
struct ath11k_base *ab = ab_pci->ab;
int ret;
/* Do force MHI resume as some devices like QCA6390, WCN6855
* are not in M3 state but they are functional. So just ignore
* the MHI state while resuming.
*/
ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
if (ret) {
ath11k_warn(ab, "failed to resume mhi: %d", ret);
return ret;
}
return 0;
}

View file

@ -16,19 +16,6 @@
#define MHICTRL 0x38
#define MHICTRL_RESET_MASK 0x2
enum ath11k_mhi_state {
ATH11K_MHI_INIT,
ATH11K_MHI_DEINIT,
ATH11K_MHI_POWER_ON,
ATH11K_MHI_POWER_OFF,
ATH11K_MHI_FORCE_POWER_OFF,
ATH11K_MHI_SUSPEND,
ATH11K_MHI_RESUME,
ATH11K_MHI_TRIGGER_RDDM,
ATH11K_MHI_RDDM,
ATH11K_MHI_RDDM_DONE,
};
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
@ -36,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
void ath11k_mhi_clear_vector(struct ath11k_base *ab);
void ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
void ath11k_mhi_resume(struct ath11k_pci *ar_pci);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
#endif

File diff suppressed because it is too large

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@ -52,23 +53,8 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
struct ath11k_msi_user {
char *name;
int num_vectors;
u32 base_vector;
};
struct ath11k_msi_config {
int total_vectors;
int total_users;
struct ath11k_msi_user *users;
};
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE,
ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath11k_pci {
@ -76,10 +62,8 @@ struct ath11k_pci {
struct ath11k_base *ab;
u16 dev_id;
char amss_path[100];
u32 msi_ep_base_data;
struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config;
unsigned long mhi_state;
u32 register_window;
/* protects register_window above */
@ -88,8 +72,6 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
unsigned long irq_flags;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
@ -97,11 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
return (struct ath11k_pci *)ab->drv_priv;
}
int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector);
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
#endif

View file

@ -0,0 +1,827 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "pcic.h"
#include "debug.h"
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
"mhi-er1",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
static const struct ath11k_msi_config ath11k_msi_config[] = {
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_QCA6390_HW20,
},
{
.total_vectors = 16,
.total_users = 3,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
.hw_rev = ATH11K_HW_QCN9074_HW10,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW20,
},
{
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
.hw_rev = ATH11K_HW_WCN6855_HW21,
},
{
.total_vectors = 28,
.total_users = 2,
.users = (struct ath11k_msi_user[]) {
{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
};
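Each entry carves total_vectors into contiguous per-user ranges. Taking the QCA6390 row as a worked example: MHI owns vectors 0-2, CE 3-12, WAKE 13 and DP 14-31, accounting for all 32 vectors. WCN6750, which is booted via remoteproc rather than MHI, has no MHI user and packs CE into vectors 0-9 and DP into 10-27.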
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
const struct ath11k_msi_config *msi_config;
int i;
for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
msi_config = &ath11k_msi_config[i];
if (msi_config->hw_rev == ab->hw_rev)
break;
}
if (i == ARRAY_SIZE(ath11k_msi_config)) {
ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
ab->hw_rev);
return -EINVAL;
}
ab->pci.msi.config = msi_config;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
if (offset < ATH11K_PCI_WINDOW_START)
#if defined(__linux__)
iowrite32(value, ab->mem + offset);
#elif defined(__FreeBSD__)
iowrite32(value, (char *)ab->mem + offset);
#endif
else
ab->pci.ops->window_write32(ab, offset, value);
}
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
int ret = 0;
bool wakeup_required;
/* For offsets beyond BAR + 4K - 32, we may
* need to wake up the device for access.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
__ath11k_pcic_write32(ab, offset, value);
if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
u32 val;
if (offset < ATH11K_PCI_WINDOW_START)
#if defined(__linux__)
val = ioread32(ab->mem + offset);
#elif defined(__FreeBSD__)
val = ioread32((char *)ab->mem + offset);
#endif
else
val = ab->pci.ops->window_read32(ab, offset);
return val;
}
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
int ret = 0;
u32 val;
bool wakeup_required;
/* For offsets beyond BAR + 4K - 32, we may
* need to wake up the device for access.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
val = __ath11k_pcic_read32(ab, offset);
if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
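These accessors hide two details from callers: offsets below ATH11K_PCI_WINDOW_START are read or written directly in the BAR while higher offsets go through the bus-specific window ops, and offsets at or past ATH11K_PCI_ACCESS_ALWAYS_OFF may first require a device wakeup. A short usage sketch:
/* Sketch: callers never deal with windows or wakeups directly. */
u32 val;

val = ath11k_pcic_read32(ab, MHISTATUS);	/* low offset: direct access */
ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);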
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
{
int ret = 0;
bool wakeup_required;
u32 *data = buf;
u32 i;
/* For offsets beyond BAR + 4K - 32, we may
* need to wake up the device for access.
*/
wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
if (wakeup_required && ab->pci.ops->wakeup) {
ret = ab->pci.ops->wakeup(ab);
if (ret) {
ath11k_warn(ab,
"wakeup failed, data may be invalid: %d",
ret);
/* Even though wakeup() failed, continue processing rather
* than returning, because parts of the data may still be
* valid and useful, e.g. they could give us clues about a
* firmware crash.
* Being misled by invalid data is avoidable because we
* are aware of the wakeup failure.
*/
}
}
for (i = start; i < end + 1; i += 4)
*data++ = __ath11k_pcic_read32(ab, i);
if (wakeup_required && ab->pci.ops->release)
ab->pci.ops->release(ab);
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_read);
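/* The copy loop above is inclusive of 'end' and steps in u32 units; a
 * standalone model of the word count (addresses are illustrative):
 */
#include <assert.h>

int main(void)
{
	unsigned int start = 0x100, end = 0x10c, words = 0, i;

	for (i = start; i < end + 1; i += 4)
		words++;
	assert(words == 4);	/* 0x100, 0x104, 0x108, 0x10c */
	return 0;
}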
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
*msi_addr_lo = ab->pci.msi.addr_lo;
*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
int idx;
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*base_vector = msi_config->users[idx].base_vector;
*user_base_data = *base_vector + ab->pci.msi.ep_base_data;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
return 0;
}
}
ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
u32 i, msi_data_idx;
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
if (ce_id == i)
break;
msi_data_idx++;
}
*msi_idx = msi_data_idx;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
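/* The mapping above skips interrupt-less CEs so MSI data indices stay
 * densely packed; a standalone model with a hypothetical CE layout:
 */
#include <assert.h>
#include <stdbool.h>

static unsigned int ce_msi_idx(const bool *dis_intr, unsigned int count,
			       unsigned int ce_id)
{
	unsigned int i, idx = 0;

	for (i = 0; i < count; i++) {
		if (dis_intr[i])
			continue;	/* polled CE, consumes no MSI slot */
		if (i == ce_id)
			break;
		idx++;
	}
	return idx;
}

int main(void)
{
	bool dis_intr[6] = { false, false, false, true, false, false };

	assert(ce_msi_idx(dis_intr, 6, 4) == 3);	/* CE3 is skipped */
	return 0;
}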
static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
int i, j;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
int i, irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath11k_pcic_free_ext_irq(ab);
}
EXPORT_SYMBOL(ath11k_pcic_free_irq);
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_disable(ab, i);
}
}
static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
struct ath11k_base *ab = ce_pipe->ab;
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
{
int i;
clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath11k_pcic_ext_grp_disable(irq_grp);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_base *ab = irq_grp->ab;
int i;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath11k_pcic_ext_grp_enable(irq_grp);
}
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
int i, j, irq_idx;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_pcic_ext_irq_disable(ab);
ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
struct ath11k_base *ab = irq_grp->ab;
int i;
if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
return ab->pci.ops->get_msi_irq(ab, vector);
}
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
&user_base_data,
&base_vector);
if (ret < 0)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
ab->hw_params.ring_mask->rx_err[i] ||
ab->hw_params.ring_mask->rx_wbm_rel[i] ||
ab->hw_params.ring_mask->reo_status[i] ||
ab->hw_params.ring_mask->rxdma2host[i] ||
ab->hw_params.ring_mask->host2rxdma[i] ||
ab->hw_params.ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pcic_get_msi_irq(ab, vector);
if (irq < 0)
return irq;
ab->irq_num[irq_idx] = irq;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"irq %d group %d\n", irq, i);
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
irq_flags, "DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
}
ath11k_pcic_ext_grp_disable(irq_grp);
}
return 0;
}
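/* Standalone arithmetic for the (i % num_vectors) + base_vector mapping
 * above, using the WCN6855 "DP" values from the tables earlier in this
 * file:
 */
#include <assert.h>

int main(void)
{
	int num_vectors = 18, base_vector = 14;

	assert((0 % num_vectors) + base_vector == 14);	/* group 0 */
	assert((10 % num_vectors) + base_vector == 24);	/* group 10 */
	assert((18 % num_vectors) + base_vector == 14);	/* wraps around */
	return 0;
}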
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
irq_flags = IRQF_SHARED;
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
irq_flags |= IRQF_NOBALANCING;
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
irq = ath11k_pcic_get_msi_irq(ab, msi_data);
if (irq < 0)
return irq;
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
irq_flags, irq_name[irq_idx], ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
return ret;
}
ab->irq_num[irq_idx] = irq;
msi_data_idx++;
ath11k_pcic_ce_irq_disable(ab, i);
}
ret = ath11k_pcic_ext_irq_config(ab);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pcic_ce_irq_enable(ab, i);
}
}
EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
ath11k_pcic_ce_irqs_disable(ab);
ath11k_pcic_sync_ce_irqs(ab);
ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
void ath11k_pcic_stop(struct ath11k_base *ab)
{
ath11k_pcic_ce_irq_disable_sync(ab);
ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
int ath11k_pcic_start(struct ath11k_base *ab)
{
set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pcic_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
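/* A toy model of the table walk above with the endian conversions
 * dropped and a hypothetical one-entry map; PIPEDIR_INOUT assigns the
 * same pipe in both directions:
 */
#include <assert.h>

enum { TOY_PIPEDIR_NONE, TOY_PIPEDIR_IN, TOY_PIPEDIR_OUT, TOY_PIPEDIR_INOUT };

struct toy_svc_map { unsigned int service_id, pipedir, pipenum; };

int main(void)
{
	const struct toy_svc_map map[] = {
		{ .service_id = 3, .pipedir = TOY_PIPEDIR_INOUT, .pipenum = 4 },
	};
	unsigned int ul = 0, dl = 0, i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].service_id != 3)
			continue;
		if (map[i].pipedir == TOY_PIPEDIR_INOUT)
			ul = dl = map[i].pipenum;
	}
	assert(ul == 4 && dl == 4);
	return 0;
}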
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops)
{
if (!pci_ops)
return 0;
/* Return error if mandatory pci_ops callbacks are missing */
if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
!pci_ops->window_read32)
return -EINVAL;
ab->pci.ops = pci_ops;
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
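/* Sketch of what a bus glue layer might register; the three callbacks
 * below are the mandatory ones checked above, and their bodies are
 * illustrative stubs, not any real chip's implementation.
 */
static int example_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}

static u32 example_window_read32(struct ath11k_base *ab, u32 offset)
{
	/* stub: a real chip selects a window before the masked access */
	return ioread32(ab->mem + (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}

static void example_window_write32(struct ath11k_base *ab, u32 offset, u32 val)
{
	iowrite32(val, ab->mem + (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}

static const struct ath11k_pci_ops example_pci_ops = {
	.get_msi_irq	= example_get_msi_irq,
	.window_read32	= example_window_read32,
	.window_write32	= example_window_write32,
};

/* in probe: ret = ath11k_pcic_register_pci_ops(ab, &example_pci_ops); */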
void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
i == ATH11K_PCI_CE_WAKE_IRQ)
continue;
ath11k_pcic_ce_irq_enable(ab, i);
}
}
EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
int i;
int irq_idx;
struct ath11k_ce_pipe *ce_pipe;
for (i = 0; i < ab->hw_params.ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
i == ATH11K_PCI_CE_WAKE_IRQ)
continue;
disable_irq_nosync(ab->irq_num[irq_idx]);
synchronize_irq(ab->irq_num[irq_idx]);
tasklet_kill(&ce_pipe->intr_tq);
}
}
EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);

View file

@ -0,0 +1,54 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_CMN_H
#define _ATH11K_PCI_CMN_H
#include "core.h"
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
#define ATH11K_PCI_CE_WAKE_IRQ 2
#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
#define ATH11K_PCI_WINDOW_START 0x80000
#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
/* BAR0 + 4k is always accessible, and no
* need to force wakeup.
* 4K - 32 = 0xFE0
*/
#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
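/* Worked example of the window math above as standalone C; the register
 * offset is made up and GENMASK(18, 0) is written out as 0x7ffff:
 */
#include <assert.h>
#include <stdint.h>

#define TOY_WINDOW_START      0x80000u
#define TOY_WINDOW_RANGE_MASK 0x7ffffu	/* GENMASK(18, 0) */

int main(void)
{
	uint32_t offset = 0x9a004;	/* hypothetical register */

	/* below the window: direct access, no wakeup needed */
	assert(0x3000u < TOY_WINDOW_START);
	/* above it: only bits 18:0 index into the mapped window */
	assert((offset & TOY_WINDOW_RANGE_MASK) == 0x1a004);
	return 0;
}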
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector);
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
void ath11k_pcic_free_irq(struct ath11k_base *ab);
int ath11k_pcic_config_irq(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
void ath11k_pcic_stop(struct ath11k_base *ab);
int ath11k_pcic_start(struct ath11k_base *ab);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops);
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
#endif

View file

@ -1,12 +1,30 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "peer.h"
#include "debug.h"
static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
int peer_id)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->peer_id != peer_id)
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
const u8 *addr)
{
@ -26,25 +44,6 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
return NULL;
}
static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab,
u8 pdev_idx, const u8 *addr)
{
struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (peer->pdev_idx != pdev_idx)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr)
{
@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) {
if (!ether_addr_equal(peer->addr, addr))
continue;
if (!ab->rhead_peer_addr)
return NULL;
return peer;
}
peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
ab->rhash_peer_addr_param);
return NULL;
return peer;
}
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
if (!ab->rhead_peer_id)
return NULL;
return NULL;
peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
ab->rhash_peer_id_param);
return peer;
}
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
@ -99,14 +99,14 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id);
peer = ath11k_peer_find_list_by_id(ab, peer_id);
if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id);
goto exit;
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, peer_id);
list_del(&peer->list);
@ -138,7 +138,7 @@ void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
wake_up(&ab->peer_mapping_wq);
}
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
vdev_id, mac_addr, peer_id);
exit:
@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
return 0;
}
static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params,
void *key)
{
struct ath11k_peer *tmp;
lockdep_assert_held(&ab->tbl_mtx_lock);
tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
if (!tmp)
return 0;
else if (IS_ERR(tmp))
return PTR_ERR(tmp);
else
return -EEXIST;
}
static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params)
{
int ret;
lockdep_assert_held(&ab->tbl_mtx_lock);
ret = rhashtable_remove_fast(rtbl, rhead, *params);
if (ret && ret != -ENOENT)
return ret;
return 0;
}
static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
{
int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param, &peer->peer_id);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param, &peer->addr);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
goto err_clean;
}
return 0;
err_clean:
ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param);
return ret;
}
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
struct ath11k_peer *peer, *tmp;
@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id)
@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
}
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
}
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
@ -217,17 +290,51 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
return 0;
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
struct ath11k_peer *peer;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, addr);
/* Check if the found peer is what we want to remove.
* While the sta is transitioning to another band we may
* have two peers with the same address assigned to
* different vdev_ids. Make sure we are deleting the
* correct peer.
*/
if (peer && peer->vdev_id == vdev_id)
ath11k_peer_rhash_delete(ab, peer);
/* Fallback to peer list search if the correct peer can't be found.
* Skip the deletion of the peer from the rhash since it has already
* been deleted in peer add.
*/
if (!peer)
peer = ath11k_peer_find(ab, vdev_id, addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
ath11k_warn(ab,
"failed to find peer vdev_id %d addr %pM in delete\n",
vdev_id, addr);
return -EINVAL;
}
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
ath11k_warn(ar->ab,
ath11k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
@ -237,6 +344,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
if (ret)
return ret;
return 0;
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = __ath11k_peer_delete(ar, vdev_id, addr);
if (ret)
return ret;
ar->num_peers--;
return 0;
@ -262,13 +382,23 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOBUFS;
}
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
return -EINVAL;
if (peer->vdev_id == param->vdev_id) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
return -EINVAL;
}
/* Assume the sta is transitioning to another band.
* Remove the peer from the rhash here.
*/
ath11k_peer_rhash_delete(ar->ab, peer);
}
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
ret = ath11k_wmi_send_peer_create_cmd(ar, param);
if (ret) {
@ -283,11 +413,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
if (ret)
return ret;
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
@ -295,6 +427,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
goto cleanup;
}
ret = ath11k_peer_rhash_add(ar->ab, peer);
if (ret) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
@ -319,26 +458,213 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
return 0;
cleanup:
reinit_completion(&ar->peer_delete_done);
fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
param->vdev_id);
if (fbret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
param->vdev_id, param->peer_addr);
goto exit;
}
fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
param->peer_addr);
fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
if (fbret)
ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n",
ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
param->peer_addr, param->vdev_id, fbret);
exit:
return ret;
}
int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
{
int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param);
if (ret) {
ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param);
if (ret) {
ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
return 0;
}
static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_id_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_id)
return 0;
size = sizeof(*ab->rhead_peer_id);
rhash_id_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_id_tbl) {
ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_id_param;
param->key_offset = offsetof(struct ath11k_peer, peer_id);
param->head_offset = offsetof(struct ath11k_peer, rhash_id);
param->key_len = sizeof_field(struct ath11k_peer, peer_id);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_id_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_id) {
ab->rhead_peer_id = rhash_id_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_id_tbl);
err_free:
kfree(rhash_id_tbl);
return ret;
}
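/* The params above bind a struct field as the hash key via offsets; a
 * standalone illustration with a toy struct (sizeof_field expanded to
 * its plain-C equivalent):
 */
#include <assert.h>
#include <stddef.h>

struct toy_peer {
	int peer_id;
	unsigned char addr[6];
};

int main(void)
{
	size_t key_offset = offsetof(struct toy_peer, peer_id);
	size_t key_len = sizeof(((struct toy_peer *)0)->peer_id);

	/* the hash covers exactly the peer_id bytes */
	assert(key_offset == 0 && key_len == sizeof(int));
	return 0;
}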
static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_addr_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_addr)
return 0;
size = sizeof(*ab->rhead_peer_addr);
rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_addr_tbl) {
ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_addr_param;
param->key_offset = offsetof(struct ath11k_peer, addr);
param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
param->key_len = sizeof_field(struct ath11k_peer, addr);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_addr_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_addr) {
ab->rhead_peer_addr = rhash_addr_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_addr_tbl);
err_free:
kfree(rhash_addr_tbl);
return ret;
}
static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id)
return;
rhashtable_destroy(ab->rhead_peer_id);
kfree(ab->rhead_peer_id);
ab->rhead_peer_id = NULL;
}
static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_addr)
return;
rhashtable_destroy(ab->rhead_peer_addr);
kfree(ab->rhead_peer_addr);
ab->rhead_peer_addr = NULL;
}
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
{
int ret;
mutex_lock(&ab->tbl_mtx_lock);
ret = ath11k_peer_rhash_id_tbl_init(ab);
if (ret)
goto out;
ret = ath11k_peer_rhash_addr_tbl_init(ab);
if (ret)
goto cleanup_tbl;
mutex_unlock(&ab->tbl_mtx_lock);
return 0;
cleanup_tbl:
ath11k_peer_rhash_id_tbl_destroy(ab);
out:
mutex_unlock(&ab->tbl_mtx_lock);
return ret;
}
void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
{
mutex_lock(&ab->tbl_mtx_lock);
ath11k_peer_rhash_addr_tbl_destroy(ab);
ath11k_peer_rhash_id_tbl_destroy(ab);
mutex_unlock(&ab->tbl_mtx_lock);
}

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_PEER_H
@ -20,6 +21,11 @@ struct ath11k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
/* peer id based rhashtable list pointer */
struct rhash_head rhash_id;
/* peer addr based rhashtable list pointer */
struct rhash_head rhash_addr;
/* Info used in MMIC verification of
* RX fragments
*/
@ -29,6 +35,7 @@ struct ath11k_peer {
u16 sec_type;
u16 sec_type_grp;
bool is_authorized;
bool dp_setup_done;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
@ -47,5 +54,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
int vdev_id);
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
#endif /* _PEER_H_ */

File diff suppressed because it is too large

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@ -24,21 +25,25 @@
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
#define ATH11K_QMI_CALDB_SIZE 0x480000
#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
#define QMI_WLFW_FW_READY_IND_V01 0x0038
#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
#define QMI_WLFW_FW_READY_IND_V01 0x0021
#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ)
#define ATH11K_COLD_BOOT_FW_RESET_DELAY (60 * HZ)
#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000
struct ath11k_base;
@ -69,6 +74,7 @@ enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
ATH11K_QMI_EVENT_POWER_UP,
ATH11K_QMI_EVENT_POWER_DOWN,
ATH11K_QMI_EVENT_FW_INIT_DONE,
ATH11K_QMI_EVENT_MAX,
};
@ -97,6 +103,8 @@ struct ath11k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
u32 prev_size;
u32 prev_type;
dma_addr_t paddr;
u32 *vaddr;
void __iomem *iaddr;
@ -289,10 +297,16 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
char placeholder;
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
struct qmi_wlfw_fw_init_done_ind_msg_v01 {
char placeholder;
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C
#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0
enum qmi_wlanfw_pipedir_enum_v01 {
QMI_WLFW_PIPEDIR_NONE_V01 = 0,
@ -385,6 +399,18 @@ struct qmi_wlanfw_cap_req_msg_v01 {
char placeholder;
};
struct qmi_wlanfw_device_info_req_msg_v01 {
char placeholder;
};
struct qmi_wlanfw_device_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u64 bar_addr;
u32 bar_size;
u8 bar_addr_valid;
u8 bar_size_valid;
};
#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
@ -496,5 +522,7 @@ void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
void ath11k_qmi_free_resource(struct ath11k_base *ab);
int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab);
#endif

View file

@ -48,6 +48,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@ -76,24 +77,33 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
/* Set the country code to the firmware and wait for
/* Set the country code to the firmware and will receive
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
init_country_param.cc_info.alpha2[2] = 0;
if (ar->ab->hw_params.current_cc_support) {
memcpy(&set_current_param.alpha2, request->alpha2, 2);
memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret)
ath11k_warn(ar->ab,
"failed set current country code: %d\n", ret);
} else {
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
init_country_param.cc_info.alpha2[2] = 0;
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
}
int ath11k_reg_update_chan_list(struct ath11k *ar)
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params;
@ -102,7 +112,35 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
int i, ret;
int i, ret, left;
if (wait && ar->state_11d != ATH11K_11D_IDLE) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive 11d scan complete: timed out\n");
ar->state_11d = ATH11K_11D_IDLE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"11d scan wait left time %d\n", left);
}
if (wait &&
(ar->scan.state == ATH11K_SCAN_STARTING ||
ar->scan.state == ATH11K_SCAN_RUNNING)) {
left = wait_for_completion_timeout(&ar->scan.completed,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive hw scan complete: timed out\n");
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"hw scan wait left time %d\n", left);
}
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@ -184,11 +222,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
if (ar->pending_11d) {
complete(&ar->finish_11d_ch_list);
ar->pending_11d = false;
}
return ret;
}
@ -267,18 +300,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
}
if (ar->pending_11d)
complete(&ar->finish_11d_scan);
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
if (ar->pending_11d)
reinit_completion(&ar->finish_11d_ch_list);
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
kfree(regd_copy);
@ -286,7 +308,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err;
if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar);
ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
@ -604,13 +626,19 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
struct cur_reg_rule *reg_rule;
u8 i = 0, j = 0;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
/* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
* This can be updated after complete 6 GHz regulatory support is added.
*/
if (reg_info->is_ext_reg_event)
num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
if (!num_rules)
goto ret;
@ -631,24 +659,24 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
"Country %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order(2G rules first and then 5G)
* send these rules in order(2 GHz rules first and then 5 GHz)
*/
for (; i < num_rules; i++) {
if (reg_info->num_2g_reg_rules &&
(i < reg_info->num_2g_reg_rules)) {
reg_rule = reg_info->reg_rules_2g_ptr + i;
if (reg_info->num_2ghz_reg_rules &&
(i < reg_info->num_2ghz_reg_rules)) {
reg_rule = reg_info->reg_rules_2ghz_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
reg_info->max_bw_2ghz);
flags = 0;
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
} else if (reg_info->num_5ghz_reg_rules &&
(j < reg_info->num_5ghz_reg_rules)) {
reg_rule = reg_info->reg_rules_5ghz_ptr + j++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_5g);
reg_info->max_bw_5ghz);
/* FW doesn't pass NL80211_RRF_AUTO_BW flag for
* BW Auto correction, we can enable this by default
@ -657,6 +685,14 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
} else if (reg_info->is_ext_reg_event &&
reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
(k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
k++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
flags = NL80211_RRF_AUTO_BW;
} else {
break;
}
@ -684,12 +720,21 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
continue;
}
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags);
if (reg_info->is_ext_reg_event) {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms, flags,
reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
tmp_regd->n_reg_rules = i;

View file

@ -32,5 +32,5 @@ struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
#endif

View file

@ -877,7 +877,7 @@ struct rx_msdu_start_wcn6855 {
*
* l4_offset
* Depending upon mode bit, this field either indicates the
* L4 offset nin bytes from the start of RX_HEADER (only valid
* L4 offset in bytes from the start of RX_HEADER (only valid
* if either ipv4_proto or ipv6_proto is set to 1) or indicates
* the offset in bytes to the start of TCP or UDP header from
* the start of the IP header after decapsulation (Only valid if
@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[0];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9074 {
@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[0];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc_wcn6855 {
@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
u8 msdu_payload[0];
u8 msdu_payload[];
} __packed;
struct hal_rx_desc {

View file

@ -30,6 +30,7 @@
#define ATH11K_SPECTRAL_20MHZ 20
#define ATH11K_SPECTRAL_40MHZ 40
#define ATH11K_SPECTRAL_80MHZ 80
#define ATH11K_SPECTRAL_160MHZ 160
#define ATH11K_SPECTRAL_SIGNATURE 0xFA
@ -107,7 +108,7 @@ struct spectral_search_fft_report {
__le32 info1;
__le32 info2;
__le32 reserve0;
u8 bins[0];
u8 bins[];
} __packed;
struct ath11k_spectral_search_report {
@ -183,6 +184,8 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
return 0;
ar->spectral.is_primary = true;
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
@ -212,7 +215,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar,
return -ENODEV;
arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
spin_lock_bh(&ar->spectral.lock);
ar->spectral.mode = mode;
spin_unlock_bh(&ar->spectral.lock);
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
@ -582,6 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
bool fragment_sample = false;
lockdep_assert_held(&ar->spectral.lock);
@ -636,6 +643,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
case ATH11K_SPECTRAL_80MHZ:
fft_sample->chan_width_mhz = chan_width_mhz;
break;
case ATH11K_SPECTRAL_160MHZ:
if (ab->hw_params.spectral.fragment_160mhz) {
chan_width_mhz /= 2;
fragment_sample = true;
}
fft_sample->chan_width_mhz = chan_width_mhz;
break;
default:
ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
return -EINVAL;
@ -660,6 +674,17 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
/* If freq2 is available then the spectral scan results are fragmented
* as primary and secondary
*/
if (fragment_sample && freq) {
if (!ar->spectral.is_primary)
fft_sample->freq1 = cpu_to_be16(freq);
/* We have to toggle the is_primary to handle the next report */
ar->spectral.is_primary = !ar->spectral.is_primary;
}
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
ab->hw_params.spectral.fft_sz);
@ -843,9 +868,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar)
{
struct ath11k_spectral *sp = &ar->spectral;
if (!sp->enabled)
return;
ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
}
@ -897,15 +919,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab)
if (!sp->enabled)
continue;
ath11k_spectral_debug_unregister(ar);
ath11k_spectral_ring_free(ar);
mutex_lock(&ar->conf_mutex);
ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
mutex_unlock(&ar->conf_mutex);
spin_lock_bh(&sp->lock);
sp->mode = ATH11K_SPECTRAL_DISABLED;
sp->enabled = false;
spin_unlock_bh(&sp->lock);
ath11k_spectral_debug_unregister(ar);
ath11k_spectral_ring_free(ar);
}
}

View file

@ -35,6 +35,7 @@ struct ath11k_spectral {
u16 count;
u8 fft_size;
bool enabled;
bool is_primary;
};
#ifdef CONFIG_ATH11K_SPECTRAL

View file

@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
@ -11,6 +12,9 @@
#include "core.h"
#include "testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
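/* The two masks above pack both counters into one byte of the segment
 * header; a standalone check with arbitrary values:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t seginfo = (3u << 4) | 1u;	/* 3 total segments, seq 1 */

	assert((seginfo & 0x0f) == 1);		/* CURRENT_SEQ, bits 3:0 */
	assert(((seginfo >> 4) & 0x0f) == 3);	/* TOTAL_SEGMENTS, bits 7:4 */
	return 0;
}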
static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
@ -20,58 +24,162 @@ static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
/* Returns true if callee consumes the skb and the skb should be discarded.
* Returns false if skb is not used. Does not sleep.
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
{
struct ath11k_pdev *pdev;
struct ath11k *ar = NULL;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar && ar->state == ATH11K_STATE_FTM)
break;
}
return ar;
}
/* This function handles unsegmented events. Data in various events are aggregated
* in application layer, this event is unsegmented from host perspective.
*/
bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
struct sk_buff *nl_skb;
bool consumed;
int ret;
struct ath11k *ar;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"event wmi cmd_id %d skb length %d\n",
cmd_id, skb->len);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
ar = ath11k_tm_get_ar(ab);
if (!ar) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
return;
}
spin_lock_bh(&ar->data_lock);
consumed = true;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(skb->len),
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ar->ab,
"failed to allocate skb for testmode wmi event\n");
ath11k_warn(ab,
"failed to allocate skb for unsegmented testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
if (ret) {
ath11k_warn(ar->ab,
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath11k_warn(ar->ab,
"failed to put testmode wmi even cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
spin_unlock_bh(&ar->data_lock);
return;
out:
spin_unlock_bh(&ar->data_lock);
ath11k_warn(ab, "Failed to send testmode event to higher layers\n");
}
/* This function handles segmented events. Data of various events received
* from firmware is aggregated and sent to application layer
*/
static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
const struct wmi_ftm_event_msg *ftm_msg,
u16 length)
{
struct sk_buff *nl_skb;
int ret = 0;
struct ath11k *ar;
u8 const *buf_pos;
u16 datalen;
u8 total_segments, current_seq;
u32 data_pos;
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"event wmi cmd_id %d ftm event msg %pK datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
if (pdev_id >= ab->num_radios) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev id: %d\n",
pdev_id);
return -EINVAL;
}
ar = ab->pdevs[pdev_id].ar;
if (!ar) {
ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
return -ENODEV;
}
current_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ,
ftm_msg->seg_hdr.segmentinfo);
total_segments = FIELD_GET(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS,
ftm_msg->seg_hdr.segmentinfo);
datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
buf_pos = ftm_msg->data;
spin_lock_bh(&ar->data_lock);
if (current_seq == 0) {
ab->testmode.expected_seq = 0;
ab->testmode.data_pos = 0;
}
data_pos = ab->testmode.data_pos;
if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
goto out;
}
ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
if (ret) {
ath11k_warn(ar->ab,
"failed to copy skb to testmode wmi event: %d\n",
ret);
memcpy(&ab->testmode.eventdata[data_pos], buf_pos, datalen);
data_pos += datalen;
if (++ab->testmode.expected_seq != total_segments) {
ab->testmode.data_pos = data_pos;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"partial data received current_seq %d total_seg %d\n",
current_seq, total_segments);
goto out;
}
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"total data length pos %d len %d\n",
data_pos, ftm_msg->seg_hdr.len);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(data_pos),
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for segmented testmode wmi event\n");
ret = -ENOMEM;
goto out;
}
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
ATH11K_TM_CMD_WMI_FTM) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
ret = -ENOBUFS;
goto out;
}
@ -79,8 +187,45 @@ bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb)
out:
spin_unlock_bh(&ar->data_lock);
return ret;
}
return consumed;
static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_ftm_event_msg *ev;
u16 length;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_ARRAY_BYTE];
if (!ev) {
ath11k_warn(ab, "failed to fetch ftm msg\n");
kfree(tb);
return;
}
length = skb->len - TLV_HDR_SIZE;
ret = ath11k_tm_process_event(ab, cmd_id, ev, length);
if (ret)
ath11k_warn(ab, "Failed to process ftm event\n");
kfree(tb);
}
void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb)
{
if (test_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
ath11k_tm_wmi_event_segmented(ab, cmd_id, skb);
else
ath11k_tm_wmi_event_unsegmented(ab, cmd_id, skb);
}
static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
@ -89,7 +234,7 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
"cmd get version_major %d version_minor %d\n",
ATH11K_TESTMODE_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MINOR);
@ -115,21 +260,56 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
return cfg80211_testmode_reply(skb);
}
static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
u32 cmd_id, buf_len;
int ret;
void *buf;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_ON) {
ret = -ENETDOWN;
goto out;
if (ar->state == ATH11K_STATE_FTM) {
ret = -EALREADY;
goto err;
}
/* start utf only when the driver is not in use */
if (ar->state != ATH11K_STATE_OFF) {
ret = -EBUSY;
goto err;
}
ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
goto err;
}
ar->state = ATH11K_STATE_FTM;
ar->ftm_msgref = 0;
mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "cmd start\n");
return 0;
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
struct ieee80211_vif *vif)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
struct ath11k_vif *arvif;
u32 cmd_id, buf_len;
int ret, tag;
void *buf;
u32 *ptr;
mutex_lock(&ar->conf_mutex);
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
@ -142,11 +322,45 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
* hold TLV and pdev/vdev id.
*/
if (buf_len < sizeof(struct wmi_tlv) + sizeof(u32)) {
ret = -EINVAL;
goto out;
}
ptr = buf;
tag = FIELD_GET(WMI_TLV_TAG, *ptr);
/* pdev/vdev id start after TLV header */
ptr++;
if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
*ptr = ar->pdev->pdev_id;
if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
(tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
if (vif) {
arvif = (struct ath11k_vif *)vif->drv_priv;
*ptr = arvif->vdev_id;
} else {
ret = -EINVAL;
goto out;
}
}
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
"cmd wmi cmd_id %d buf length %d\n",
cmd_id, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
@ -173,6 +387,91 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[])
return ret;
}
static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = ar->ab;
struct sk_buff *skb;
u32 cmd_id, buf_len, hdr_info;
int ret;
void *buf;
u8 segnumber = 0, seginfo;
u16 chunk_len, total_bytes, num_segments;
u8 *bufpos;
struct wmi_ftm_cmd *ftm_cmd;
set_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH11K_STATE_FTM) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd wmi ftm cmd_id %d buffer length %d\n",
cmd_id, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
total_bytes = buf_len;
num_segments = total_bytes / MAX_WMI_UTF_LEN;
if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
num_segments++;
while (buf_len) {
chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
sizeof(struct wmi_ftm_cmd)));
if (!skb) {
ret = -ENOMEM;
goto out;
}
ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, (chunk_len +
sizeof(struct wmi_ftm_seg_hdr)));
ftm_cmd->tlv_header = hdr_info;
ftm_cmd->seg_hdr.len = total_bytes;
ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
ftm_cmd->seg_hdr.segmentinfo = seginfo;
segnumber++;
memcpy(&ftm_cmd->data, bufpos, chunk_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
ath11k_warn(ar->ab, "failed to send wmi ftm command: %d\n", ret);
goto out;
}
buf_len -= chunk_len;
bufpos += chunk_len;
}
ar->ftm_msgref++;
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
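The chunk loop above is a ceiling division plus a packed segment index. A minimal sketch of that accounting, using the ATH11K_FTM_SEGHDR_* field masks already referenced in the code; the helper itself is illustrative only:
/* Editor's sketch: num_segments is DIV_ROUND_UP(total, MAX_WMI_UTF_LEN),
 * and every segment header carries the total segment count plus the
 * running segment number packed into one segmentinfo word.
 */
static u32 ath11k_ftm_sketch_seginfo(u16 total_bytes, u8 segnumber)
{
	u16 num_segments = DIV_ROUND_UP(total_bytes, MAX_WMI_UTF_LEN);

	return FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
	       FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
}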
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
@ -192,7 +491,11 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case ATH11K_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
case ATH11K_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb);
return ath11k_tm_cmd_wmi(ar, tb, vif);
case ATH11K_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
case ATH11K_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;
}

View file

@ -1,22 +1,22 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#ifdef CONFIG_NL80211_TESTMODE
bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb);
void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb);
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
#else
static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id,
static inline void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
return false;
}
static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,

View file

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* "API" level of the ath11k testmode interface. Bump it after every
@ -11,9 +12,10 @@
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
#define ATH11K_TESTMODE_VERSION_MINOR 0
#define ATH11K_TESTMODE_VERSION_MINOR 1
#define ATH11K_TM_DATA_MAX_LEN 5000
#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
enum ath11k_tm_attr {
__ATH11K_TM_ATTR_INVALID = 0,
@ -47,4 +49,18 @@ enum ath11k_tm_cmd {
* ATH11K_TM_ATTR_DATA.
*/
ATH11K_TM_CMD_WMI = 1,
/* Boots the UTF firmware; the netdev interface must be down at
* the time.
*/
ATH11K_TM_CMD_TESTMODE_START = 2,
/* The command used to transmit an FTM WMI command to the firmware
* and the event used to receive WMI events from the firmware. The
* data received contains only the payload; the driver must add the
* TLV header and send the command to the firmware with command id
* WMI_PDEV_UTF_CMDID. The data payload size can be large, so the
* driver needs to send segmented data to the firmware.
*/
ATH11K_TM_CMD_WMI_FTM = 3,
};
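To make the segmentation contract concrete, a sketch of the per-segment framing as inferred from testmode.c (the authoritative struct definitions live in wmi.h, whose diff is suppressed in this view):
/* Editor's sketch of one WMI_PDEV_UTF_CMDID segment, field names as
 * used by ath11k_tm_cmd_wmi_ftm(); layout inferred, not copied:
 *
 *   u32 tlv_header;          WMI_TAG_ARRAY_BYTE tag + TLV length
 *   u32 seg_hdr.len;         total payload bytes across all segments
 *   u32 seg_hdr.msgref;      per-command reference (ar->ftm_msgref)
 *   u32 seg_hdr.segmentinfo; total segments | current segment number
 *   u8  data[];              up to MAX_WMI_UTF_LEN payload bytes
 */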

View file

@ -99,7 +99,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
/* display in millidegree celcius */
/* display in millidegree Celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);

View file

@ -19,7 +19,7 @@ struct ath11k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
/* temperature value in Celcius degree
/* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;

View file

@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
__dynamic_array(char, msg, ATH11K_MSG_MAX)
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH11K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH11K_MSG_MAX);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@ -308,6 +305,34 @@ TRACE_EVENT(ath11k_wmi_diag,
)
);
TRACE_EVENT(ath11k_ps_timekeeper,
TP_PROTO(struct ath11k *ar, const void *peer_addr,
u32 peer_ps_timestamp, u8 peer_ps_state),
TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__dynamic_array(u8, peer_addr, ETH_ALEN)
__field(u8, peer_ps_state)
__field(u32, peer_ps_timestamp)
),
TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
memcpy(__get_dynamic_array(peer_addr), peer_addr,
ETH_ALEN);
__entry->peer_ps_state = peer_ps_state;
__entry->peer_ps_timestamp = peer_ps_timestamp;
),
TP_printk("%s %s %u %u",
__get_str(driver),
__get_str(device),
__entry->peer_ps_state,
__entry->peer_ps_timestamp
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
/* we don't want to use include/trace/events */

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,16 +1,30 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include "mac.h"
#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"
static const struct wiphy_wowlan_support ath11k_wowlan_support = {
.flags = WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE,
.pattern_min_len = WOW_MIN_PATTERN_SIZE,
.pattern_max_len = WOW_MAX_PATTERN_SIZE,
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
int ath11k_wow_enable(struct ath11k_base *ab)
{
@ -54,6 +68,13 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int ret;
/* In the case of WCN6750, WoW wakeup is done
* by sending an SMP2P power save exit message
* to the target processor.
*/
if (ab->hw_params.smp2p_wow_exit)
return 0;
reinit_completion(&ab->wow.wakeup_completed);
ret = ath11k_wmi_wow_host_wakeup_ind(ar);
@ -71,3 +92,787 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
return 0;
}
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
int i, ret;
for (i = 0; i < WOW_EVENT_MAX; i++) {
ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
if (ret) {
ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
for (i = 0; i < ar->wow.max_num_patterns; i++) {
ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
if (ret) {
ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
i, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_cleanup(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_wow_vif_cleanup(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
/* Convert an 802.3 format to an 802.11 format.
* +------------+-----------+--------+----------------+
* 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
* +------------+-----------+--------+----------------+
* |__ |_______ |____________ |________
* | | | |
* +--+------------+----+-----------+---------------+-----------+
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
int total_len = old->pkt_offset + old->pattern_len;
int hdr_80211_end_offset;
struct ieee80211_hdr_3addr *new_hdr_pattern =
(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
struct ieee80211_hdr_3addr *new_hdr_mask =
(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
int hdr_len = sizeof(*new_hdr_pattern);
struct rfc1042_hdr *new_rfc_pattern =
(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
struct rfc1042_hdr *new_rfc_mask =
(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
int rfc_len = sizeof(*new_rfc_pattern);
memcpy(hdr_8023_pattern + old->pkt_offset,
old->pattern, ETH_HLEN - old->pkt_offset);
memcpy(hdr_8023_bit_mask + old->pkt_offset,
old->mask, ETH_HLEN - old->pkt_offset);
/* Copy destination address */
memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
/* Copy source address */
memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
/* Copy logical link type */
memcpy(&new_rfc_pattern->snap_type,
&old_hdr_pattern->h_proto,
sizeof(old_hdr_pattern->h_proto));
memcpy(&new_rfc_mask->snap_type,
&old_hdr_mask->h_proto,
sizeof(old_hdr_mask->h_proto));
/* Compute new pkt_offset */
if (old->pkt_offset < ETH_ALEN)
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr1);
else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr3) -
offsetof(struct ethhdr, h_source);
else
new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
/* Compute new hdr end offset */
if (total_len > ETH_HLEN)
hdr_80211_end_offset = hdr_len + rfc_len;
else if (total_len > offsetof(struct ethhdr, h_proto))
hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
else if (total_len > ETH_ALEN)
hdr_80211_end_offset = total_len - ETH_ALEN +
offsetof(struct ieee80211_hdr_3addr, addr3);
else
hdr_80211_end_offset = total_len +
offsetof(struct ieee80211_hdr_3addr, addr1);
new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
memcpy((u8 *)new->pattern,
hdr_80211_pattern + new->pkt_offset,
new->pattern_len);
memcpy((u8 *)new->mask,
hdr_80211_bit_mask + new->pkt_offset,
new->pattern_len);
if (total_len > ETH_HLEN) {
/* Copy frame body */
memcpy((u8 *)new->pattern + new->pattern_len,
(void *)old->pattern + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
memcpy((u8 *)new->mask + new->pattern_len,
(void *)old->mask + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
new->pattern_len += total_len - ETH_HLEN;
}
}
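Spot-checking the offset arithmetic above (editor's worked example, using offsetof(struct ieee80211_hdr_3addr, addr1) == 4, addr3 == 16, a 24-byte 802.11 header, and an 8-byte RFC 1042 header):
/* Editor's worked example for ath11k_wow_convert_8023_to_80211():
 *
 *   802.3 offset  0 (h_dest)   -> 0 + 4            =  4 (addr1)
 *   802.3 offset  6 (h_source) -> 6 + 16 - 6       = 16 (addr3)
 *   802.3 offset 12 (h_proto)  -> 12 + 24 + 8 - 14 = 30 (snap_type)
 *
 * i.e. the EtherType lands right after the RFC 1042 SNAP OUI, which
 * is exactly where snap_type sits in the converted header.
 */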
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
struct cfg80211_sched_scan_request *nd_config,
struct wmi_pno_scan_req *pno)
{
int i, j;
u8 ssid_len;
pno->enable = 1;
pno->vdev_id = vdev_id;
pno->uc_networks_count = nd_config->n_match_sets;
if (!pno->uc_networks_count ||
pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
return -EINVAL;
if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
return -EINVAL;
/* Filling per profile params */
for (i = 0; i < pno->uc_networks_count; i++) {
ssid_len = nd_config->match_sets[i].ssid.ssid_len;
if (ssid_len == 0 || ssid_len > 32)
return -EINVAL;
pno->a_networks[i].ssid.ssid_len = ssid_len;
memcpy(pno->a_networks[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid_len);
pno->a_networks[i].authentication = 0;
pno->a_networks[i].encryption = 0;
pno->a_networks[i].bcast_nw_type = 0;
/* Copy the list of valid channels into the request */
pno->a_networks[i].channel_count = nd_config->n_channels;
pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
for (j = 0; j < nd_config->n_channels; j++) {
pno->a_networks[i].channels[j] =
nd_config->channels[j]->center_freq;
}
}
/* set scan to passive if no SSIDs are specified in the request */
if (nd_config->n_ssids == 0)
pno->do_passive_scan = true;
else
pno->do_passive_scan = false;
for (i = 0; i < nd_config->n_ssids; i++) {
j = 0;
while (j < pno->uc_networks_count) {
if (pno->a_networks[j].ssid.ssid_len ==
nd_config->ssids[i].ssid_len &&
(memcmp(pno->a_networks[j].ssid.ssid,
nd_config->ssids[i].ssid,
pno->a_networks[j].ssid.ssid_len) == 0)) {
pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
break;
}
j++;
}
}
if (nd_config->n_scan_plans == 2) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
pno->slow_scan_period =
nd_config->scan_plans[1].interval * MSEC_PER_SEC;
} else if (nd_config->n_scan_plans == 1) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = 1;
pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
} else {
ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
nd_config->n_scan_plans);
}
if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
/* enable mac randomization */
pno->enable_pno_scan_randomization = 1;
memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
}
pno->delay_start_time = nd_config->delay;
/* Current FW does not support min-max range for dwell time */
pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
return 0;
}
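A short worked example of the scan-plan mapping above (editor's illustration; the interval values are arbitrary):
/* Editor's worked example for the n_scan_plans branches:
 *
 *   two plans {10 s x 3 iterations} + {60 s}:
 *     fast_scan_period     = 10 * MSEC_PER_SEC = 10000
 *     fast_scan_max_cycles = 3
 *     slow_scan_period     = 60 * MSEC_PER_SEC = 60000
 *
 *   one plan {30 s}: a single fast cycle at 30000 ms, then a slow
 *   period of the same 30000 ms.
 */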
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
int ret, i;
unsigned long wow_mask = 0;
struct ath11k *ar = arvif->ar;
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int pattern_id = 0;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
fallthrough;
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
__set_bit(WOW_HTT_EVENT, &wow_mask);
__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
break;
case WMI_VDEV_TYPE_STA:
if (wowlan->disconnect) {
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_BMISS_EVENT, &wow_mask);
__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
}
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
if (wowlan->nd_config) {
struct wmi_pno_scan_req *pno;
int ret;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
ar->nlo_enabled = true;
ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
wowlan->nd_config, pno);
if (!ret) {
ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
}
kfree(pno);
}
break;
default:
break;
}
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
struct cfg80211_pkt_pattern new_pattern = {};
struct cfg80211_pkt_pattern old_pattern = patterns[i];
int j;
new_pattern.pattern = ath_pattern;
new_pattern.mask = ath_bitmask;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
if (patterns[i].pkt_offset < ETH_HLEN) {
u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
memcpy(pattern_ext, old_pattern.pattern,
old_pattern.pattern_len);
old_pattern.pattern = pattern_ext;
ath11k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
} else {
new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
}
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
return -EINVAL;
ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
pattern_id,
new_pattern.pattern,
new_pattern.mask,
new_pattern.pattern_len,
new_pattern.pkt_offset);
if (ret) {
ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
pattern_id,
arvif->vdev_id, ret);
return ret;
}
pattern_id++;
__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
}
for (i = 0; i < WOW_EVENT_MAX; i++) {
if (!test_bit(i, &wow_mask))
continue;
ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
if (ret) {
ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
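cfg80211 hands the driver one mask bit per pattern byte, while the firmware pattern interface wants one mask byte per pattern byte; the inner loop above expands between the two. A minimal standalone sketch with the same logic (the helper name is hypothetical):
/* Editor's sketch: expand a cfg80211 per-bit byte mask into the
 * per-byte 0x00/0xff mask the WMI pattern command expects.
 */
static void ath11k_wow_sketch_expand_mask(const u8 *nl_mask, u8 *fw_mask,
					  int pattern_len)
{
	int j;

	for (j = 0; j < pattern_len; j++)
		fw_mask[j] = (nl_mask[j / 8] & BIT(j % 8)) ? 0xff : 0x00;
}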
static int ath11k_wow_set_wakeups(struct ath11k *ar,
struct cfg80211_wowlan *wowlan)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
int ret = 0;
struct ath11k *ar = arvif->ar;
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
if (ar->nlo_enabled) {
struct wmi_pno_scan_req *pno;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
pno->enable = 0;
ar->nlo_enabled = false;
ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
kfree(pno);
}
break;
default:
break;
}
return ret;
}
static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_vif_wow_clean_nlo(arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
struct ath11k_vif *arvif;
u32 bitmap;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
bitmap,
true);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
if (ret) {
ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
arvif->vdev_id, enable, ret);
return ret;
}
}
return 0;
}
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
!arvif->is_up ||
!arvif->rekey_data.enable_offload)
continue;
/* get rekey info before disabling rekey offload */
if (!enable) {
ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
if (ret) {
ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
arvif->vdev_id, ret);
return ret;
}
}
ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
arvif->vdev_id, enable, ret);
return ret;
}
}
return 0;
}
static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
int ret;
ret = ath11k_wow_arp_ns_offload(ar, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
enable, ret);
return ret;
}
ret = ath11k_gtk_rekey_offload(ar, enable);
if (ret) {
ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
enable, ret);
return ret;
}
return 0;
}
static int ath11k_wow_set_keepalive(struct ath11k *ar,
enum wmi_sta_keepalive_method method,
u32 interval)
{
struct ath11k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
if (ret)
return ret;
}
return 0;
}
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct ath11k *ar = hw->priv;
int ret;
ret = ath11k_mac_wait_tx_complete(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
mutex_lock(&ar->conf_mutex);
ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
if (ret) {
ath11k_warn(ar->ab,
"failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_cleanup(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_protocol_offload(ar, true);
if (ret) {
ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_set_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
ret);
goto cleanup;
}
ret = ath11k_wow_set_keepalive(ar,
WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
if (ret) {
ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
goto cleanup;
}
ret = ath11k_wow_enable(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
goto cleanup;
}
ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
if (ret) {
ath11k_warn(ar->ab,
"failed to stop dp rx pktlog during wow suspend: %d\n",
ret);
goto cleanup;
}
ath11k_ce_stop_shadow_timers(ar->ab);
ath11k_dp_stop_shadow_timers(ar->ab);
ath11k_hif_irq_disable(ar->ab);
ath11k_hif_ce_irq_disable(ar->ab);
ret = ath11k_hif_suspend(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
goto wakeup;
}
goto exit;
wakeup:
ath11k_wow_wakeup(ar->ab);
cleanup:
ath11k_wow_cleanup(ar);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
struct ath11k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
device_set_wakeup_enable(ar->ab->dev, enabled);
mutex_unlock(&ar->conf_mutex);
}
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
struct ath11k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
ret = ath11k_hif_resume(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
goto exit;
}
ath11k_hif_ce_irq_enable(ar->ab);
ath11k_hif_irq_enable(ar->ab);
ret = ath11k_dp_rx_pktlog_start(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
goto exit;
}
ret = ath11k_wow_wakeup(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
goto exit;
}
ret = ath11k_wow_nlo_cleanup(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
goto exit;
}
ret = ath11k_wow_clear_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
goto exit;
}
ret = ath11k_wow_protocol_offload(ar, false);
if (ret) {
ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
ret);
goto exit;
}
ret = ath11k_wow_set_keepalive(ar,
WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
if (ret) {
ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
goto exit;
}
exit:
if (ret) {
switch (ar->state) {
case ATH11K_STATE_ON:
ar->state = ATH11K_STATE_RESTARTING;
ret = 1;
break;
case ATH11K_STATE_OFF:
case ATH11K_STATE_RESTARTING:
case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED:
case ATH11K_STATE_FTM:
ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
ar->state);
ret = -EIO;
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ret;
}
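A note on the return-value mapping in the suspend/resume paths above (editor's reading of the mac80211 contract, not part of the diff):
/* Editor's note: mac80211 treats a positive return from .suspend as
 * "WoWLAN not possible, fall back to a full suspend", and a return of
 * 1 from .resume as a request for a full reconfig/restart; that is
 * why errors are folded into 1 wherever recovery is possible and only
 * the hopeless resume states return a negative errno.
 */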
int ath11k_wow_init(struct ath11k *ar)
{
if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
return 0;
ar->wow.wowlan_support = ath11k_wowlan_support;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
}
ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
device_set_wakeup_capable(ar->ab->dev, true);
return 0;
}

View file

@ -3,8 +3,53 @@
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _WOW_H_
#define _WOW_H_
struct ath11k_wow {
u32 max_num_patterns;
struct completion wakeup_completed;
struct wiphy_wowlan_support wowlan_support;
};
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
#define ATH11K_WOW_RETRY_NUM 3
#define ATH11K_WOW_RETRY_WAIT_MS 200
#define ATH11K_WOW_PATTERNS 22
#ifdef CONFIG_PM
int ath11k_wow_init(struct ath11k *ar);
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath11k_wow_op_resume(struct ieee80211_hw *hw);
void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
int ath11k_wow_enable(struct ath11k_base *ab);
int ath11k_wow_wakeup(struct ath11k_base *ab);
#else
static inline int ath11k_wow_init(struct ath11k *ar)
{
return 0;
}
static inline int ath11k_wow_enable(struct ath11k_base *ab)
{
return 0;
}
static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
{
return 0;
}
#endif /* CONFIG_PM */
#endif /* _WOW_H_ */

View file

@ -4,15 +4,21 @@ DEVATH11KDIR= ${SRCTOP}/sys/contrib/dev/athk/ath11k
.PATH: ${DEVATH11KDIR}
WITH_DEBUGFS= 0 # Does not yet compile
WITH_CONFIG_PM= 0
KMOD= if_ath11k
SRCS+= core.c hal.c hal_tx.c hal_rx.c
SRCS+= wmi.c mac.c reg.c htc.c qmi.c
SRCS+= dp.c dp_tx.c dp_rx.c debug.c
SRCS+= ce.c peer.c dbring.c hw.c wow.c
SRCS+= ce.c peer.c dbring.c hw.c
SRCS+= mhi.c pci.c
SRCS+= mhi.c pci.c pcic.c
.if defined(WITH_CONFIG_PM) && ${WITH_CONFIG_PM} > 0
CFLAGS+= -DCONFIG_PM=${WITH_CONFIG_PM}
SRCS+= wow.c
.endif
# Other
SRCS+= ${LINUXKPI_GENSRCS}