Merge tag 'ieee802154-for-net-next-2022-10-25' of git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next

Stefan Schmidt says:

====================

==
One of the biggest cycles for ieee802154 in a long time. We are landing the
first pieces of big enhancements in managing PANs. We might have another pull
request ready for this cycle later on, but I want to get this one out first.

Miquel Raynal added support for sending frames synchronously as a dependency
for handling MLME commands. He also introduced more filtering levels to match
the needs of a device when scanning or operating as a PAN coordinator.
To support development and testing, the hwsim driver for ieee802154 was also
enhanced to cover the new filtering levels and to update the PIB attributes.

Alexander Aring fixed quite a few bugs spotted during reviewing changes. He
also added support for TRAC in the atusb driver to have better failure
handling if the firmware provides the needed information.

Jilin Yuan fixed a comment with a repeated word in it.
==================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2022-10-26 15:24:36 +01:00
commit 34e0b94520
16 changed files with 709 additions and 219 deletions

View file

@ -191,7 +191,7 @@ static void atusb_work_urbs(struct work_struct *work)
/* ----- Asynchronous USB -------------------------------------------------- */
static void atusb_tx_done(struct atusb *atusb, u8 seq)
static void atusb_tx_done(struct atusb *atusb, u8 seq, int reason)
{
struct usb_device *usb_dev = atusb->usb_dev;
u8 expect = atusb->tx_ack_seq;
@ -199,7 +199,10 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq)
dev_dbg(&usb_dev->dev, "%s (0x%02x/0x%02x)\n", __func__, seq, expect);
if (seq == expect) {
/* TODO check for ifs handling in firmware */
ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
if (reason == IEEE802154_SUCCESS)
ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
else
ieee802154_xmit_error(atusb->hw, atusb->tx_skb, reason);
} else {
/* TODO I experience this case when atusb has a tx complete
* irq before probing, we should fix the firmware it's an
@ -215,7 +218,8 @@ static void atusb_in_good(struct urb *urb)
struct usb_device *usb_dev = urb->dev;
struct sk_buff *skb = urb->context;
struct atusb *atusb = SKB_ATUSB(skb);
u8 len, lqi;
int result = IEEE802154_SUCCESS;
u8 len, lqi, trac;
if (!urb->actual_length) {
dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
@ -224,8 +228,27 @@ static void atusb_in_good(struct urb *urb)
len = *skb->data;
if (urb->actual_length == 1) {
atusb_tx_done(atusb, len);
switch (urb->actual_length) {
case 2:
trac = TRAC_MASK(*(skb->data + 1));
switch (trac) {
case TRAC_SUCCESS:
case TRAC_SUCCESS_DATA_PENDING:
/* already IEEE802154_SUCCESS */
break;
case TRAC_CHANNEL_ACCESS_FAILURE:
result = IEEE802154_CHANNEL_ACCESS_FAILURE;
break;
case TRAC_NO_ACK:
result = IEEE802154_NO_ACK;
break;
default:
result = IEEE802154_SYSTEM_ERROR;
}
fallthrough;
case 1:
atusb_tx_done(atusb, len, result);
return;
}

View file

@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
#include <net/genetlink.h>
@ -47,6 +48,8 @@ static const struct genl_multicast_group hwsim_mcgrps[] = {
/* Per-radio PAN Information Base. Published through phy->pib with
 * rcu_assign_pointer() and replaced wholesale on every update, so readers
 * only need rcu_read_lock() and the old copy is reclaimed via kfree_rcu().
 */
struct hwsim_pib {
u8 page; /* current channel page */
u8 channel; /* current channel within @page */
struct ieee802154_hw_addr_filt filt; /* hardware address filter settings */
enum ieee802154_filtering_level filt_level; /* active Rx filtering level */
struct rcu_head rcu; /* for kfree_rcu() when a new PIB is published */
};
@ -88,24 +91,168 @@ static int hwsim_hw_ed(struct ieee802154_hw *hw, u8 *level)
return 0;
}
static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
static int hwsim_update_pib(struct ieee802154_hw *hw, u8 page, u8 channel,
struct ieee802154_hw_addr_filt *filt,
enum ieee802154_filtering_level filt_level)
{
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib, *pib_old;
pib = kzalloc(sizeof(*pib), GFP_KERNEL);
pib = kzalloc(sizeof(*pib), GFP_ATOMIC);
if (!pib)
return -ENOMEM;
pib_old = rtnl_dereference(phy->pib);
pib->page = page;
pib->channel = channel;
pib->filt.short_addr = filt->short_addr;
pib->filt.pan_id = filt->pan_id;
pib->filt.ieee_addr = filt->ieee_addr;
pib->filt.pan_coord = filt->pan_coord;
pib->filt_level = filt_level;
pib_old = rtnl_dereference(phy->pib);
rcu_assign_pointer(phy->pib, pib);
kfree_rcu(pib_old, rcu);
return 0;
}
/* Switch the simulated radio to @page/@channel.
 *
 * The PIB is immutable once published, so this reads the live PIB under
 * RCU and asks hwsim_update_pib() to publish a new copy that carries the
 * new page/channel while preserving the current address filter and
 * filtering level.
 *
 * Return: 0 on success, -ENOMEM if the new PIB could not be allocated.
 */
static int hwsim_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib;
int ret;
rcu_read_lock();
pib = rcu_dereference(phy->pib);
ret = hwsim_update_pib(hw, page, channel, &pib->filt, pib->filt_level);
rcu_read_unlock();
return ret;
}
/* Apply a new hardware address filter by publishing an updated PIB,
 * preserving the current page/channel and filtering level.
 *
 * NOTE: @changed (the IEEE802154_AFILT_*_CHANGED mask) is ignored here;
 * every field of @filt is copied into the new PIB regardless. That is
 * harmless as long as callers always pass a fully populated @filt —
 * TODO confirm against the mac802154 callers.
 *
 * Return: 0 on success, -ENOMEM if the new PIB could not be allocated.
 */
static int hwsim_hw_addr_filt(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed)
{
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib;
int ret;
rcu_read_lock();
pib = rcu_dereference(phy->pib);
ret = hwsim_update_pib(hw, pib->page, pib->channel, filt, pib->filt_level);
rcu_read_unlock();
return ret;
}
/* Deliver an incoming frame to this phy, emulating in software the
 * receive-side filtering a real transceiver would perform.
 *
 * Only IEEE802154_FILTERING_4_FRAME_FIELDS is actively enforced below;
 * any lower filtering level lets the frame through untouched. Frames
 * failing a check are dropped and freed. On success the skb is handed to
 * the stack with ieee802154_rx_irqsafe() and ownership is transferred.
 */
static void hwsim_hw_receive(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi)
{
struct ieee802154_hdr hdr;
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib;
rcu_read_lock();
pib = rcu_dereference(phy->pib);
if (!pskb_may_pull(skb, 3)) {
dev_dbg(hw->parent, "invalid frame\n");
goto drop;
}
/* Grab the first 3 bytes: frame control (2) + sequence number (1),
 * i.e. the leading fields of struct ieee802154_hdr.
 */
memcpy(&hdr, skb->data, 3);
/* Level 4 filtering: Frame fields validity */
if (pib->filt_level == IEEE802154_FILTERING_4_FRAME_FIELDS) {
/* a) Drop reserved frame types */
switch (mac_cb(skb)->type) {
case IEEE802154_FC_TYPE_BEACON:
case IEEE802154_FC_TYPE_DATA:
case IEEE802154_FC_TYPE_ACK:
case IEEE802154_FC_TYPE_MAC_CMD:
break;
default:
dev_dbg(hw->parent, "unrecognized frame type 0x%x\n",
mac_cb(skb)->type);
goto drop;
}
/* b) Drop reserved frame versions */
switch (hdr.fc.version) {
case IEEE802154_2003_STD:
case IEEE802154_2006_STD:
case IEEE802154_STD:
break;
default:
dev_dbg(hw->parent,
"unrecognized frame version 0x%x\n",
hdr.fc.version);
goto drop;
}
/* c) PAN ID constraints */
if ((mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG ||
mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT) &&
mac_cb(skb)->dest.pan_id != pib->filt.pan_id &&
mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
dev_dbg(hw->parent,
"unrecognized PAN ID %04x\n",
le16_to_cpu(mac_cb(skb)->dest.pan_id));
goto drop;
}
/* d1) Short address constraints */
if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_SHORT &&
mac_cb(skb)->dest.short_addr != pib->filt.short_addr &&
mac_cb(skb)->dest.short_addr != cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
dev_dbg(hw->parent,
"unrecognized short address %04x\n",
le16_to_cpu(mac_cb(skb)->dest.short_addr));
goto drop;
}
/* d2) Extended address constraints */
if (mac_cb(skb)->dest.mode == IEEE802154_ADDR_LONG &&
mac_cb(skb)->dest.extended_addr != pib->filt.ieee_addr) {
dev_dbg(hw->parent,
"unrecognized long address 0x%016llx\n",
mac_cb(skb)->dest.extended_addr);
goto drop;
}
/* d4) Specific PAN coordinator case (no parent) */
if ((mac_cb(skb)->type == IEEE802154_FC_TYPE_DATA ||
mac_cb(skb)->type == IEEE802154_FC_TYPE_MAC_CMD) &&
mac_cb(skb)->dest.mode == IEEE802154_ADDR_NONE) {
dev_dbg(hw->parent,
"relaying is not supported\n");
goto drop;
}
/* e) Beacon frames follow specific PAN ID rules */
if (mac_cb(skb)->type == IEEE802154_FC_TYPE_BEACON &&
pib->filt.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST) &&
mac_cb(skb)->dest.pan_id != pib->filt.pan_id) {
dev_dbg(hw->parent,
"invalid beacon PAN ID %04x\n",
le16_to_cpu(mac_cb(skb)->dest.pan_id));
goto drop;
}
}
rcu_read_unlock();
/* Frame accepted: pass it up; the stack now owns the skb. */
ieee802154_rx_irqsafe(hw, skb, lqi);
return;
drop:
rcu_read_unlock();
kfree_skb(skb);
}
static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
struct hwsim_phy *current_phy = hw->priv;
@ -133,8 +280,7 @@ static int hwsim_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
einfo = rcu_dereference(e->info);
if (newskb)
ieee802154_rx_irqsafe(e->endpoint->hw, newskb,
einfo->lqi);
hwsim_hw_receive(e->endpoint->hw, newskb, einfo->lqi);
}
}
rcu_read_unlock();
@ -148,6 +294,7 @@ static int hwsim_hw_start(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = false;
return 0;
}
@ -161,7 +308,22 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw)
static int
hwsim_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
return 0;
enum ieee802154_filtering_level filt_level;
struct hwsim_phy *phy = hw->priv;
struct hwsim_pib *pib;
int ret;
if (on)
filt_level = IEEE802154_FILTERING_NONE;
else
filt_level = IEEE802154_FILTERING_4_FRAME_FIELDS;
rcu_read_lock();
pib = rcu_dereference(phy->pib);
ret = hwsim_update_pib(hw, pib->page, pib->channel, &pib->filt, filt_level);
rcu_read_unlock();
return ret;
}
static const struct ieee802154_ops hwsim_ops = {
@ -172,6 +334,7 @@ static const struct ieee802154_ops hwsim_ops = {
.start = hwsim_hw_start,
.stop = hwsim_hw_stop,
.set_promiscuous_mode = hwsim_set_promiscuous_mode,
.set_hw_addr_filt = hwsim_hw_addr_filt,
};
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
@ -788,11 +951,13 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
}
pib->channel = 13;
pib->filt.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
pib->filt.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
hw->flags = IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_RX_DROP_BAD_CKSUM;
hw->flags = IEEE802154_HW_PROMISCUOUS;
hw->parent = dev;
err = ieee802154_register_hw(hw);

View file

@ -1233,12 +1233,9 @@ mcr20a_probe(struct spi_device *spi)
}
rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
if (IS_ERR(rst_b)) {
ret = PTR_ERR(rst_b);
if (ret != -EPROBE_DEFER)
dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret);
return ret;
}
if (IS_ERR(rst_b))
return dev_err_probe(&spi->dev, PTR_ERR(rst_b),
"Failed to get 'rst_b' gpio");
/* reset mcr20a */
usleep_range(10, 20);

View file

@ -276,6 +276,30 @@ enum {
IEEE802154_SYSTEM_ERROR = 0xff,
};
/**
* enum ieee802154_filtering_level - Filtering levels applicable to a PHY
*
* @IEEE802154_FILTERING_NONE: No filtering at all, what is received is
* forwarded to the softMAC
* @IEEE802154_FILTERING_1_FCS: First filtering level, frames with an invalid
* FCS should be dropped
* @IEEE802154_FILTERING_2_PROMISCUOUS: Second filtering level, promiscuous
* mode as described in the spec, identical in terms of filtering to the
* level one on PHY side, but at the MAC level the frame should be
* forwarded to the upper layer directly
* @IEEE802154_FILTERING_3_SCAN: Third filtering level, scan related, where
* only beacons must be processed, all remaining traffic gets dropped
* @IEEE802154_FILTERING_4_FRAME_FIELDS: Fourth filtering level actually
* enforcing the validity of the content of the frame with various checks
*/
enum ieee802154_filtering_level {
IEEE802154_FILTERING_NONE,
IEEE802154_FILTERING_1_FCS,
IEEE802154_FILTERING_2_PROMISCUOUS,
IEEE802154_FILTERING_3_SCAN,
IEEE802154_FILTERING_4_FRAME_FIELDS,
};
/* frame control handling */
#define IEEE802154_FCTL_FTYPE 0x0003
#define IEEE802154_FCTL_ACKREQ 0x0020

View file

@ -11,7 +11,7 @@
#include <linux/ieee802154.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
#include <net/nl802154.h>
@ -166,11 +166,14 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
* level setting.
* @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode
* setting.
* @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was
* temporarily stopped.
*/
enum wpan_phy_flags {
WPAN_PHY_FLAG_TXPOWER = BIT(1),
WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
WPAN_PHY_FLAG_CCA_MODE = BIT(3),
WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4),
};
struct wpan_phy {
@ -182,7 +185,7 @@ struct wpan_phy {
*/
const void *privid;
u32 flags;
unsigned long flags;
/*
* This is a PIB according to 802.15.4-2011.
@ -214,6 +217,17 @@ struct wpan_phy {
/* the network namespace this phy lives in currently */
possible_net_t _net;
/* Transmission monitoring and control */
spinlock_t queue_lock;
atomic_t ongoing_txs;
atomic_t hold_txs;
wait_queue_head_t sync_txq;
/* Current filtering level on reception.
* Only allowed to be changed if phy is not operational.
*/
enum ieee802154_filtering_level filtering;
char priv[] __aligned(NETDEV_ALIGN);
};
@ -365,8 +379,6 @@ struct wpan_dev {
bool lbt;
bool promiscuous_mode;
/* fallback for acknowledgment bit setting */
bool ackreq;
};

View file

@ -85,6 +85,14 @@ struct ieee802154_hdr_fc {
#endif
};
/* Values of the frame version subfield in the IEEE 802.15.4 frame control
 * field. Multipurpose frames carry the same numeric value as the 2003
 * standard, hence the alias.
 */
enum ieee802154_frame_version {
IEEE802154_2003_STD,
IEEE802154_2006_STD,
IEEE802154_STD,
IEEE802154_RESERVED_STD,
IEEE802154_MULTIPURPOSE_STD = IEEE802154_2003_STD,
};
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;

View file

@ -111,9 +111,6 @@ struct ieee802154_hw {
* promiscuous mode setting.
*
* @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS.
*
* @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that receiver will not filter
* frames with bad checksum.
*/
enum ieee802154_hw_flags {
IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
@ -123,7 +120,6 @@ enum ieee802154_hw_flags {
IEEE802154_HW_AFILT = BIT(4),
IEEE802154_HW_PROMISCUOUS = BIT(5),
IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
};
/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
@ -460,33 +456,6 @@ void ieee802154_unregister_hw(struct ieee802154_hw *hw);
*/
void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
/**
* ieee802154_wake_queue - wake ieee802154 queue
* @hw: pointer as obtained from ieee802154_alloc_hw().
*
* Tranceivers usually have either one transmit framebuffer or one framebuffer
* for both transmitting and receiving. Hence, the core currently only handles
* one frame at a time for each phy, which means we had to stop the queue to
* avoid new skb to come during the transmission. The queue then needs to be
* woken up after the operation.
*
* Drivers should use this function instead of netif_wake_queue.
*/
void ieee802154_wake_queue(struct ieee802154_hw *hw);
/**
* ieee802154_stop_queue - stop ieee802154 queue
* @hw: pointer as obtained from ieee802154_alloc_hw().
*
* Tranceivers usually have either one transmit framebuffer or one framebuffer
* for both transmitting and receiving. Hence, the core currently only handles
* one frame at a time for each phy, which means we need to tell upper layers to
* stop giving us new skbs while we are busy with the transmitted one. The queue
* must then be stopped before transmitting.
*
* Drivers should use this function instead of netif_stop_queue.
*/
void ieee802154_stop_queue(struct ieee802154_hw *hw);
/**
* ieee802154_xmit_complete - frame transmission complete

View file

@ -129,6 +129,9 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
wpan_phy_net_set(&rdev->wpan_phy, &init_net);
init_waitqueue_head(&rdev->dev_wait);
init_waitqueue_head(&rdev->wpan_phy.sync_txq);
spin_lock_init(&rdev->wpan_phy.queue_lock);
return &rdev->wpan_phy;
}

View file

@ -46,7 +46,7 @@ static int ieee802154_suspend(struct wpan_phy *wpan_phy)
if (!local->open_count)
goto suspend;
ieee802154_stop_queue(&local->hw);
ieee802154_sync_and_hold_queue(local);
synchronize_net();
/* stop hardware - this must stop RX */
@ -67,12 +67,12 @@ static int ieee802154_resume(struct wpan_phy *wpan_phy)
goto wake_up;
/* restart hardware */
ret = drv_start(local);
ret = drv_start(local, local->phy->filtering, &local->addr_filt);
if (ret)
return ret;
wake_up:
ieee802154_wake_queue(&local->hw);
ieee802154_release_queue(local);
local->suspended = false;
return 0;
}

View file

@ -24,12 +24,186 @@ drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb)
return local->ops->xmit_sync(&local->hw, skb);
}
static inline int drv_start(struct ieee802154_local *local)
static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.pan_id = pan_id;
trace_802154_drv_set_pan_id(local, pan_id);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_PANID_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
/* Program the device's extended (64-bit IEEE) address into its hardware
 * address filter.
 *
 * Only filt.ieee_addr is initialized; the IEEEADDR_CHANGED flag indicates
 * to the driver which field is being updated. May sleep; drivers without
 * a set_hw_addr_filt callback trigger a WARN and get -EOPNOTSUPP.
 */
static inline int
drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.ieee_addr = extended_addr;
trace_802154_drv_set_extended_addr(local, extended_addr);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_IEEEADDR_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
/* Program the device's 16-bit short address into its hardware address
 * filter.
 *
 * Only filt.short_addr is initialized; the SADDR_CHANGED flag indicates
 * to the driver which field is being updated. May sleep; drivers without
 * a set_hw_addr_filt callback trigger a WARN and get -EOPNOTSUPP.
 */
static inline int
drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.short_addr = short_addr;
trace_802154_drv_set_short_addr(local, short_addr);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_SADDR_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
/* Tell the device whether it is acting as PAN coordinator, which affects
 * its hardware frame filtering.
 *
 * Only filt.pan_coord is initialized; the PANC_CHANGED flag indicates to
 * the driver which field is being updated. May sleep; drivers without a
 * set_hw_addr_filt callback trigger a WARN and get -EOPNOTSUPP.
 */
static inline int
drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.pan_coord = is_coord;
trace_802154_drv_set_pan_coord(local, is_coord);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_PANC_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
/* Enable or disable the transceiver's promiscuous receive mode.
 *
 * May sleep; drivers without a set_promiscuous_mode callback trigger a
 * WARN and get -EOPNOTSUPP. The call is bracketed by tracepoints for
 * debugging.
 */
static inline int
drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
{
int ret;
might_sleep();
if (!local->ops->set_promiscuous_mode) {
WARN_ON(1);
return -EOPNOTSUPP;
}
trace_802154_drv_set_promiscuous_mode(local, on);
ret = local->ops->set_promiscuous_mode(&local->hw, on);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int drv_start(struct ieee802154_local *local,
enum ieee802154_filtering_level level,
const struct ieee802154_hw_addr_filt *addr_filt)
{
int ret;
might_sleep();
/* setup receive mode parameters e.g. address mode */
if (local->hw.flags & IEEE802154_HW_AFILT) {
ret = drv_set_pan_id(local, addr_filt->pan_id);
if (ret < 0)
return ret;
ret = drv_set_short_addr(local, addr_filt->short_addr);
if (ret < 0)
return ret;
ret = drv_set_extended_addr(local, addr_filt->ieee_addr);
if (ret < 0)
return ret;
}
switch (level) {
case IEEE802154_FILTERING_NONE:
fallthrough;
case IEEE802154_FILTERING_1_FCS:
fallthrough;
case IEEE802154_FILTERING_2_PROMISCUOUS:
/* TODO: Requires a different receive mode setup e.g.
* at86rf233 hardware.
*/
fallthrough;
case IEEE802154_FILTERING_3_SCAN:
if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
ret = drv_set_promiscuous_mode(local, true);
if (ret < 0)
return ret;
} else {
return -EOPNOTSUPP;
}
/* In practice other filtering levels can be requested, but as
* for now most hardware/drivers only support
* IEEE802154_FILTERING_NONE, we fallback to this actual
* filtering level in hardware and make our own additional
* filtering in mac802154 receive path.
*
* TODO: Move this logic to the device drivers as hardware may
* support more higher level filters. Hardware may also require
* a different order how register are set, which could currently
* be buggy, so all received parameters need to be moved to the
* start() callback and let the driver go into the mode before
* it will turn on receive handling.
*/
local->phy->filtering = IEEE802154_FILTERING_NONE;
break;
case IEEE802154_FILTERING_4_FRAME_FIELDS:
/* Do not error out if IEEE802154_HW_PROMISCUOUS because we
* expect the hardware to operate at the level
* IEEE802154_FILTERING_4_FRAME_FIELDS anyway.
*/
if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
ret = drv_set_promiscuous_mode(local, false);
if (ret < 0)
return ret;
}
local->phy->filtering = IEEE802154_FILTERING_4_FRAME_FIELDS;
break;
default:
WARN_ON(1);
return -EINVAL;
}
trace_802154_drv_start(local);
local->started = true;
smp_mb();
@ -138,93 +312,6 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
return ret;
}
static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.pan_id = pan_id;
trace_802154_drv_set_pan_id(local, pan_id);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_PANID_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int
drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.ieee_addr = extended_addr;
trace_802154_drv_set_extended_addr(local, extended_addr);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_IEEEADDR_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int
drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.short_addr = short_addr;
trace_802154_drv_set_short_addr(local, short_addr);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_SADDR_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int
drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
{
struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
filt.pan_coord = is_coord;
trace_802154_drv_set_pan_coord(local, is_coord);
ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
IEEE802154_AFILT_PANC_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int
drv_set_csma_params(struct ieee802154_local *local, u8 min_be, u8 max_be,
u8 max_csma_backoffs)
@ -264,22 +351,4 @@ drv_set_max_frame_retries(struct ieee802154_local *local, s8 max_frame_retries)
return ret;
}
static inline int
drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
{
int ret;
might_sleep();
if (!local->ops->set_promiscuous_mode) {
WARN_ON(1);
return -EOPNOTSUPP;
}
trace_802154_drv_set_promiscuous_mode(local, on);
ret = local->ops->set_promiscuous_mode(&local->hw, on);
trace_802154_drv_return_int(local, ret);
return ret;
}
#endif /* __MAC802154_DRIVER_OPS */

View file

@ -26,6 +26,8 @@ struct ieee802154_local {
struct ieee802154_hw hw;
const struct ieee802154_ops *ops;
/* hardware address filter */
struct ieee802154_hw_addr_filt addr_filt;
/* ieee802154 phy */
struct wpan_phy *phy;
@ -55,7 +57,7 @@ struct ieee802154_local {
struct sk_buff_head skb_queue;
struct sk_buff *tx_skb;
struct work_struct tx_work;
struct work_struct sync_tx_work;
/* A negative Linux error code or a null/positive MLME error status */
int tx_result;
};
@ -82,6 +84,16 @@ struct ieee802154_sub_if_data {
struct ieee802154_local *local;
struct net_device *dev;
/* Each interface starts and works in nominal state at a given filtering
* level given by iface_default_filtering, which is set once for all at
* the interface creation and should not evolve over time. For some MAC
* operations however, the filtering level may change temporarily, as
* reflected in the required_filtering field. The actual filtering at
* the PHY level may be different and is shown in struct wpan_phy.
*/
enum ieee802154_filtering_level iface_default_filtering;
enum ieee802154_filtering_level required_filtering;
unsigned long state;
char name[IFNAMSIZ];
@ -123,13 +135,53 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
void ieee802154_xmit_worker(struct work_struct *work);
void ieee802154_xmit_sync_worker(struct work_struct *work);
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local);
int ieee802154_mlme_op_pre(struct ieee802154_local *local);
int ieee802154_mlme_tx(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
void ieee802154_mlme_op_post(struct ieee802154_local *local);
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
/**
* ieee802154_hold_queue - hold ieee802154 queue
* @local: main mac object
*
* Hold a queue by incrementing an atomic counter and requesting the netif
* queues to be stopped. The queues cannot be woken up while the counter has not
* been reset with as any ieee802154_release_queue() calls as needed.
*/
void ieee802154_hold_queue(struct ieee802154_local *local);
/**
* ieee802154_release_queue - release ieee802154 queue
* @local: main mac object
*
* Release a queue which is held by decrementing an atomic counter and wake it
* up only if the counter reaches 0.
*/
void ieee802154_release_queue(struct ieee802154_local *local);
/**
* ieee802154_disable_queue - disable ieee802154 queue
* @local: main mac object
*
* When trying to sync the Tx queue, we cannot just stop the queue
* (which is basically a bit being set without proper lock handling)
* because it would be racy. We actually need to call netif_tx_disable()
* instead, which is done by this helper. Restarting the queue can
* however still be done with a regular wake call.
*/
void ieee802154_disable_queue(struct ieee802154_local *local);
/* MIB callbacks */
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);

View file

@ -147,25 +147,12 @@ static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata)
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int ret;
if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
ret = drv_set_promiscuous_mode(local,
wpan_dev->promiscuous_mode);
if (ret < 0)
return ret;
}
sdata->required_filtering = sdata->iface_default_filtering;
if (local->hw.flags & IEEE802154_HW_AFILT) {
ret = drv_set_pan_id(local, wpan_dev->pan_id);
if (ret < 0)
return ret;
ret = drv_set_extended_addr(local, wpan_dev->extended_addr);
if (ret < 0)
return ret;
ret = drv_set_short_addr(local, wpan_dev->short_addr);
if (ret < 0)
return ret;
local->addr_filt.pan_id = wpan_dev->pan_id;
local->addr_filt.ieee_addr = wpan_dev->extended_addr;
local->addr_filt.short_addr = wpan_dev->short_addr;
}
if (local->hw.flags & IEEE802154_HW_LBT) {
@ -206,7 +193,8 @@ static int mac802154_slave_open(struct net_device *dev)
if (res)
goto err;
res = drv_start(local);
res = drv_start(local, sdata->required_filtering,
&local->addr_filt);
if (res)
goto err;
}
@ -223,15 +211,16 @@ static int mac802154_slave_open(struct net_device *dev)
static int
ieee802154_check_mac_settings(struct ieee802154_local *local,
struct wpan_dev *wpan_dev,
struct wpan_dev *nwpan_dev)
struct ieee802154_sub_if_data *sdata,
struct ieee802154_sub_if_data *nsdata)
{
struct wpan_dev *nwpan_dev = &nsdata->wpan_dev;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
ASSERT_RTNL();
if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
if (wpan_dev->promiscuous_mode != nwpan_dev->promiscuous_mode)
return -EBUSY;
}
if (sdata->iface_default_filtering != nsdata->iface_default_filtering)
return -EBUSY;
if (local->hw.flags & IEEE802154_HW_AFILT) {
if (wpan_dev->pan_id != nwpan_dev->pan_id ||
@ -285,8 +274,7 @@ ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
/* check all phy mac sublayer settings are the same.
* We have only one phy, different values makes trouble.
*/
ret = ieee802154_check_mac_settings(local, wpan_dev,
&nsdata->wpan_dev);
ret = ieee802154_check_mac_settings(local, sdata, nsdata);
if (ret < 0)
return ret;
}
@ -586,7 +574,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
sdata->dev->priv_destructor = mac802154_wpan_free;
sdata->dev->netdev_ops = &mac802154_wpan_ops;
sdata->dev->ml_priv = &mac802154_mlme_wpan;
wpan_dev->promiscuous_mode = false;
sdata->iface_default_filtering = IEEE802154_FILTERING_4_FRAME_FIELDS;
wpan_dev->header_ops = &ieee802154_header_ops;
mutex_init(&sdata->sec_mtx);
@ -600,7 +588,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
case NL802154_IFTYPE_MONITOR:
sdata->dev->needs_free_netdev = true;
sdata->dev->netdev_ops = &mac802154_monitor_ops;
wpan_dev->promiscuous_mode = true;
sdata->iface_default_filtering = IEEE802154_FILTERING_NONE;
break;
default:
BUG();

View file

@ -95,7 +95,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker);
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;

View file

@ -34,6 +34,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
struct wpan_phy *wpan_phy = sdata->local->hw.phy;
__le16 span, sshort;
int rc;
@ -42,6 +43,17 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
span = wpan_dev->pan_id;
sshort = wpan_dev->short_addr;
/* Level 3 filtering: Only beacons are accepted during scans */
if (sdata->required_filtering == IEEE802154_FILTERING_3_SCAN &&
sdata->required_filtering > wpan_phy->filtering) {
if (mac_cb(skb)->type != IEEE802154_FC_TYPE_BEACON) {
dev_dbg(&sdata->dev->dev,
"drop non-beacon frame (0x%x) during scan\n",
mac_cb(skb)->type);
goto fail;
}
}
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
if (hdr->source.mode != IEEE802154_ADDR_NONE)
@ -114,8 +126,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
static void
ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr)
{
if (addr->mode == IEEE802154_ADDR_NONE)
if (addr->mode == IEEE802154_ADDR_NONE) {
pr_debug("%s not present\n", name);
return;
}
pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
if (addr->mode == IEEE802154_ADDR_SHORT) {
@ -209,6 +223,13 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
if (!ieee802154_sdata_running(sdata))
continue;
/* Do not deliver packets received on interfaces expecting
* AACK=1 if the address filters where disabled.
*/
if (local->hw.phy->filtering < IEEE802154_FILTERING_4_FRAME_FIELDS &&
sdata->required_filtering == IEEE802154_FILTERING_4_FRAME_FIELDS)
continue;
ieee802154_subif_frame(sdata, skb, &hdr);
skb = NULL;
break;
@ -268,10 +289,8 @@ void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
ieee802154_monitors_rx(local, skb);
/* Check if transceiver doesn't validate the checksum.
* If not we validate the checksum here.
*/
if (local->hw.flags & IEEE802154_HW_RX_DROP_BAD_CKSUM) {
/* Level 1 filtering: Check the FCS by software when relevant */
if (local->hw.phy->filtering == IEEE802154_FILTERING_NONE) {
crc = crc_ccitt(0, skb->data, skb->len);
if (crc) {
rcu_read_unlock();

View file

@ -22,10 +22,10 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
void ieee802154_xmit_worker(struct work_struct *work)
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
struct ieee802154_local *local =
container_of(work, struct ieee802154_local, tx_work);
container_of(work, struct ieee802154_local, sync_tx_work);
struct sk_buff *skb = local->tx_skb;
struct net_device *dev = skb->dev;
int res;
@ -43,7 +43,9 @@ void ieee802154_xmit_worker(struct work_struct *work)
err_tx:
/* Restart the netif queue on each sub_if_data object. */
ieee802154_wake_queue(&local->hw);
ieee802154_release_queue(local);
if (atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
kfree_skb(skb);
netdev_dbg(dev, "transmission failed\n");
}
@ -65,7 +67,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
consume_skb(skb);
skb = nskb;
} else {
goto err_tx;
goto err_free_skb;
}
}
@@ -74,32 +76,134 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
}
/* Stop the netif queue on each sub_if_data object. */
ieee802154_stop_queue(&local->hw);
ieee802154_hold_queue(local);
atomic_inc(&local->phy->ongoing_txs);
/* async is priority, otherwise sync is fallback */
/* Drivers should preferably implement the async callback. In some rare
* cases they only provide a sync callback which we will use as a
* fallback.
*/
if (local->ops->xmit_async) {
unsigned int len = skb->len;
ret = drv_xmit_async(local, skb);
if (ret) {
ieee802154_wake_queue(&local->hw);
goto err_tx;
}
if (ret)
goto err_wake_netif_queue;
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->tx_work);
queue_work(local->workqueue, &local->sync_tx_work);
}
return NETDEV_TX_OK;
err_tx:
err_wake_netif_queue:
ieee802154_release_queue(local);
if (atomic_dec_and_test(&local->phy->ongoing_txs))
wake_up(&local->phy->sync_txq);
err_free_skb:
kfree_skb(skb);
return NETDEV_TX_OK;
}
/* Drain the transmit path: stop accepting new frames, wait until every
 * ongoing transmission has completed, and return the recorded result of
 * the last transmission. The hold taken here is dropped again before
 * returning, so the queue state is restored to what it was on entry.
 */
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	/* Prevent new skbs from entering the TX path while we drain. */
	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	/* Sleep until all in-flight transmissions have signalled completion
	 * (ongoing_txs is decremented by the TX completion/error paths).
	 */
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	/* tx_result holds the status of the most recent transmission, as set
	 * by the completion paths (e.g. ieee802154_xmit_error()).
	 */
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}
/* Drain the transmit path like ieee802154_sync_queue(), but keep the queue
 * stopped on return: the extra hold taken here is not released, so the
 * caller owns it and must eventually drop it (e.g. through
 * ieee802154_mlme_op_post() -> ieee802154_release_queue()).
 *
 * Return: the result of the last transmission, as reported by the sync.
 */
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	/* Take one hold of our own so the queue stays stopped after
	 * ieee802154_sync_queue() releases its internal hold.
	 */
	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	/* Advertise that the queue is being kept stopped on purpose; checked
	 * by ieee802154_queue_is_stopped() in the hot TX path.
	 */
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}
/* Prepare an MLME operation: drain all ongoing transmissions and keep the
 * transmit queue held until ieee802154_mlme_op_post() is called.
 *
 * Return: result of the queue synchronization.
 */
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	int ret = ieee802154_sync_and_hold_queue(local);

	return ret;
}
/* Transmit one MLME frame synchronously and report its outcome.
 *
 * @local: main mac object
 * @sdata: the sub interface the frame is sent on
 * @skb: the frame to transmit (ownership passed to the TX path)
 *
 * Return: the transmission result, or -ENETDOWN if the device was stopped.
 */
int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret = -ENETDOWN;

	/* Hold the RTNL so ->ndo_stop() cannot run while we asynchronously
	 * perform this MLME transmission.
	 */
	rtnl_lock();

	/* The device may have been stopped in the meantime: error out. */
	if (!local->open_count)
		goto out_unlock;

	/* The ieee802154 core thinks MLME frames can be sent although the net
	 * interface says this cannot happen: complain loudly, once.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev)))
		goto out_unlock;

	ieee802154_tx(local, skb);
	ret = ieee802154_sync_queue(local);

out_unlock:
	rtnl_unlock();

	return ret;
}
/* Counterpart of ieee802154_mlme_op_pre(): drop the extra queue hold taken
 * for the MLME operation, waking the netif queues again if this was the
 * last hold.
 */
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}
/* Convenience helper sending a single MLME frame: performs the pre/post
 * queue bookkeeping around one synchronous MLME transmission.
 *
 * Return: the transmission result from ieee802154_mlme_tx().
 */
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
			   struct ieee802154_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	int ret;

	/* NOTE(review): the return value of ieee802154_mlme_op_pre() (the
	 * sync result of previously queued traffic) is deliberately ignored
	 * here — only the outcome of this MLME frame is reported. Confirm
	 * callers do not rely on the pre-sync status.
	 */
	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}
/* Tell whether the ieee802154 core currently keeps the transmit queue
 * stopped (flag set by ieee802154_sync_and_hold_queue(), cleared when the
 * queue is woken again).
 */
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
	unsigned long *phy_flags = &local->phy->flags;

	return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, phy_flags);
}
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
/* Warn if the net interface tries to transmit frames while the
* ieee802154 core assumes the queue is stopped.
*/
WARN_ON_ONCE(ieee802154_queue_is_stopped(local));
return ieee802154_tx(local, skb);
}
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@ -107,7 +211,7 @@ ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->skb_iif = dev->ifindex;
return ieee802154_tx(sdata->local, skb);
return ieee802154_hot_tx(sdata->local, skb);
}
netdev_tx_t
@ -129,5 +233,5 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->skb_iif = dev->ifindex;
return ieee802154_tx(sdata->local, skb);
return ieee802154_hot_tx(sdata->local, skb);
}

View file

@ -13,12 +13,23 @@
/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
void ieee802154_wake_queue(struct ieee802154_hw *hw)
/**
* ieee802154_wake_queue - wake ieee802154 queue
* @local: main mac object
*
* Transceivers usually have either one transmit framebuffer or one framebuffer
* for both transmitting and receiving. Hence, the core currently only handles
* one frame at a time for each phy, which means we had to stop the queue to
* avoid new skb to come during the transmission. The queue then needs to be
* woken up after the operation.
*/
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
rcu_read_lock();
clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
@ -27,9 +38,18 @@ void ieee802154_wake_queue(struct ieee802154_hw *hw)
}
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee802154_wake_queue);
void ieee802154_stop_queue(struct ieee802154_hw *hw)
/**
* ieee802154_stop_queue - stop ieee802154 queue
* @local: main mac object
*
* Transceivers usually have either one transmit framebuffer or one framebuffer
* for both transmitting and receiving. Hence, the core currently only handles
* one frame at a time for each phy, which means we need to tell upper layers to
* stop giving us new skbs while we are busy with the transmitted one. The queue
* must then be stopped before transmitting.
*/
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
@ -43,14 +63,47 @@ void ieee802154_stop_queue(struct ieee802154_hw *hw)
}
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee802154_stop_queue);
/* Take one hold reference on the transmit queue, stopping the netif queues
 * on the first hold. Pairs with ieee802154_release_queue(); holds nest via
 * the hold_txs counter.
 */
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* queue_lock serialises the counter check against concurrent
	 * hold/release so stop and wake cannot interleave badly.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	/* atomic_fetch_inc() returns the previous value: only the first
	 * holder actually stops the queue.
	 */
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
/* Drop one hold reference on the transmit queue, waking the netif queues
 * when the last hold goes away. Pairs with ieee802154_hold_queue().
 */
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	/* Same lock as the hold path: the decrement-and-wake must not race
	 * with a concurrent increment-and-stop.
	 */
	spin_lock_irqsave(&local->phy->queue_lock, flags);
	/* Only the last releaser wakes the queue. */
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
/* Hard-disable transmission on every interface of this phy with
 * netif_tx_disable(), which also waits for ongoing ndo_start_xmit calls to
 * finish — unlike the plain queue stop done by ieee802154_hold_queue().
 */
void ieee802154_disable_queue(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *iter;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &local->interfaces, list) {
		/* Skip sub interfaces without a backing net device. */
		if (!iter->dev)
			continue;

		netif_tx_disable(iter->dev);
	}
	rcu_read_unlock();
}
/* IFS (interframe spacing) timer expiry handler: the mandatory gap after
 * the previous transmission has elapsed, so drop the queue hold taken by
 * the TX completion path and let traffic flow again.
 *
 * Fix: the extracted text contained both the removed
 * ieee802154_wake_queue() call and its replacement
 * ieee802154_release_queue() (diff residue); executing both would wake the
 * queue unconditionally and corrupt the hold_txs accounting. Only the
 * release call belongs here.
 */
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	struct ieee802154_local *local =
		container_of(timer, struct ieee802154_local, ifs_timer);

	/* Last holder: this wakes the netif queues again. */
	ieee802154_release_queue(local);

	/* One-shot timer, re-armed by the next transmission completion. */
	return HRTIMER_NORESTART;
}
@@ -84,10 +137,12 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
hw->phy->sifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
} else {
ieee802154_wake_queue(hw);
ieee802154_release_queue(local);
}
dev_consume_skb_any(skb);
if (atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
@ -97,8 +152,10 @@ void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
struct ieee802154_local *local = hw_to_local(hw);
local->tx_result = reason;
ieee802154_wake_queue(hw);
ieee802154_release_queue(local);
dev_kfree_skb_any(skb);
if (atomic_dec_and_test(&hw->phy->ongoing_txs))
wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);