LinuxKPI: Remove sys/rman.h include from LKPI headers.

sys/rman.h defines a `resource` structure which conflicts with the Linux
structure of the same name. To fix that, remove the reference to
sys/rman.h from linux/pci.h and move the resource-management code to
linux_pci.c. Update consumers that were depending on this linux/pci.h
header pollution.

No functional changes intended.

Sponsored by:	Serenity Cyber Security, LLC
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D42792
Author:	Vladimir Kondratyev
Date:	2023-12-24 11:19:57 +03:00
parent	0ee1cd6da9
commit	96ab16ebab
7 changed files with 417 additions and 387 deletions
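
For context, the clash is an ordinary C tag collision: sys/rman.h and the
LinuxKPI headers both define a `struct resource`, with incompatible layouts,
so any translation unit that ends up seeing both definitions fails to
compile. Since linux/pci.h only ever handles the FreeBSD object through
pointers, an opaque forward declaration is all the header needs. A minimal
sketch of the idea (the example_region name is hypothetical; its real
counterpart is pci_mmio_region in the hunks below):

#include <sys/queue.h>

/*
 * Opaque forward declaration: the layout of the FreeBSD structure is
 * deliberately not visible here, so it cannot collide with a Linux
 * structure of the same name.
 */
struct resource;

struct example_region {
	TAILQ_ENTRY(example_region) next;
	struct resource *res;	/* pointer-only use; the size is never needed */
};

Code that actually dereferences such a pointer, for example through
rman_get_bushandle(), has to live in linux_pci.c, which is free to include
sys/rman.h.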


@@ -40,7 +40,7 @@
typedef irqreturn_t (*irq_handler_t)(int, void *);
-#define IRQF_SHARED RF_SHAREABLE
+#define IRQF_SHARED 0x0004 /* Historically */
#define IRQF_NOBALANCING 0
#define IRQ_DISABLE_UNLAZY 0


@@ -42,7 +42,6 @@
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/pciio.h>
-#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
@@ -306,10 +305,16 @@ struct msi_desc {
struct pci_msi_desc pci;
};
struct msix_entry {
int entry;
int vector;
};
/*
* If we find drivers accessing this from multiple KPIs we may have to
* refcount objects of this structure.
*/
struct resource;
struct pci_mmio_region {
TAILQ_ENTRY(pci_mmio_region) next;
struct resource *res;
@@ -346,32 +351,30 @@ struct pci_dev {
TAILQ_HEAD(, pci_mmio_region) mmio;
};
/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
bool enable_io;
/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
uint8_t region_mask;
struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
void *mmio_table[PCIR_MAX_BAR_0 + 1];
struct resource *res_table[PCIR_MAX_BAR_0 + 1];
};
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name);
int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
unsigned int flags);
bool pci_device_is_present(struct pci_dev *pdev);
int linuxkpi_pcim_enable_device(struct pci_dev *pdev);
void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
void *linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size);
void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
const char *name);
int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
void linuxkpi_pci_release_regions(struct pci_dev *pdev);
int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
int nreq);
/* Internal helper function(s). */
struct pci_dev *lkpinew_pci_dev(device_t);
struct pci_devres *lkpi_pci_devres_get_alloc(struct pci_dev *pdev);
void lkpi_pci_devres_release(struct device *, void *);
struct resource *_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size);
struct pcim_iomap_devres *lkpi_pcim_iomap_devres_find(struct pci_dev *pdev);
void lkpi_pcim_iomap_table_release(struct device *, void *);
struct pci_dev *lkpi_pci_get_device(uint16_t, uint16_t, struct pci_dev *);
struct msi_desc *lkpi_pci_msi_desc_alloc(int);
struct device *lkpi_pci_find_irq_dev(unsigned int irq);
int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec);
static inline bool
dev_is_pci(struct device *dev)
@@ -395,56 +398,6 @@ pci_resource_type(struct pci_dev *pdev, int bar)
return (SYS_RES_MEMORY);
}
struct resource_list_entry *linux_pci_reserve_bar(struct pci_dev *pdev,
struct resource_list *rl, int type, int rid);
static inline struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
struct pci_devinfo *dinfo;
struct resource_list *rl;
struct resource_list_entry *rle;
dinfo = device_get_ivars(pdev->dev.bsddev);
rl = &dinfo->resources;
rle = resource_list_find(rl, type, rid);
/* Reserve resources for this BAR if needed. */
if (rle == NULL && reserve_bar)
rle = linux_pci_reserve_bar(pdev, rl, type, rid);
return (rle);
}
static inline struct resource_list_entry *
linux_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
int type;
type = pci_resource_type(pdev, bar);
if (type < 0)
return (NULL);
bar = PCIR_BAR(bar);
return (linux_pci_get_rle(pdev, type, bar, reserve));
}
static inline struct device *
linux_pci_find_irq_dev(unsigned int irq)
{
struct pci_dev *pdev;
struct device *found;
found = NULL;
spin_lock(&pci_lock);
list_for_each_entry(pdev, &pci_devices, links) {
if (irq == pdev->dev.irq ||
(irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
found = &pdev->dev;
break;
}
}
spin_unlock(&pci_lock);
return (found);
}
/*
* All drivers just seem to want to inspect the type not flags.
*/
@@ -578,73 +531,10 @@ pci_upstream_bridge(struct pci_dev *pdev)
return (pdev->bus->self);
}
static inline struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
if (!pdev->managed)
return (NULL);
return (lkpi_pci_devres_get_alloc(pdev));
}
static inline void
pci_release_region(struct pci_dev *pdev, int bar)
{
struct resource_list_entry *rle;
struct pci_devres *dr;
struct pci_mmio_region *mmio, *p;
if ((rle = linux_pci_get_bar(pdev, bar, false)) == NULL)
return;
/*
* As we implicitly track the requests we also need to clear them on
* release. Do clear before resource release.
*/
dr = lkpi_pci_devres_find(pdev);
if (dr != NULL) {
KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
" region_table res %p != rel->res %p\n", __func__, pdev,
bar, dr->region_table[bar], rle->res));
dr->region_table[bar] = NULL;
dr->region_mask &= ~(1 << bar);
}
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (rle->res != (void *)rman_get_bushandle(mmio->res))
continue;
TAILQ_REMOVE(&pdev->mmio, mmio, next);
free(mmio, M_DEVBUF);
}
bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}
static inline void
pci_release_regions(struct pci_dev *pdev)
{
int i;
for (i = 0; i <= PCIR_MAX_BAR_0; i++)
pci_release_region(pdev, i);
}
static inline int
pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
int error;
int i;
for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
error = pci_request_region(pdev, i, res_name);
if (error && error != -ENODEV) {
pci_release_regions(pdev);
return (error);
}
}
return (0);
}
#define pci_release_region(pdev, bar) linuxkpi_pci_release_region(pdev, bar)
#define pci_release_regions(pdev) linuxkpi_pci_release_regions(pdev)
#define pci_request_regions(pdev, res_name) \
linuxkpi_pci_request_regions(pdev, res_name)
static inline void
lkpi_pci_disable_msix(struct pci_dev *pdev)
@@ -655,7 +545,7 @@ lkpi_pci_disable_msix(struct pci_dev *pdev)
/*
* The MSIX IRQ numbers associated with this PCI device are no
* longer valid and might be re-assigned. Make sure
-* linux_pci_find_irq_dev() does no longer see them by
+* lkpi_pci_find_irq_dev() does no longer see them by
* resetting their references to zero:
*/
pdev->dev.irq_start = 0;
@@ -813,11 +703,6 @@ void linux_pci_unregister_drm_driver(struct pci_driver *pdrv);
#define pci_register_driver(pdrv) linux_pci_register_driver(pdrv)
#define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv)
struct msix_entry {
int entry;
int vector;
};
/*
* Enable msix, positive errors indicate actual number of available
* vectors. Negative errors are failures.
@@ -825,42 +710,7 @@ struct msix_entry {
* NB: define added to prevent this definition of pci_enable_msix from
* clashing with the native FreeBSD version.
*/
-#define pci_enable_msix(...) \
-linux_pci_enable_msix(__VA_ARGS__)
static inline int
pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
{
struct resource_list_entry *rle;
int error;
int avail;
int i;
avail = pci_msix_count(pdev->dev.bsddev);
if (avail < nreq) {
if (avail == 0)
return -EINVAL;
return avail;
}
avail = nreq;
if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
return error;
/*
* Handle case where "pci_alloc_msix()" may allocate less
* interrupts than available and return with no error:
*/
if (avail < nreq) {
pci_release_msi(pdev->dev.bsddev);
return avail;
}
rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
pdev->dev.irq_start = rle->start;
pdev->dev.irq_end = rle->start + avail;
for (i = 0; i < nreq; i++)
entries[i].vector = pdev->dev.irq_start + i;
pdev->msix_enabled = true;
return (0);
}
+#define pci_enable_msix(...) linuxkpi_pci_enable_msix(__VA_ARGS__)
#define pci_enable_msix_range(...) \
linux_pci_enable_msix_range(__VA_ARGS__)
@@ -891,38 +741,6 @@ pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
#define pci_enable_msi(pdev) \
linux_pci_enable_msi(pdev)
static inline int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
struct resource_list_entry *rle;
int error;
int nvec;
if (maxvec < minvec)
return (-EINVAL);
nvec = pci_msi_count(pdev->dev.bsddev);
if (nvec < 1 || nvec < minvec)
return (-ENOSPC);
nvec = min(nvec, maxvec);
if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
return error;
/* Native PCI might only ever ask for 32 vectors. */
if (nvec < minvec) {
pci_release_msi(pdev->dev.bsddev);
return (-ENOSPC);
}
rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
pdev->dev.irq_start = rle->start;
pdev->dev.irq_end = rle->start + nvec;
pdev->irq = rle->start;
pdev->msi_enabled = true;
return (0);
}
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
@@ -946,35 +764,9 @@ static inline void pci_disable_sriov(struct pci_dev *dev)
{
}
static inline void *
pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
{
struct resource *res;
res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
if (res == NULL)
return (NULL);
/* This is a FreeBSD extension so we can use bus_*(). */
if (pdev->want_iomap_res)
return (res);
return ((void *)rman_get_bushandle(res));
}
static inline void
pci_iounmap(struct pci_dev *pdev, void *res)
{
struct pci_mmio_region *mmio, *p;
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (res != (void *)rman_get_bushandle(mmio->res))
continue;
bus_release_resource(pdev->dev.bsddev,
mmio->type, mmio->rid, mmio->res);
TAILQ_REMOVE(&pdev->mmio, mmio, next);
free(mmio, M_DEVBUF);
return;
}
}
#define pci_iomap(pdev, mmio_bar, mmio_size) \
linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
#define pci_iounmap(pdev, res) linuxkpi_pci_iounmap(pdev, res)
static inline void
lkpi_pci_save_state(struct pci_dev *pdev)
@@ -1534,97 +1326,10 @@ struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from);
/* -------------------------------------------------------------------------- */
static inline int
pcim_enable_device(struct pci_dev *pdev)
{
struct pci_devres *dr;
int error;
/* Here we cannot run through the pdev->managed check. */
dr = lkpi_pci_devres_get_alloc(pdev);
if (dr == NULL)
return (-ENOMEM);
/* If resources were enabled before do not do it again. */
if (dr->enable_io)
return (0);
error = pci_enable_device(pdev);
if (error == 0)
dr->enable_io = true;
/* This device is not managed. */
pdev->managed = true;
return (error);
}
static inline void __iomem **
pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr;
dr = lkpi_pcim_iomap_devres_find(pdev);
if (dr == NULL)
return (NULL);
/*
* If the driver has manually set a flag to be able to request the
* resource to use bus_read/write_<n>, return the shadow table.
*/
if (pdev->want_iomap_res)
return ((void **)dr->res_table);
/* This is the Linux default. */
return (dr->mmio_table);
}
static inline int
pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
struct pcim_iomap_devres *dr;
void *res;
uint32_t mappings;
int bar;
dr = lkpi_pcim_iomap_devres_find(pdev);
if (dr == NULL)
return (-ENOMEM);
/* Now iomap all the requested (by "mask") ones. */
for (bar = mappings = 0; mappings != mask; bar++) {
if ((mask & (1 << bar)) == 0)
continue;
/* Request double is not allowed. */
if (dr->mmio_table[bar] != NULL) {
device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
__func__, bar, dr->mmio_table[bar]);
goto err;
}
res = _lkpi_pci_iomap(pdev, bar, 0);
if (res == NULL)
goto err;
dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
dr->res_table[bar] = res;
mappings |= (1 << bar);
}
return (0);
err:
for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
if ((mappings & (1 << bar)) != 0) {
res = dr->mmio_table[bar];
if (res == NULL)
continue;
pci_iounmap(pdev, res);
}
}
return (-EINVAL);
}
#define pcim_enable_device(pdev) linuxkpi_pcim_enable_device(pdev)
#define pcim_iomap_table(pdev) linuxkpi_pcim_iomap_table(pdev)
#define pcim_iomap_regions(pdev, mask, name) \
linuxkpi_pcim_iomap_regions(pdev, mask, name)
static inline int
pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)


@@ -117,17 +117,20 @@ lkpi_request_irq(struct device *xdev, unsigned int irq,
struct resource *res;
struct irq_ent *irqe;
struct device *dev;
+unsigned resflags;
int error;
int rid;
-dev = linux_pci_find_irq_dev(irq);
+dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return -ENXIO;
if (xdev != NULL && xdev != dev)
return -ENXIO;
rid = lkpi_irq_rid(dev, irq);
-res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid,
-flags | RF_ACTIVE);
+resflags = RF_ACTIVE;
+if ((flags & IRQF_SHARED) != 0)
+resflags |= RF_SHAREABLE;
+res = bus_alloc_resource_any(dev->bsddev, SYS_RES_IRQ, &rid, resflags);
if (res == NULL)
return (-ENXIO);
if (xdev != NULL)
@@ -167,7 +170,7 @@ lkpi_enable_irq(unsigned int irq)
struct irq_ent *irqe;
struct device *dev;
-dev = linux_pci_find_irq_dev(irq);
+dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return -EINVAL;
irqe = lkpi_irq_ent(dev, irq);
@@ -183,7 +186,7 @@ lkpi_disable_irq(unsigned int irq)
struct irq_ent *irqe;
struct device *dev;
dev = linux_pci_find_irq_dev(irq);
dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
irqe = lkpi_irq_ent(dev, irq);
@@ -200,7 +203,7 @@ lkpi_bind_irq_to_cpu(unsigned int irq, int cpu_id)
struct irq_ent *irqe;
struct device *dev;
dev = linux_pci_find_irq_dev(irq);
dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return (-ENOENT);
@@ -217,7 +220,7 @@ lkpi_free_irq(unsigned int irq, void *device __unused)
struct irq_ent *irqe;
struct device *dev;
dev = linux_pci_find_irq_dev(irq);
dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
irqe = lkpi_irq_ent(dev, irq);
@@ -233,7 +236,7 @@ lkpi_devm_free_irq(struct device *xdev, unsigned int irq, void *p __unused)
struct device *dev;
struct irq_ent *irqe;
dev = linux_pci_find_irq_dev(irq);
dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return;
if (xdev != dev)
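
Note that the IRQF_SHARED translation now happens here, at
resource-allocation time, rather than in the flag's definition: IRQF_SHARED
is an opaque Linux value (0x0004 above) until lkpi_request_irq() maps it
onto RF_SHAREABLE. Driver-facing code is unchanged; a hypothetical caller
still just passes the Linux flag:

/* Hypothetical LinuxKPI driver fragment: requesting a shared interrupt. */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t
mydrv_isr(int irq, void *arg)
{
	/* arg is the softc pointer handed to request_irq() below. */
	return (IRQ_HANDLED);
}

static int
mydrv_setup_irq(struct pci_dev *pdev, void *sc)
{
	/* IRQF_SHARED becomes RF_SHAREABLE inside lkpi_request_irq(). */
	return (request_irq(pdev->irq, mydrv_isr, IRQF_SHARED, "mydrv", sc));
}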


@@ -41,11 +41,14 @@
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <dev/pci/pcivar.h>
@@ -95,6 +98,7 @@ static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);
static device_method_t pci_methods[] = {
DEVMETHOD(device_probe, linux_pci_probe),
@@ -118,6 +122,18 @@ const char *pci_power_names[] = {
"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};
/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
bool enable_io;
/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
uint8_t region_mask;
struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
void *mmio_table[PCIR_MAX_BAR_0 + 1];
struct resource *res_table[PCIR_MAX_BAR_0 + 1];
};
struct linux_dma_priv {
uint64_t dma_mask;
bus_dma_tag_t dmat;
@@ -434,6 +450,41 @@ linux_pci_attach(device_t dev)
return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
int type, int rid)
{
device_t dev;
struct resource *res;
KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
("trying to reserve non-BAR type %d", type));
dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
1, 1, 0);
if (res == NULL)
return (NULL);
return (resource_list_find(rl, type, rid));
}
static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
struct pci_devinfo *dinfo;
struct resource_list *rl;
struct resource_list_entry *rle;
dinfo = device_get_ivars(pdev->dev.bsddev);
rl = &dinfo->resources;
rle = resource_list_find(rl, type, rid);
/* Reserve resources for this BAR if needed. */
if (rle == NULL && reserve_bar)
rle = linux_pci_reserve_bar(pdev, rl, type, rid);
return (rle);
}
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
const struct pci_device_id *id, struct pci_dev *pdev)
@@ -542,7 +593,7 @@ lkpi_pci_disable_dev(struct device *dev)
return (0);
}
-struct pci_devres *
+static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
struct pci_devres *dr;
@@ -558,6 +609,15 @@ lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
return (dr);
}
static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
if (!pdev->managed)
return (NULL);
return (lkpi_pci_devres_get_alloc(pdev));
}
void
lkpi_pci_devres_release(struct device *dev, void *p)
{
@@ -586,7 +646,32 @@ lkpi_pci_devres_release(struct device *dev, void *p)
}
}
struct pcim_iomap_devres *
int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
struct pci_devres *dr;
int error;
/* Here we cannot run through the pdev->managed check. */
dr = lkpi_pci_devres_get_alloc(pdev);
if (dr == NULL)
return (-ENOMEM);
/* If resources were enabled before do not do it again. */
if (dr->enable_io)
return (0);
error = pci_enable_device(pdev);
if (error == 0)
dr->enable_io = true;
/* This device is not managed. */
pdev->managed = true;
return (error);
}
static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr;
@@ -606,7 +691,144 @@ lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
return (dr);
}
void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr;
dr = lkpi_pcim_iomap_devres_find(pdev);
if (dr == NULL)
return (NULL);
/*
* If the driver has manually set a flag to be able to request the
* resource to use bus_read/write_<n>, return the shadow table.
*/
if (pdev->want_iomap_res)
return ((void **)dr->res_table);
/* This is the Linux default. */
return (dr->mmio_table);
}
static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
struct pci_mmio_region *mmio, *p;
int type;
type = pci_resource_type(pdev, bar);
if (type < 0) {
device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
__func__, bar, type);
return (NULL);
}
/*
* Check for duplicate mappings.
* This can happen if a driver calls pci_request_region() first.
*/
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
return (mmio->res);
}
}
mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
mmio->rid = PCIR_BAR(bar);
mmio->type = type;
mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
&mmio->rid, RF_ACTIVE|RF_SHAREABLE);
if (mmio->res == NULL) {
device_printf(pdev->dev.bsddev, "%s: failed to alloc "
"bar %d type %d rid %d\n",
__func__, bar, type, PCIR_BAR(bar));
free(mmio, M_DEVBUF);
return (NULL);
}
TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
return (mmio->res);
}
void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
{
struct resource *res;
res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
if (res == NULL)
return (NULL);
/* This is a FreeBSD extension so we can use bus_*(). */
if (pdev->want_iomap_res)
return (res);
return ((void *)rman_get_bushandle(res));
}
void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
struct pci_mmio_region *mmio, *p;
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (res != (void *)rman_get_bushandle(mmio->res))
continue;
bus_release_resource(pdev->dev.bsddev,
mmio->type, mmio->rid, mmio->res);
TAILQ_REMOVE(&pdev->mmio, mmio, next);
free(mmio, M_DEVBUF);
return;
}
}
int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
struct pcim_iomap_devres *dr;
void *res;
uint32_t mappings;
int bar;
dr = lkpi_pcim_iomap_devres_find(pdev);
if (dr == NULL)
return (-ENOMEM);
/* Now iomap all the requested (by "mask") ones. */
for (bar = mappings = 0; mappings != mask; bar++) {
if ((mask & (1 << bar)) == 0)
continue;
/* Request double is not allowed. */
if (dr->mmio_table[bar] != NULL) {
device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
__func__, bar, dr->mmio_table[bar]);
goto err;
}
res = _lkpi_pci_iomap(pdev, bar, 0);
if (res == NULL)
goto err;
dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
dr->res_table[bar] = res;
mappings |= (1 << bar);
}
return (0);
err:
for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
if ((mappings & (1 << bar)) != 0) {
res = dr->mmio_table[bar];
if (res == NULL)
continue;
pci_iounmap(pdev, res);
}
}
return (-EINVAL);
}
static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
struct pcim_iomap_devres *dr;
@@ -758,23 +980,35 @@ linux_pci_register_driver(struct pci_driver *pdrv)
return (_linux_pci_register_driver(pdrv, dc));
}
struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
int type, int rid)
static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
device_t dev;
struct resource *res;
int type;
KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
("trying to reserve non-BAR type %d", type));
dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
1, 1, 0);
if (res == NULL)
type = pci_resource_type(pdev, bar);
if (type < 0)
return (NULL);
return (resource_list_find(rl, type, rid));
bar = PCIR_BAR(bar);
return (linux_pci_get_rle(pdev, type, bar, reserve));
}
struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
struct pci_dev *pdev;
struct device *found;
found = NULL;
spin_lock(&pci_lock);
list_for_each_entry(pdev, &pci_devices, links) {
if (irq == pdev->dev.irq ||
(irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
found = &pdev->dev;
break;
}
}
spin_unlock(&pci_lock);
return (found);
}
unsigned long
@@ -785,7 +1019,7 @@ pci_resource_start(struct pci_dev *pdev, int bar)
device_t dev;
int error;
if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
return (0);
dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
@@ -804,7 +1038,7 @@ pci_resource_len(struct pci_dev *pdev, int bar)
{
struct resource_list_entry *rle;
if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
return (0);
return (rle->count);
}
@@ -853,44 +1087,62 @@ pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
return (0);
}
struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
struct pci_mmio_region *mmio, *p;
int type;
int error;
int i;
type = pci_resource_type(pdev, bar);
if (type < 0) {
device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
__func__, bar, type);
return (NULL);
}
/*
* Check for duplicate mappings.
* This can happen if a driver calls pci_request_region() first.
*/
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
return (mmio->res);
for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
error = pci_request_region(pdev, i, res_name);
if (error && error != -ENODEV) {
pci_release_regions(pdev);
return (error);
}
}
return (0);
}
mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
mmio->rid = PCIR_BAR(bar);
mmio->type = type;
mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
&mmio->rid, RF_ACTIVE|RF_SHAREABLE);
if (mmio->res == NULL) {
device_printf(pdev->dev.bsddev, "%s: failed to alloc "
"bar %d type %d rid %d\n",
__func__, bar, type, PCIR_BAR(bar));
free(mmio, M_DEVBUF);
return (NULL);
void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
struct resource_list_entry *rle;
struct pci_devres *dr;
struct pci_mmio_region *mmio, *p;
if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
return;
/*
* As we implicitly track the requests we also need to clear them on
* release. Do clear before resource release.
*/
dr = lkpi_pci_devres_find(pdev);
if (dr != NULL) {
KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
" region_table res %p != rel->res %p\n", __func__, pdev,
bar, dr->region_table[bar], rle->res));
dr->region_table[bar] = NULL;
dr->region_mask &= ~(1 << bar);
}
TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);
return (mmio->res);
TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
if (rle->res != (void *)rman_get_bushandle(mmio->res))
continue;
TAILQ_REMOVE(&pdev->mmio, mmio, next);
free(mmio, M_DEVBUF);
}
bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}
void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
int i;
for (i = 0; i <= PCIR_MAX_BAR_0; i++)
pci_release_region(pdev, i);
}
int
@@ -938,6 +1190,73 @@ linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
bus_topo_unlock();
}
int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
int nreq)
{
struct resource_list_entry *rle;
int error;
int avail;
int i;
avail = pci_msix_count(pdev->dev.bsddev);
if (avail < nreq) {
if (avail == 0)
return -EINVAL;
return avail;
}
avail = nreq;
if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
return error;
/*
* Handle case where "pci_alloc_msix()" may allocate less
* interrupts than available and return with no error:
*/
if (avail < nreq) {
pci_release_msi(pdev->dev.bsddev);
return avail;
}
rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
pdev->dev.irq_start = rle->start;
pdev->dev.irq_end = rle->start + avail;
for (i = 0; i < nreq; i++)
entries[i].vector = pdev->dev.irq_start + i;
pdev->msix_enabled = true;
return (0);
}
int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
struct resource_list_entry *rle;
int error;
int nvec;
if (maxvec < minvec)
return (-EINVAL);
nvec = pci_msi_count(pdev->dev.bsddev);
if (nvec < 1 || nvec < minvec)
return (-ENOSPC);
nvec = min(nvec, maxvec);
if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
return error;
/* Native PCI might only ever ask for 32 vectors. */
if (nvec < minvec) {
pci_release_msi(pdev->dev.bsddev);
return (-ENOSPC);
}
rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
pdev->dev.irq_start = rle->start;
pdev->dev.irq_end = rle->start + nvec;
pdev->irq = rle->start;
pdev->msi_enabled = true;
return (0);
}
int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
unsigned int flags)
@@ -986,7 +1305,7 @@ lkpi_pci_msi_desc_alloc(int irq)
struct pcicfg_msi *msi;
int vec;
-dev = linux_pci_find_irq_dev(irq);
+dev = lkpi_pci_find_irq_dev(irq);
if (dev == NULL)
return (NULL);


@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <linux/seq_file.h>
#if defined(__FreeBSD__)
+#include <sys/rman.h>
#include <linux/delay.h>
#endif


@@ -17,6 +17,7 @@
#include "ps.h"
#include "debug.h"
#if defined(__FreeBSD__)
+#include <sys/rman.h>
#include <linux/pm.h>
#endif


@@ -39,6 +39,7 @@
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
+#include <sys/rman.h>
#include <sys/socket.h>
#include <netinet/if_ether.h>
#include <linux/slab.h>
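
The last three files are the consumer side of the cleanup: code that uses
rman interfaces directly used to compile only because linux/pci.h leaked
sys/rman.h, and it now has to state the dependency itself. In files shared
with upstream Linux drivers the include is guarded for the FreeBSD build, as
in the hunks above:

#if defined(__FreeBSD__)
#include <sys/rman.h>	/* no longer pulled in via linux/pci.h */
#endif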