svn+ssh://svn.freebsd.org/base/head@216199

This commit is contained in:
Marcel Moolenaar 2010-12-05 20:47:36 +00:00
commit 0c21a60cf6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/altix/; revision=216201
967 changed files with 306850 additions and 142989 deletions

View file

@ -14,7 +14,7 @@ CSCOPEDIRS= boot bsm cam cddl compat conf contrib crypto ddb dev fs gdb \
netsmb nfs nfsclient nfsserver nlm opencrypto \
pci rpc security sys ufs vm xdr ${CSCOPE_ARCHDIR}
.if defined(ALL_ARCH)
CSCOPE_ARCHDIR ?= amd64 arm i386 ia64 mips pc98 powerpc sparc64 sun4v
CSCOPE_ARCHDIR ?= amd64 arm i386 ia64 mips pc98 powerpc sparc64 sun4v x86
.else
CSCOPE_ARCHDIR ?= ${MACHINE}
.endif
@ -34,7 +34,7 @@ cscope.out: ${.CURDIR}/cscope.files
${.CURDIR}/cscope.files: .PHONY
cd ${.CURDIR}; \
find ${CSCOPEDIRS} -name "*.[chSs]" -a -type f > ${.TARGET}
find ${CSCOPEDIRS} -name "*.[chSsly]" -a -type f > ${.TARGET}
cscope-clean:
rm -f cscope.files cscope.out cscope.in.out cscope.po.out

View file

@ -44,24 +44,6 @@ __FBSDID("$FreeBSD$");
#include <machine/nexusvar.h>
/*
* APM driver emulation
*/
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/uio.h>
#include <dev/acpica/acpiio.h>
#include <machine/apm_bios.h>
#include <i386/include/pc/bios.h>
#include <i386/bios/apm.h>
SYSCTL_DECL(_debug_acpi);
int acpi_resume_beep;
@ -73,445 +55,15 @@ int acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);
static int intr_model = ACPI_INTR_PIC;
static int apm_active;
static struct clonedevs *apm_clones;
MALLOC_DEFINE(M_APMDEV, "apmdev", "APM device emulation");
static d_open_t apmopen;
static d_close_t apmclose;
static d_write_t apmwrite;
static d_ioctl_t apmioctl;
static d_poll_t apmpoll;
static d_kqfilter_t apmkqfilter;
static void apmreadfiltdetach(struct knote *kn);
static int apmreadfilt(struct knote *kn, long hint);
static struct filterops apm_readfiltops = {
.f_isfd = 1,
.f_detach = apmreadfiltdetach,
.f_event = apmreadfilt,
};
static struct cdevsw apm_cdevsw = {
.d_version = D_VERSION,
.d_flags = D_TRACKCLOSE | D_NEEDMINOR,
.d_open = apmopen,
.d_close = apmclose,
.d_write = apmwrite,
.d_ioctl = apmioctl,
.d_poll = apmpoll,
.d_name = "apm",
.d_kqfilter = apmkqfilter
};
/*
 * Map an ACPI battery state to the APM battery status codes
 * (0 = high, 1 = low, 2 = critical, 3 = charging).
 */
static int
acpi_capm_convert_battstate(struct acpi_battinfo *battp)
{

	/* Charging wins over critical, which wins over the charge level. */
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		return (3);		/* charging */
	if (battp->state & ACPI_BATT_STAT_CRITICAL)
		return (2);		/* critical */
	/*
	 * Whether the battery is discharging or idle, classify it by its
	 * remaining capacity.
	 */
	return (battp->cap >= 50 ? 0 : 1);	/* high : low */
}
/*
 * Map an ACPI battery state to the APM battery flag bits.
 */
static int
acpi_capm_convert_battflags(struct acpi_battinfo *battp)
{
	int flags;

	/* An absent battery overrides every other flag. */
	if (battp->state == ACPI_BATT_STAT_NOT_PRESENT)
		return (APM_BATT_NOT_PRESENT);
	if (battp->cap >= 50)
		flags = APM_BATT_HIGH;
	else if (battp->state & ACPI_BATT_STAT_CRITICAL)
		flags = APM_BATT_CRITICAL;
	else
		flags = APM_BATT_LOW;
	if (battp->state & ACPI_BATT_STAT_CHARGING)
		flags |= APM_BATT_CHARGING;
	return (flags);
}
/*
 * Fill in an APM 1.2 information structure from the current ACPI
 * AC-line and aggregate battery state.  Always returns 0; values that
 * cannot be determined are reported as "unknown".
 */
static int
acpi_capm_get_info(apm_info_t aip)
{
	int acline;
	struct acpi_battinfo batt;

	aip->ai_infoversion = 1;
	aip->ai_major = 1;
	aip->ai_minor = 2;
	aip->ai_status = apm_active;
	aip->ai_capabilities= 0xff00;	/* unknown */

	if (acpi_acad_get_acline(&acline))
		aip->ai_acline = APM_UNKNOWN;	/* unknown */
	else
		aip->ai_acline = acline;	/* on/off */

	if (acpi_battery_get_battinfo(NULL, &batt) != 0) {
		/* No battery information available; report all unknown. */
		aip->ai_batt_stat = APM_UNKNOWN;
		aip->ai_batt_life = APM_UNKNOWN;
		aip->ai_batt_time = -1;	 /* unknown */
		aip->ai_batteries = ~0U; /* unknown */
	} else {
		aip->ai_batt_stat = acpi_capm_convert_battstate(&batt);
		aip->ai_batt_life = batt.cap;
		/* ACPI reports minutes remaining; APM wants seconds. */
		aip->ai_batt_time = (batt.min == -1) ? -1 : batt.min * 60;
		aip->ai_batteries = acpi_battery_get_units();
	}
	return (0);
}
/*
 * Fill in an APM power-status structure for one battery (or the
 * aggregate of all batteries for PMDV_ALLDEV).  Returns 0 on success,
 * 1 if the device is out of range or no battery info is available.
 */
static int
acpi_capm_get_pwstatus(apm_pwstatus_t app)
{
	device_t dev;
	int acline, unit, error;
	struct acpi_battinfo batt;

	/* Only battery devices (or "all devices") are supported. */
	if (app->ap_device != PMDV_ALLDEV &&
	    (app->ap_device < PMDV_BATT0 || app->ap_device > PMDV_BATT_ALL))
		return (1);

	if (app->ap_device == PMDV_ALLDEV)
		error = acpi_battery_get_battinfo(NULL, &batt);
	else {
		/* Map the APM device code to a battery unit number. */
		unit = app->ap_device - PMDV_BATT0;
		dev = devclass_get_device(devclass_find("battery"), unit);
		if (dev != NULL)
			error = acpi_battery_get_battinfo(dev, &batt);
		else
			error = ENXIO;
	}
	if (error)
		return (1);

	app->ap_batt_stat = acpi_capm_convert_battstate(&batt);
	app->ap_batt_flag = acpi_capm_convert_battflags(&batt);
	app->ap_batt_life = batt.cap;
	/* ACPI reports minutes remaining; APM wants seconds. */
	app->ap_batt_time = (batt.min == -1) ? -1 : batt.min * 60;

	if (acpi_acad_get_acline(&acline))
		app->ap_acline = APM_UNKNOWN;
	else
		app->ap_acline = acline;	/* on/off */

	return (0);
}
/* Create single-use devices for /dev/apm and /dev/apmctl. */
static void
apm_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int ctl_dev, unit;

	/* Another clone handler already satisfied this lookup. */
	if (*dev != NULL)
		return;
	if (strcmp(name, "apmctl") == 0)
		ctl_dev = TRUE;
	else if (strcmp(name, "apm") == 0)
		ctl_dev = FALSE;
	else
		return;

	/* Always create a new device and unit number. */
	unit = -1;
	if (clone_create(&apm_clones, &apm_cdevsw, &unit, dev, 0)) {
		/* apmctl is group-writable (0660); apm is world-readable. */
		if (ctl_dev) {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0660, "apmctl%d", unit);
		} else {
			*dev = make_dev(&apm_cdevsw, unit,
			    UID_ROOT, GID_OPERATOR, 0664, "apm%d", unit);
		}
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}
}
/*
 * Create a struct for tracking per-device suspend notification.
 * The new clone is linked onto the softc's apm_cdevs list; the caller
 * owns it until apmclose() removes and frees it.
 */
static struct apm_clone_data *
apm_create_clone(struct cdev *dev, struct acpi_softc *acpi_sc)
{
	struct apm_clone_data *clone;

	clone = malloc(sizeof(*clone), M_APMDEV, M_WAITOK);
	clone->cdev = dev;
	clone->acpi_sc = acpi_sc;
	clone->notify_status = APM_EV_NONE;
	bzero(&clone->sel_read, sizeof(clone->sel_read));
	/* Kqueue notes are protected by the global ACPI mutex. */
	knlist_init_mtx(&clone->sel_read.si_note, &acpi_mutex);

	/*
	 * The acpi device is always managed by devd(8) and is considered
	 * writable (i.e., ack is required to allow suspend to proceed.)
	 */
	if (strcmp("acpi", devtoname(dev)) == 0)
		clone->flags = ACPI_EVF_DEVD | ACPI_EVF_WRITE;
	else
		clone->flags = ACPI_EVF_NONE;
	ACPI_LOCK(acpi);
	STAILQ_INSERT_TAIL(&acpi_sc->apm_cdevs, clone, entries);
	ACPI_UNLOCK(acpi);
	return (clone);
}
/*
 * Open handler for /dev/apm[ctl]: allocate per-open clone state and
 * stash it in si_drv1 for the other cdevsw entry points.
 */
static int
apmopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct acpi_softc *acpi_sc;
	struct apm_clone_data *clone;

	acpi_sc = devclass_get_softc(devclass_find("acpi"), 0);
	clone = apm_create_clone(dev, acpi_sc);
	dev->si_drv1 = clone;

	/* If the device is opened for write, record that. */
	if ((flag & FWRITE) != 0)
		clone->flags |= ACPI_EVF_WRITE;

	return (0);
}
/*
 * Close handler: implicitly ack any pending suspend request (so a
 * dying listener cannot stall suspend), then tear down the clone.
 */
static int
apmclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;

	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	/* We are about to lose a reference so check if suspend should occur */
	if (acpi_sc->acpi_next_sstate != 0 &&
	    clone->notify_status != APM_EV_ACKED)
		acpi_AckSleepState(clone, 0);

	/* Remove this clone's data from the list and free it. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	knlist_destroy(&clone->sel_read.si_note);
	ACPI_UNLOCK(acpi);
	free(clone, M_APMDEV);
	/* Deferred destroy: we may be called from the device's own path. */
	destroy_dev_sched(dev);
	return (0);
}
/*
 * ioctl handler implementing the APM interface on top of ACPI.
 * Suspend/standby requests are translated into ACPI sleep-state
 * requests; info queries are answered from ACPI battery/AC data.
 * State-changing commands require the device to be open for writing.
 */
static int
apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;
	struct apm_clone_data *clone;
	struct acpi_softc *acpi_sc;
	struct apm_info info;
	struct apm_event_info *ev_info;
	apm_info_old_t aiop;

	error = 0;
	clone = dev->si_drv1;
	acpi_sc = clone->acpi_sc;

	switch (cmd) {
	case APMIO_SUSPEND:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		/* Either start a new sleep request or ack the pending one. */
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_suspend_sx);
			} else {
				printf(
				    "power off via apm suspend not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_STANDBY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		if (acpi_sc->acpi_next_sstate == 0) {
			if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
				error = acpi_ReqSleepState(acpi_sc,
				    acpi_sc->acpi_standby_sx);
			} else {
				printf(
				    "power off via apm standby not supported\n");
				error = ENXIO;
			}
		} else
			error = acpi_AckSleepState(clone, 0);
		break;
	case APMIO_NEXTEVENT:
		printf("apm nextevent start\n");
		ACPI_LOCK(acpi);
		/* Report a pending sleep request exactly once per clone. */
		if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
		    APM_EV_NONE) {
			ev_info = (struct apm_event_info *)addr;
			if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
				ev_info->type = PMEV_STANDBYREQ;
			else
				ev_info->type = PMEV_SUSPENDREQ;
			ev_info->index = 0;
			clone->notify_status = APM_EV_NOTIFIED;
			printf("apm event returning %d\n", ev_info->type);
		} else
			error = EAGAIN;
		ACPI_UNLOCK(acpi);
		break;
	case APMIO_GETINFO_OLD:
		/*
		 * Bail out before touching 'info' on failure; previously
		 * the uninitialized stack structure was copied out to the
		 * caller even when the query failed.
		 */
		if (acpi_capm_get_info(&info)) {
			error = ENXIO;
			break;
		}
		aiop = (apm_info_old_t)addr;
		aiop->ai_major = info.ai_major;
		aiop->ai_minor = info.ai_minor;
		aiop->ai_acline = info.ai_acline;
		aiop->ai_batt_stat = info.ai_batt_stat;
		aiop->ai_batt_life = info.ai_batt_life;
		aiop->ai_status = info.ai_status;
		break;
	case APMIO_GETINFO:
		if (acpi_capm_get_info((apm_info_t)addr))
			error = ENXIO;
		break;
	case APMIO_GETPWSTATUS:
		if (acpi_capm_get_pwstatus((apm_pwstatus_t)addr))
			error = ENXIO;
		break;
	case APMIO_ENABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 1;
		break;
	case APMIO_DISABLE:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		apm_active = 0;
		break;
	case APMIO_HALTCPU:
		/* No-op under ACPI emulation. */
		break;
	case APMIO_NOTHALTCPU:
		/* No-op under ACPI emulation. */
		break;
	case APMIO_DISPLAY:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		break;
	case APMIO_BIOS:
		if ((flag & FWRITE) == 0)
			return (EPERM);
		/* No BIOS to call; return an all-zero result. */
		bzero(addr, sizeof(struct apm_bios_arg));
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * Write handler: accepts and discards all data.
 * NOTE(review): returning uio->uio_resid from a d_write handler looks
 * like it reports the unconsumed byte count as the "error" value —
 * confirm this matches the historical apm(4) write semantics.
 */
static int
apmwrite(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (uio->uio_resid);
}
/*
 * Poll handler: readable when a sleep-state request is pending,
 * otherwise record the thread for later selwakeup().
 */
static int
apmpoll(struct cdev *dev, int events, struct thread *td)
{
	struct apm_clone_data *clone;
	int revents;

	revents = 0;
	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	if (clone->acpi_sc->acpi_next_sstate)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(td, &clone->sel_read);
	ACPI_UNLOCK(acpi);
	return (revents);
}
/*
 * Kqueue attach: hook this knote onto the clone's read knlist so
 * readers are woken when a sleep request arrives.
 */
static int
apmkqfilter(struct cdev *dev, struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = dev->si_drv1;
	kn->kn_hook = clone;
	kn->kn_fop = &apm_readfiltops;
	knlist_add(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
	return (0);
}
/* Kqueue detach: unhook the knote added by apmkqfilter(). */
static void
apmreadfiltdetach(struct knote *kn)
{
	struct apm_clone_data *clone;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	knlist_remove(&clone->sel_read.si_note, kn, 0);
	ACPI_UNLOCK(acpi);
}
/*
 * Kqueue event filter: the descriptor is readable exactly when a
 * sleep-state transition has been requested.
 */
static int
apmreadfilt(struct knote *kn, long hint)
{
	struct apm_clone_data *clone;
	int sleeping;

	ACPI_LOCK(acpi);
	clone = kn->kn_hook;
	sleeping = (clone->acpi_sc->acpi_next_sstate != 0);
	ACPI_UNLOCK(acpi);
	return (sleeping);
}
int
acpi_machdep_init(device_t dev)
{
struct acpi_softc *sc;
struct acpi_softc *sc;
sc = devclass_get_softc(devclass_find("acpi"), 0);
sc = device_get_softc(dev);
/* Create a clone for /dev/acpi also. */
STAILQ_INIT(&sc->apm_cdevs);
sc->acpi_clone = apm_create_clone(sc->acpi_dev_t, sc);
clone_setup(&apm_clones);
EVENTHANDLER_REGISTER(dev_clone, apm_clone, 0, 1000);
acpi_apm_init(sc);
if (intr_model != ACPI_INTR_PIC)
acpi_SetIntrModel(intr_model);
@ -534,12 +86,14 @@ acpi_SetDefaultIntrModel(int model)
int
acpi_machdep_quirks(int *quirks)
{
return (0);
}
void
acpi_cpu_c1()
{
__asm __volatile("sti; hlt");
}

View file

@ -76,12 +76,6 @@ ENTRY(acpi_restorecpu)
movl WAKEUP_CTX(efer), %eax
wrmsr
/* Restore PAT. */
movl $MSR_PAT, %ecx
movl WAKEUP_CTX(pat), %eax
movl 4 + WAKEUP_CTX(pat), %edx
wrmsr
/* Restore fast syscall stuff. */
movl $MSR_STAR, %ecx
movl WAKEUP_CTX(star), %eax

View file

@ -274,8 +274,6 @@ wakeup_gdt:
ALIGN_DATA
wakeup_efer:
.quad 0
wakeup_pat:
.quad 0
wakeup_star:
.quad 0
wakeup_lstar:

View file

@ -41,13 +41,13 @@ __FBSDID("$FreeBSD$");
#include <vm/pmap.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <x86/mca.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#ifdef SMP
#include <machine/apicreg.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/vmparam.h>
#endif
@ -278,6 +278,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
for (;;)
ia32_pause();
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
PCPU_SET(switchticks, ticks);
#ifdef SMP
@ -372,7 +373,6 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
WAKECODE_FIXUP(wakeup_ctx, vm_offset_t,
WAKECODE_VADDR(sc) + wakeup_ctx);
WAKECODE_FIXUP(wakeup_efer, uint64_t, rdmsr(MSR_EFER));
WAKECODE_FIXUP(wakeup_pat, uint64_t, rdmsr(MSR_PAT));
WAKECODE_FIXUP(wakeup_star, uint64_t, rdmsr(MSR_STAR));
WAKECODE_FIXUP(wakeup_lstar, uint64_t, rdmsr(MSR_LSTAR));
WAKECODE_FIXUP(wakeup_cstar, uint64_t, rdmsr(MSR_CSTAR));

View file

@ -1,573 +0,0 @@
/*-
* Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/apicreg.h>
#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/actables.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcivar.h>
/* These two arrays are indexed by APIC IDs. */
struct ioapic_info {
void *io_apic;
UINT32 io_vector;
} ioapics[MAX_APIC_ID + 1];
struct lapic_info {
u_int la_enabled:1;
u_int la_acpi_id:8;
} lapics[MAX_APIC_ID + 1];
static int madt_found_sci_override;
static ACPI_TABLE_MADT *madt;
static vm_paddr_t madt_physaddr;
static vm_offset_t madt_length;
MALLOC_DEFINE(M_MADT, "madt_table", "ACPI MADT Table Items");
static enum intr_polarity interrupt_polarity(UINT16 IntiFlags, UINT8 Source);
static enum intr_trigger interrupt_trigger(UINT16 IntiFlags, UINT8 Source);
static int madt_find_cpu(u_int acpi_id, u_int *apic_id);
static int madt_find_interrupt(int intr, void **apic, u_int *pin);
static void madt_parse_apics(ACPI_SUBTABLE_HEADER *entry, void *arg);
static void madt_parse_interrupt_override(
ACPI_MADT_INTERRUPT_OVERRIDE *intr);
static void madt_parse_ints(ACPI_SUBTABLE_HEADER *entry,
void *arg __unused);
static void madt_parse_local_nmi(ACPI_MADT_LOCAL_APIC_NMI *nmi);
static void madt_parse_nmi(ACPI_MADT_NMI_SOURCE *nmi);
static int madt_probe(void);
static int madt_probe_cpus(void);
static void madt_probe_cpus_handler(ACPI_SUBTABLE_HEADER *entry,
void *arg __unused);
static void madt_register(void *dummy);
static int madt_setup_local(void);
static int madt_setup_io(void);
static void madt_walk_table(acpi_subtable_handler *handler, void *arg);
static struct apic_enumerator madt_enumerator = {
"MADT",
madt_probe,
madt_probe_cpus,
madt_setup_local,
madt_setup_io
};
/*
 * Probe for an ACPI Multiple APIC Description Table ("APIC" signature).
 * Records its physical address for the later setup phases.
 */
static int
madt_probe(void)
{

	madt_physaddr = acpi_find_table(ACPI_SIG_MADT);
	return (madt_physaddr == 0 ? ENXIO : 0);
}
/*
 * Run through the MADT enumerating CPUs (local APIC entries).
 * The table is mapped only for the duration of the walk.
 */
static int
madt_probe_cpus(void)
{

	madt = acpi_map_table(madt_physaddr, ACPI_SIG_MADT);
	/*
	 * Check the mapping before dereferencing it; the old code read
	 * madt->Header.Length first, making the assertion useless.
	 */
	KASSERT(madt != NULL, ("Unable to re-map MADT"));
	madt_length = madt->Header.Length;
	madt_walk_table(madt_probe_cpus_handler, NULL);
	acpi_unmap_table(madt);
	madt = NULL;
	return (0);
}
/*
 * Initialize the local APIC on the BSP.
 * Leaves the MADT mapped (via pmap_mapbios) for the later walks.
 */
static int
madt_setup_local(void)
{

	madt = pmap_mapbios(madt_physaddr, madt_length);
	lapic_init(madt->Address);
	printf("ACPI APIC Table: <%.*s %.*s>\n",
	    (int)sizeof(madt->Header.OemId), madt->Header.OemId,
	    (int)sizeof(madt->Header.OemTableId), madt->Header.OemTableId);

	/*
	 * We ignore 64-bit local APIC override entries.  Should we
	 * perhaps emit a warning here if we find one?
	 */
	return (0);
}
/*
 * Enumerate I/O APICs and setup interrupt sources.
 */
static int
madt_setup_io(void)
{
	void *ioapic;
	u_int pin;
	int i;

	/* Try to initialize ACPI so that we can access the FADT. */
	/* NOTE(review): 'i' holds an ACPI_STATUS here — confirm the int
	 * narrowing is intentional. */
	i = acpi_Startup();
	if (ACPI_FAILURE(i)) {
		printf("MADT: ACPI Startup failed with %s\n",
		    AcpiFormatException(i));
		printf("Try disabling either ACPI or apic support.\n");
		panic("Using MADT but ACPI doesn't work");
	}

	/* First, we run through adding I/O APIC's. */
	madt_walk_table(madt_parse_apics, NULL);

	/* Second, we run through the table tweaking interrupt sources. */
	madt_walk_table(madt_parse_ints, NULL);

	/*
	 * If there was not an explicit override entry for the SCI,
	 * force it to use level trigger and active-low polarity.
	 */
	if (!madt_found_sci_override) {
		if (madt_find_interrupt(AcpiGbl_FADT.SciInterrupt, &ioapic,
		    &pin) != 0)
			printf("MADT: Could not find APIC for SCI IRQ %u\n",
			    AcpiGbl_FADT.SciInterrupt);
		else {
			printf(
	"MADT: Forcing active-low polarity and level trigger for SCI\n");
			ioapic_set_polarity(ioapic, pin, INTR_POLARITY_LOW);
			ioapic_set_triggermode(ioapic, pin, INTR_TRIGGER_LEVEL);
		}
	}

	/* Third, we register all the I/O APIC's. */
	for (i = 0; i <= MAX_APIC_ID; i++)
		if (ioapics[i].io_apic != NULL)
			ioapic_register(ioapics[i].io_apic);

	/* Finally, we throw the switch to enable the I/O APIC's. */
	acpi_SetDefaultIntrModel(ACPI_INTR_APIC);

	return (0);
}
/*
 * Register the MADT APIC enumerator early in boot (before tunables)
 * so it can compete with the MP Table enumerator.
 */
static void
madt_register(void *dummy __unused)
{

	apic_register_enumerator(&madt_enumerator);
}
SYSINIT(madt_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST,
    madt_register, NULL);
/*
 * Call the handler routine for each entry in the MADT table.
 * Subtables start immediately after the fixed MADT header (madt + 1).
 */
static void
madt_walk_table(acpi_subtable_handler *handler, void *arg)
{

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    handler, arg);
}
/*
 * Per-subtable callback for the CPU probe walk: record each enabled
 * local APIC entry in the lapics[] map and create the CPU.
 */
static void
madt_probe_cpus_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_LOCAL_APIC *proc;
	struct lapic_info *la;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_LOCAL_APIC:
		/*
		 * The MADT does not include a BSP flag, so we have to
		 * let the MP code figure out which CPU is the BSP on
		 * its own.
		 */
		proc = (ACPI_MADT_LOCAL_APIC *)entry;
		if (bootverbose)
			printf("MADT: Found CPU APIC ID %u ACPI ID %u: %s\n",
			    proc->Id, proc->ProcessorId,
			    (proc->LapicFlags & ACPI_MADT_ENABLED) ?
			    "enabled" : "disabled");
		if (!(proc->LapicFlags & ACPI_MADT_ENABLED))
			break;
		if (proc->Id > MAX_APIC_ID)
			panic("%s: CPU ID %u too high", __func__, proc->Id);
		la = &lapics[proc->Id];
		KASSERT(la->la_enabled == 0,
		    ("Duplicate local APIC ID %u", proc->Id));
		la->la_enabled = 1;
		la->la_acpi_id = proc->ProcessorId;
		lapic_create(proc->Id, 0);
		break;
	}
}
/*
 * Add an I/O APIC from an entry in the table.
 */
static void
madt_parse_apics(ACPI_SUBTABLE_HEADER *entry, void *arg __unused)
{
	ACPI_MADT_IO_APIC *apic;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_IO_APIC:
		apic = (ACPI_MADT_IO_APIC *)entry;
		if (bootverbose)
			printf(
			    "MADT: Found IO APIC ID %u, Interrupt %u at %p\n",
			    apic->Id, apic->GlobalIrqBase,
			    (void *)(uintptr_t)apic->Address);
		if (apic->Id > MAX_APIC_ID)
			panic("%s: I/O APIC ID %u too high", __func__,
			    apic->Id);
		if (ioapics[apic->Id].io_apic != NULL)
			panic("%s: Double APIC ID %u", __func__, apic->Id);
		/* A base in the MSI range cannot be a real I/O APIC. */
		if (apic->GlobalIrqBase >= FIRST_MSI_INT) {
			/* NOTE(review): message lacks a '\n' — confirm. */
			printf("MADT: Ignoring bogus I/O APIC ID %u", apic->Id);
			break;
		}
		ioapics[apic->Id].io_apic = ioapic_create(apic->Address,
		    apic->Id, apic->GlobalIrqBase);
		ioapics[apic->Id].io_vector = apic->GlobalIrqBase;
		break;
	default:
		break;
	}
}
/*
 * Determine the polarity of an interrupt source from its MADT flags.
 * For ACPI these helpers are only used for ISA interrupts, so a
 * "conforms" value means the ISA default (active-high) except for the
 * ACPI SCI, which is active-low.
 */
static enum intr_polarity
interrupt_polarity(UINT16 IntiFlags, UINT8 Source)
{

	switch (IntiFlags & ACPI_MADT_POLARITY_MASK) {
	case ACPI_MADT_POLARITY_ACTIVE_HIGH:
		return (INTR_POLARITY_HIGH);
	case ACPI_MADT_POLARITY_ACTIVE_LOW:
		return (INTR_POLARITY_LOW);
	case ACPI_MADT_POLARITY_CONFORMS:
		return (Source == AcpiGbl_FADT.SciInterrupt ?
		    INTR_POLARITY_LOW : INTR_POLARITY_HIGH);
	default:
		panic("Bogus Interrupt Polarity");
	}
}
/*
 * Determine the trigger mode of an interrupt source from its MADT
 * flags.  "Conforms" means the ISA default (edge) except for the ACPI
 * SCI, which is level triggered.
 */
static enum intr_trigger
interrupt_trigger(UINT16 IntiFlags, UINT8 Source)
{

	switch (IntiFlags & ACPI_MADT_TRIGGER_MASK) {
	case ACPI_MADT_TRIGGER_EDGE:
		return (INTR_TRIGGER_EDGE);
	case ACPI_MADT_TRIGGER_LEVEL:
		return (INTR_TRIGGER_LEVEL);
	case ACPI_MADT_TRIGGER_CONFORMS:
		return (Source == AcpiGbl_FADT.SciInterrupt ?
		    INTR_TRIGGER_LEVEL : INTR_TRIGGER_EDGE);
	default:
		panic("Bogus Interrupt Trigger Mode");
	}
}
/*
 * Find the local APIC ID associated with a given ACPI Processor ID.
 * Returns 0 and stores the APIC ID via *apic_id, or ENOENT if no
 * enabled CPU matches.
 */
static int
madt_find_cpu(u_int acpi_id, u_int *apic_id)
{
	u_int id;

	for (id = 0; id <= MAX_APIC_ID; id++) {
		if (lapics[id].la_enabled &&
		    lapics[id].la_acpi_id == acpi_id) {
			*apic_id = id;
			return (0);
		}
	}
	return (ENOENT);
}
/*
 * Find the IO APIC and pin on that APIC associated with a given global
 * interrupt.  Selects the APIC with the highest GSI base that does not
 * exceed 'intr'; the pin is the offset from that base.
 */
static int
madt_find_interrupt(int intr, void **apic, u_int *pin)
{
	int i, best;

	best = -1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (ioapics[i].io_apic == NULL ||
		    ioapics[i].io_vector > intr)
			continue;
		if (best == -1 ||
		    ioapics[best].io_vector < ioapics[i].io_vector)
			best = i;
	}
	if (best == -1)
		return (ENOENT);
	*apic = ioapics[best].io_apic;
	*pin = intr - ioapics[best].io_vector;
	/* NOTE(review): sanity threshold; I/O APICs normally have at most
	 * 24 pins — confirm 32 is the intended upper bound. */
	if (*pin > 32)
		printf("WARNING: Found intpin of %u for vector %d\n", *pin,
		    intr);
	return (0);
}
/*
 * Parse an interrupt source override for an ISA interrupt.
 * Applies trigger/polarity from the entry (with tunable overrides for
 * the SCI) and remaps the IRQ when the global interrupt differs from
 * the ISA source.
 */
static void
madt_parse_interrupt_override(ACPI_MADT_INTERRUPT_OVERRIDE *intr)
{
	void *new_ioapic, *old_ioapic;
	u_int new_pin, old_pin;
	enum intr_trigger trig;
	enum intr_polarity pol;
	char buf[64];

	/* Some BIOSes have an erroneous IRQ0->GSI2 override; skip it. */
	if (acpi_quirks & ACPI_Q_MADT_IRQ0 && intr->SourceIrq == 0 &&
	    intr->GlobalIrq == 2) {
		if (bootverbose)
			printf("MADT: Skipping timer override\n");
		return;
	}
	if (bootverbose)
		printf("MADT: Interrupt override: source %u, irq %u\n",
		    intr->SourceIrq, intr->GlobalIrq);
	KASSERT(intr->Bus == 0, ("bus for interrupt overrides must be zero"));
	if (madt_find_interrupt(intr->GlobalIrq, &new_ioapic, &new_pin) != 0) {
		printf("MADT: Could not find APIC for vector %u (IRQ %u)\n",
		    intr->GlobalIrq, intr->SourceIrq);
		return;
	}

	/*
	 * Lookup the appropriate trigger and polarity modes for this
	 * entry.
	 */
	trig = interrupt_trigger(intr->IntiFlags, intr->SourceIrq);
	pol = interrupt_polarity(intr->IntiFlags, intr->SourceIrq);

	/*
	 * If the SCI is identity mapped but has edge trigger and
	 * active-hi polarity or the force_sci_lo tunable is set,
	 * force it to use level/lo.
	 */
	if (intr->SourceIrq == AcpiGbl_FADT.SciInterrupt) {
		madt_found_sci_override = 1;
		if (getenv_string("hw.acpi.sci.trigger", buf, sizeof(buf))) {
			if (tolower(buf[0]) == 'e')
				trig = INTR_TRIGGER_EDGE;
			else if (tolower(buf[0]) == 'l')
				trig = INTR_TRIGGER_LEVEL;
			else
				panic(
				"Invalid trigger %s: must be 'edge' or 'level'",
				    buf);
			printf("MADT: Forcing SCI to %s trigger\n",
			    trig == INTR_TRIGGER_EDGE ? "edge" : "level");
		}
		if (getenv_string("hw.acpi.sci.polarity", buf, sizeof(buf))) {
			if (tolower(buf[0]) == 'h')
				pol = INTR_POLARITY_HIGH;
			else if (tolower(buf[0]) == 'l')
				pol = INTR_POLARITY_LOW;
			else
				panic(
				"Invalid polarity %s: must be 'high' or 'low'",
				    buf);
			printf("MADT: Forcing SCI to active %s polarity\n",
			    pol == INTR_POLARITY_HIGH ? "high" : "low");
		}
	}

	/* Remap the IRQ if it is mapped to a different interrupt vector. */
	if (intr->SourceIrq != intr->GlobalIrq) {
		/*
		 * If the SCI is remapped to a non-ISA global interrupt,
		 * then override the vector we use to setup and allocate
		 * the interrupt.
		 */
		if (intr->GlobalIrq > 15 &&
		    intr->SourceIrq == AcpiGbl_FADT.SciInterrupt)
			acpi_OverrideInterruptLevel(intr->GlobalIrq);
		else
			ioapic_remap_vector(new_ioapic, new_pin,
			    intr->SourceIrq);
		/* Disable the old pin if it still claims the source IRQ. */
		if (madt_find_interrupt(intr->SourceIrq, &old_ioapic,
		    &old_pin) != 0)
			printf("MADT: Could not find APIC for source IRQ %u\n",
			    intr->SourceIrq);
		else if (ioapic_get_vector(old_ioapic, old_pin) ==
		    intr->SourceIrq)
			ioapic_disable_pin(old_ioapic, old_pin);
	}

	/* Program the polarity and trigger mode. */
	ioapic_set_triggermode(new_ioapic, new_pin, trig);
	ioapic_set_polarity(new_ioapic, new_pin, pol);
}
/*
 * Parse an entry for an NMI routed to an IO APIC.
 * Configures the pin as an NMI and applies any non-conforming trigger
 * and polarity settings from the entry.
 */
static void
madt_parse_nmi(ACPI_MADT_NMI_SOURCE *nmi)
{
	void *ioapic;
	u_int pin;

	if (madt_find_interrupt(nmi->GlobalIrq, &ioapic, &pin) != 0) {
		printf("MADT: Could not find APIC for vector %u\n",
		    nmi->GlobalIrq);
		return;
	}
	ioapic_set_nmi(ioapic, pin);
	if (!(nmi->IntiFlags & ACPI_MADT_TRIGGER_CONFORMS))
		ioapic_set_triggermode(ioapic, pin,
		    interrupt_trigger(nmi->IntiFlags, 0));
	/*
	 * Fix: gate the polarity override on the POLARITY conformance
	 * bits; the old code re-tested the trigger bits here, which
	 * matched neither the operation below nor the equivalent logic
	 * in madt_parse_local_nmi().
	 */
	if (!(nmi->IntiFlags & ACPI_MADT_POLARITY_CONFORMS))
		ioapic_set_polarity(ioapic, pin,
		    interrupt_polarity(nmi->IntiFlags, 0));
}
/*
 * Parse an entry for an NMI routed to a local APIC LVT pin.
 * An ACPI processor ID of 0xff means "all processors".
 */
static void
madt_parse_local_nmi(ACPI_MADT_LOCAL_APIC_NMI *nmi)
{
	u_int apic_id, pin;

	if (nmi->ProcessorId == 0xff)
		apic_id = APIC_ID_ALL;
	else if (madt_find_cpu(nmi->ProcessorId, &apic_id) != 0) {
		/* CPU is disabled or not described by the MADT; skip. */
		if (bootverbose)
			printf("MADT: Ignoring local NMI routed to "
			    "ACPI CPU %u\n", nmi->ProcessorId);
		return;
	}
	if (nmi->Lint == 0)
		pin = LVT_LINT0;
	else
		pin = LVT_LINT1;
	lapic_set_lvt_mode(apic_id, pin, APIC_LVT_DM_NMI);
	if (!(nmi->IntiFlags & ACPI_MADT_TRIGGER_CONFORMS))
		lapic_set_lvt_triggermode(apic_id, pin,
		    interrupt_trigger(nmi->IntiFlags, 0));
	if (!(nmi->IntiFlags & ACPI_MADT_POLARITY_CONFORMS))
		lapic_set_lvt_polarity(apic_id, pin,
		    interrupt_polarity(nmi->IntiFlags, 0));
}
/*
 * Parse interrupt entries: dispatch each subtable type to its handler.
 * Unrecognized entry types are silently ignored.
 */
static void
madt_parse_ints(ACPI_SUBTABLE_HEADER *entry, void *arg __unused)
{

	switch (entry->Type) {
	case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
		madt_parse_interrupt_override(
		    (ACPI_MADT_INTERRUPT_OVERRIDE *)entry);
		break;
	case ACPI_MADT_TYPE_NMI_SOURCE:
		madt_parse_nmi((ACPI_MADT_NMI_SOURCE *)entry);
		break;
	case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
		madt_parse_local_nmi((ACPI_MADT_LOCAL_APIC_NMI *)entry);
		break;
	}
}
/*
 * Setup per-CPU ACPI IDs: copy the ACPI processor ID recorded during
 * the MADT walk into each CPU's pcpu data.  Runs after CPUs start;
 * a no-op if the MADT enumerator was not selected (madt == NULL).
 */
static void
madt_set_ids(void *dummy)
{
	struct lapic_info *la;
	struct pcpu *pc;
	u_int i;

	if (madt == NULL)
		return;
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
		la = &lapics[pc->pc_apic_id];
		if (!la->la_enabled)
			panic("APIC: CPU with APIC ID %u is not enabled",
			    pc->pc_apic_id);
		pc->pc_acpi_id = la->la_acpi_id;
		if (bootverbose)
			printf("APIC: CPU %u has ACPI ID %u\n", i,
			    la->la_acpi_id);
	}
}
SYSINIT(madt_set_ids, SI_SUB_CPU, SI_ORDER_ANY, madt_set_ids, NULL);

View file

@ -35,6 +35,10 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
@ -303,20 +307,21 @@ amd64_mrstoreone(void *arg)
struct mem_range_desc *mrd;
u_int64_t omsrv, msrv;
int i, j, msr;
u_int cr4save;
u_long cr0, cr4;
mrd = sc->mr_desc;
/* Disable PGE. */
cr4save = rcr4();
if (cr4save & CR4_PGE)
load_cr4(cr4save & ~CR4_PGE);
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);
/* Disable caches (CD = 1, NW = 0). */
load_cr0((rcr0() & ~CR0_NW) | CR0_CD);
cr0 = rcr0();
load_cr0((cr0 & ~CR0_NW) | CR0_CD);
/* Flushes caches and TLBs. */
wbinvd();
invltlb();
/* Disable MTRRs (E = 0). */
wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);
@ -384,17 +389,16 @@ amd64_mrstoreone(void *arg)
wrmsr(msr + 1, msrv);
}
/* Flush caches, TLBs. */
/* Flush caches and TLBs. */
wbinvd();
invltlb();
/* Enable MTRRs. */
wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);
/* Enable caches (CD = 0, NW = 0). */
load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
/* Restore PGE. */
load_cr4(cr4save);
/* Restore caches and PGE. */
load_cr0(cr0);
load_cr4(cr4);
}
/*
@ -527,9 +531,9 @@ static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *targ;
int error = 0;
int error, i;
switch(*arg) {
switch (*arg) {
case MEMRANGE_SET_UPDATE:
/*
* Make sure that what's being asked for is even
@ -568,6 +572,21 @@ amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
return (EOPNOTSUPP);
}
/*
* Ensure that the direct map region does not contain any mappings
* that span MTRRs of different types. However, the fixed MTRRs can
* be ignored, because a large page mapping the first 1 MB of physical
* memory is a special case that the processor handles. The entire
* TLB will be invalidated by amd64_mrstore(), so pmap_demote_DMAP()
* needn't do it.
*/
i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
mrd = sc->mr_desc + i;
for (; i < sc->mr_ndesc; i++, mrd++) {
if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, FALSE);
}
/* Update the hardware. */
amd64_mrstore(sc);
@ -657,6 +676,21 @@ amd64_mrinit(struct mem_range_softc *sc)
if (mrd->mr_flags & MDF_ACTIVE)
mrd->mr_flags |= MDF_FIRMWARE;
}
/*
* Ensure that the direct map region does not contain any mappings
* that span MTRRs of different types. However, the fixed MTRRs can
* be ignored, because a large page mapping the first 1 MB of physical
* memory is a special case that the processor handles. Invalidate
* any old TLB entries that might hold inconsistent memory type
* information.
*/
i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
mrd = sc->mr_desc + i;
for (; i < sc->mr_ndesc; i++, mrd++) {
if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
}
}
/*

View file

@ -39,7 +39,7 @@
#include "opt_smp.h"
#include <machine/asmacros.h>
#include <machine/apicreg.h>
#include <x86/apicreg.h>
#include "assym.s"

View file

@ -276,7 +276,7 @@ load_dr:
do_tss: movq %rdx,PCPU(TSSP)
movq %rdx,%rcx
movq PCPU(TSS),%rax
movw %rcx,2(%rax)
movw %cx,2(%rax)
shrq $16,%rcx
movb %cl,4(%rax)
shrq $8,%rcx

View file

@ -1,363 +0,0 @@
/*-
* Copyright (c) 2002 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/elf.h>
#include <machine/md_var.h>
CTASSERT(sizeof(struct kerneldumpheader) == 512);
int do_minidump = 1;
TUNABLE_INT("debug.minidump", &do_minidump);
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &do_minidump, 0,
"Enable mini crash dumps");
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
struct md_pa {
vm_paddr_t md_start;
vm_paddr_t md_size;
};
typedef int callback_t(struct md_pa *, int, void *);
static struct kerneldumpheader kdh;
static off_t dumplo, fileofs;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
/* 20 phys_avail entry pairs correspond to 10 md_pa's */
static struct md_pa dump_map[10];
static void
md_pa_init(void)
{
int n, idx;
bzero(dump_map, sizeof(dump_map));
for (n = 0; n < sizeof(dump_map) / sizeof(dump_map[0]); n++) {
idx = n * 2;
if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
break;
dump_map[n].md_start = dump_avail[idx];
dump_map[n].md_size = dump_avail[idx + 1] - dump_avail[idx];
}
}
static struct md_pa *
md_pa_first(void)
{
return (&dump_map[0]);
}
static struct md_pa *
md_pa_next(struct md_pa *mdp)
{
mdp++;
if (mdp->md_size == 0)
mdp = NULL;
return (mdp);
}
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
#define PG2MB(pgs) ((pgs + (1 << 8) - 1) >> 8)
static int
cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_paddr_t a, pa;
void *va;
uint64_t pgs;
size_t counter, sz, chunk;
int i, c, error, twiddle;
u_int maxdumppgs;
error = 0; /* catch case in which chunk size is 0 */
counter = 0; /* Update twiddle every 16MB */
twiddle = 0;
va = 0;
pgs = mdp->md_size / PAGE_SIZE;
pa = mdp->md_start;
maxdumppgs = min(di->maxiosize / PAGE_SIZE, MAXDUMPPGS);
if (maxdumppgs == 0) /* seatbelt */
maxdumppgs = 1;
printf(" chunk %d: %ldMB (%ld pages)", seqnr, PG2MB(pgs), pgs);
while (pgs) {
chunk = pgs;
if (chunk > maxdumppgs)
chunk = maxdumppgs;
sz = chunk << PAGE_SHIFT;
counter += sz;
if (counter >> 24) {
printf(" %ld", PG2MB(pgs));
counter &= (1<<24) - 1;
}
for (i = 0; i < chunk; i++) {
a = pa + i * PAGE_SIZE;
va = pmap_kenter_temporary(trunc_page(a), i);
}
error = dump_write(di, va, 0, dumplo, sz);
if (error)
break;
dumplo += sz;
pgs -= chunk;
pa += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf(" (CTRL-C to abort) ");
}
printf(" ... %s\n", (error) ? "fail" : "ok");
return (error);
}
static int
cb_dumphdr(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
uint64_t size;
int error;
size = mdp->md_size;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
phdr.p_vaddr = mdp->md_start;
phdr.p_paddr = mdp->md_start;
phdr.p_filesz = size;
phdr.p_memsz = size;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
}
static int
cb_size(struct md_pa *mdp, int seqnr, void *arg)
{
uint64_t *sz = (uint64_t*)arg;
*sz += (uint64_t)mdp->md_size;
return (0);
}
static int
foreach_chunk(callback_t cb, void *arg)
{
struct md_pa *mdp;
int error, seqnr;
seqnr = 0;
mdp = md_pa_first();
while (mdp != NULL) {
error = (*cb)(mdp, seqnr++, arg);
if (error)
return (-error);
mdp = md_pa_next(mdp);
}
return (seqnr);
}
void
dumpsys(struct dumperinfo *di)
{
Elf_Ehdr ehdr;
uint64_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
if (do_minidump) {
minidumpsys(di);
return;
}
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = EM_X86_64;
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_flags = 0;
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
md_pa_init();
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = foreach_chunk(cb_size, &dumpsize);
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION, dumpsize, di->blocksize);
printf("Dumping %llu MB (%d chunks)\n", (long long)dumpsize >> 20,
ehdr.e_phnum);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = foreach_chunk(cb_dumphdr, di);
if (error < 0)
goto fail;
buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignement
* in the file to have the segment contents aligned at page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care and may very well be unaligned within the dump device.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = foreach_chunk(cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return;
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
}

View file

@ -113,14 +113,14 @@ static struct savefpu fpu_initialstate;
void
fpuinit(void)
{
register_t savecrit;
register_t saveintr;
u_int mxcsr;
u_short control;
/*
* It is too early for critical_enter() to work on AP.
*/
savecrit = intr_disable();
saveintr = intr_disable();
stop_emulating();
fninit();
control = __INITIAL_FPUCW__;
@ -137,7 +137,7 @@ fpuinit(void)
bzero(fpu_initialstate.sv_xmm, sizeof(fpu_initialstate.sv_xmm));
}
start_emulating();
intr_restore(savecrit);
intr_restore(saveintr);
}
/*
@ -426,9 +426,7 @@ fpudna(void)
fxrstor(&fpu_initialstate);
if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
fldcw(pcb->pcb_initial_fpucw);
pcb->pcb_flags |= PCB_FPUINITDONE;
if (PCB_USER_FPU(pcb))
pcb->pcb_flags |= PCB_USERFPUINITDONE;
fpuuserinited(curthread);
} else
fxrstor(pcb->pcb_save);
critical_exit();
@ -448,60 +446,50 @@ fpudrop()
}
/*
* Get the state of the FPU without dropping ownership (if possible).
* It returns the FPU ownership status.
* Get the user state of the FPU into pcb->pcb_user_save without
* dropping ownership (if possible). It returns the FPU ownership
* status.
*/
int
fpugetuserregs(struct thread *td, struct savefpu *addr)
fpugetregs(struct thread *td)
{
struct pcb *pcb;
pcb = td->td_pcb;
if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
bcopy(&fpu_initialstate, addr, sizeof(fpu_initialstate));
addr->sv_env.en_cw = pcb->pcb_initial_fpucw;
return (_MC_FPOWNED_NONE);
bcopy(&fpu_initialstate, &pcb->pcb_user_save,
sizeof(fpu_initialstate));
pcb->pcb_user_save.sv_env.en_cw = pcb->pcb_initial_fpucw;
fpuuserinited(td);
return (_MC_FPOWNED_PCB);
}
critical_enter();
if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
fxsave(addr);
fxsave(&pcb->pcb_user_save);
critical_exit();
return (_MC_FPOWNED_FPU);
} else {
critical_exit();
bcopy(&pcb->pcb_user_save, addr, sizeof(*addr));
return (_MC_FPOWNED_PCB);
}
}
int
fpugetregs(struct thread *td, struct savefpu *addr)
void
fpuuserinited(struct thread *td)
{
struct pcb *pcb;
pcb = td->td_pcb;
if ((pcb->pcb_flags & PCB_FPUINITDONE) == 0) {
bcopy(&fpu_initialstate, addr, sizeof(fpu_initialstate));
addr->sv_env.en_cw = pcb->pcb_initial_fpucw;
return (_MC_FPOWNED_NONE);
}
critical_enter();
if (td == PCPU_GET(fpcurthread)) {
fxsave(addr);
critical_exit();
return (_MC_FPOWNED_FPU);
} else {
critical_exit();
bcopy(pcb->pcb_save, addr, sizeof(*addr));
return (_MC_FPOWNED_PCB);
}
if (PCB_USER_FPU(pcb))
pcb->pcb_flags |= PCB_FPUINITDONE;
pcb->pcb_flags |= PCB_USERFPUINITDONE;
}
/*
* Set the state of the FPU.
*/
void
fpusetuserregs(struct thread *td, struct savefpu *addr)
fpusetregs(struct thread *td, struct savefpu *addr)
{
struct pcb *pcb;
@ -514,31 +502,10 @@ fpusetuserregs(struct thread *td, struct savefpu *addr)
} else {
critical_exit();
bcopy(addr, &td->td_pcb->pcb_user_save, sizeof(*addr));
if (PCB_USER_FPU(pcb))
pcb->pcb_flags |= PCB_FPUINITDONE;
pcb->pcb_flags |= PCB_USERFPUINITDONE;
fpuuserinited(td);
}
}
void
fpusetregs(struct thread *td, struct savefpu *addr)
{
struct pcb *pcb;
pcb = td->td_pcb;
critical_enter();
if (td == PCPU_GET(fpcurthread)) {
fxrstor(addr);
critical_exit();
} else {
critical_exit();
bcopy(addr, td->td_pcb->pcb_save, sizeof(*addr));
}
if (PCB_USER_FPU(pcb))
pcb->pcb_flags |= PCB_USERFPUINITDONE;
pcb->pcb_flags |= PCB_FPUINITDONE;
}
/*
* On AuthenticAMD processors, the fxrstor instruction does not restore
* the x87's stored last instruction pointer, last data pointer, and last
@ -567,7 +534,7 @@ fpu_clean_state(void)
* the x87 stack, but we don't care since we're about to call
* fxrstor() anyway.
*/
__asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable));
__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}
/*

View file

@ -66,7 +66,7 @@ __FBSDID("$FreeBSD$");
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsdiskless.h>
#include <machine/apicreg.h>
#include <x86/apicreg.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sigframe.h>

View file

@ -458,7 +458,7 @@ intr_next_cpu(void)
/* Leave all interrupts on the BSP during boot. */
if (!assign_cpu)
return (cpu_apic_ids[0]);
return (PCPU_GET(apic_id));
mtx_lock_spin(&icu_lock);
apic_id = cpu_apic_ids[current_cpu];

View file

@ -112,7 +112,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
@ -1762,11 +1762,15 @@ void
spinlock_enter(void)
{
struct thread *td;
register_t flags;
td = curthread;
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_flags = intr_disable();
td->td_md.md_spinlock_count++;
if (td->td_md.md_spinlock_count == 0) {
flags = intr_disable();
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_flags = flags;
} else
td->td_md.md_spinlock_count++;
critical_enter();
}
@ -1774,12 +1778,14 @@ void
spinlock_exit(void)
{
struct thread *td;
register_t flags;
td = curthread;
critical_exit();
flags = td->td_md.md_saved_flags;
td->td_md.md_spinlock_count--;
if (td->td_md.md_spinlock_count == 0)
intr_restore(td->td_md.md_saved_flags);
intr_restore(flags);
}
/*
@ -1969,6 +1975,9 @@ int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
KASSERT(td == curthread || TD_IS_SUSPENDED(td),
("not suspended thread %p", td));
fpugetregs(td);
fill_fpregs_xmm(&td->td_pcb->pcb_user_save, fpregs);
return (0);
}
@ -1979,6 +1988,7 @@ set_fpregs(struct thread *td, struct fpreg *fpregs)
{
set_fpregs_xmm(fpregs, &td->td_pcb->pcb_user_save);
fpuuserinited(td);
return (0);
}
@ -2093,8 +2103,9 @@ static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
mcp->mc_ownedfp = fpugetuserregs(td,
(struct savefpu *)&mcp->mc_fpstate);
mcp->mc_ownedfp = fpugetregs(td);
bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
sizeof(mcp->mc_fpstate));
mcp->mc_fpformat = fpuformat();
}
@ -2114,7 +2125,7 @@ set_fpcontext(struct thread *td, const mcontext_t *mcp)
mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
fpstate = (struct savefpu *)&mcp->mc_fpstate;
fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
fpusetuserregs(td, fpstate);
fpusetregs(td, fpstate);
} else
return (EINVAL);
return (0);

View file

@ -167,63 +167,91 @@ blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
}
/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pt_entry_t fakept[NPTEPG];
static pd_entry_t fakepd[NPDEPG];
void
minidumpsys(struct dumperinfo *di)
{
uint64_t dumpsize;
uint32_t ptesize;
uint32_t pmapsize;
vm_offset_t va;
int error;
uint64_t bits;
uint64_t *pdp, *pd, *pt, pa;
int i, j, k, bit;
int i, j, k, n, bit;
int retry_count;
struct minidumphdr mdhdr;
retry_count = 0;
retry:
retry_count++;
counter = 0;
/* Walk page table pages, set bits in vm_page_dump */
ptesize = 0;
pmapsize = 0;
pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
kernel_vm_end); va += NBPDR) {
i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
kernel_vm_end); ) {
/*
* We always write a page, even if it is zero. Each
* page written corresponds to 2MB of space
* page written corresponds to 1GB of space
*/
ptesize += PAGE_SIZE;
if ((pdp[i] & PG_V) == 0)
pmapsize += PAGE_SIZE;
i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
if ((pdp[i] & PG_V) == 0) {
va += NBPDP;
continue;
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
/* This is an entire 2M page. */
pa = pd[j] & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
}
/*
* 1GB page is represented as 512 2MB pages in a dump.
*/
if ((pdp[i] & PG_PS) != 0) {
va += NBPDP;
pa = pdp[i] & PG_PS_FRAME;
for (n = 0; n < NPDEPG * NPTEPG; n++) {
if (is_dumpable(pa))
dump_add_page(pa);
pa += PAGE_SIZE;
}
continue;
}
if ((pd[j] & PG_V) == PG_V) {
/* set bit for each valid page in this 2MB block */
pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
for (k = 0; k < NPTEPG; k++) {
if ((pt[k] & PG_V) == PG_V) {
pa = pt[k] & PG_FRAME;
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
for (n = 0; n < NPDEPG; n++, va += NBPDR) {
j = (va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1);
if ((pd[j] & PG_V) == 0)
continue;
if ((pd[j] & PG_PS) != 0) {
/* This is an entire 2M page. */
pa = pd[j] & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
if (is_dumpable(pa))
dump_add_page(pa);
pa += PAGE_SIZE;
}
continue;
}
pa = pd[j] & PG_FRAME;
/* set bit for this PTE page */
if (is_dumpable(pa))
dump_add_page(pa);
/* and for each valid page in this 2MB block */
pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
for (k = 0; k < NPTEPG; k++) {
if ((pt[k] & PG_V) == 0)
continue;
pa = pt[k] & PG_FRAME;
if (is_dumpable(pa))
dump_add_page(pa);
}
} else {
/* nothing, we're going to dump a null page */
}
}
/* Calculate dump size. */
dumpsize = ptesize;
dumpsize = pmapsize;
dumpsize += round_page(msgbufp->msg_size);
dumpsize += round_page(vm_page_dump_size);
for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
@ -244,7 +272,7 @@ minidumpsys(struct dumperinfo *di)
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
error = E2BIG;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
@ -257,7 +285,7 @@ minidumpsys(struct dumperinfo *di)
mdhdr.version = MINIDUMP_VERSION;
mdhdr.msgbufsize = msgbufp->msg_size;
mdhdr.bitmapsize = vm_page_dump_size;
mdhdr.ptesize = ptesize;
mdhdr.pmapsize = pmapsize;
mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
mdhdr.dmapbase = DMAP_MIN_ADDRESS;
mdhdr.dmapend = DMAP_MAX_ADDRESS;
@ -274,9 +302,9 @@ minidumpsys(struct dumperinfo *di)
dumplo += sizeof(kdh);
/* Dump my header */
bzero(&fakept, sizeof(fakept));
bcopy(&mdhdr, &fakept, sizeof(mdhdr));
error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
bzero(&fakepd, sizeof(fakepd));
bcopy(&mdhdr, &fakepd, sizeof(mdhdr));
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
if (error)
goto fail;
@ -290,55 +318,49 @@ minidumpsys(struct dumperinfo *di)
if (error)
goto fail;
/* Dump kernel page table pages */
/* Dump kernel page directory pages */
bzero(fakepd, sizeof(fakepd));
pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
kernel_vm_end); va += NBPDR) {
kernel_vm_end); va += NBPDP) {
i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
/* We always write a page, even if it is zero */
if ((pdp[i] & PG_V) == 0) {
bzero(fakept, sizeof(fakept));
error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
if (error)
goto fail;
/* flush, in case we reuse fakept in the same block */
/* flush, in case we reuse fakepd in the same block */
error = blk_flush(di);
if (error)
goto fail;
continue;
}
/* 1GB page is represented as 512 2MB pages in a dump */
if ((pdp[i] & PG_PS) != 0) {
/* PDPE and PDP have identical layout in this case */
fakepd[0] = pdp[i];
for (j = 1; j < NPDEPG; j++)
fakepd[j] = fakepd[j - 1] + NBPDR;
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
if (error)
goto fail;
/* flush, in case we reuse fakepd in the same block */
error = blk_flush(di);
if (error)
goto fail;
bzero(fakepd, sizeof(fakepd));
continue;
}
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
/* This is a single 2M block. Generate a fake PTP */
pa = pd[j] & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
fakept[k] = (pa + (k * PAGE_SIZE)) | PG_V | PG_RW | PG_A | PG_M;
}
error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
if (error)
goto fail;
/* flush, in case we reuse fakept in the same block */
error = blk_flush(di);
if (error)
goto fail;
continue;
}
if ((pd[j] & PG_V) == PG_V) {
pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
error = blk_write(di, (char *)pt, 0, PAGE_SIZE);
if (error)
goto fail;
} else {
bzero(fakept, sizeof(fakept));
error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
if (error)
goto fail;
/* flush, in case we reuse fakept in the same block */
error = blk_flush(di);
if (error)
goto fail;
}
error = blk_write(di, (char *)pd, 0, PAGE_SIZE);
if (error)
goto fail;
error = blk_flush(di);
if (error)
goto fail;
}
/* Dump memory chunks */
@ -374,12 +396,21 @@ minidumpsys(struct dumperinfo *di)
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
printf("\n");
if (error == ENOSPC) {
printf("Dump map grown while dumping. ");
if (retry_count < 5) {
printf("Retrying...\n");
goto retry;
}
printf("Dump failed.\n");
}
else if (error == ECANCELED)
printf("Dump aborted\n");
else if (error == E2BIG)
printf("Dump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
printf("** DUMP FAILED (ERROR %d) **\n", error);
}
void

View file

@ -57,11 +57,11 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/apicreg.h>
#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/mca.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
@ -239,6 +239,9 @@ topo_probe_0x4(void)
cpu_logical++;
}
KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
("topo_probe_0x4 couldn't find BSP"));
cpu_cores /= cpu_logical;
hyperthreading_cpus = cpu_logical;
}
@ -310,7 +313,9 @@ topo_probe(void)
return;
logical_cpus_mask = 0;
if (cpu_vendor_id == CPU_VENDOR_AMD)
if (mp_ncpus <= 1)
cpu_cores = cpu_logical = 1;
else if (cpu_vendor_id == CPU_VENDOR_AMD)
topo_probe_amd();
else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
/*
@ -332,10 +337,8 @@ topo_probe(void)
* Fallback: assume each logical CPU is in separate
* physical package. That is, no multi-core, no SMT.
*/
if (cpu_cores == 0)
cpu_cores = 1;
if (cpu_logical == 0)
cpu_logical = 1;
if (cpu_cores == 0 || cpu_logical == 0)
cpu_cores = cpu_logical = 1;
cpu_topo_probed = 1;
}
@ -419,7 +422,7 @@ cpu_add(u_int apic_id, char boot_cpu)
}
if (mp_ncpus < MAXCPU) {
mp_ncpus++;
mp_maxid = mp_ncpus -1;
mp_maxid = mp_ncpus - 1;
}
if (bootverbose)
printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
@ -442,7 +445,7 @@ cpu_mp_setmaxid(void)
else
KASSERT(mp_maxid >= mp_ncpus - 1,
("%s: counters out of sync: max %d, count %d", __func__,
mp_maxid, mp_ncpus));
mp_maxid, mp_ncpus));
}
int
@ -1407,6 +1410,7 @@ cpususpend_handler(void)
wbinvd();
atomic_set_int(&stopped_cpus, cpumask);
} else {
pmap_init_pat();
PCPU_SET(switchtime, 0);
PCPU_SET(switchticks, ticks);
}

View file

@ -44,7 +44,7 @@
#include <sys/systm.h>
#include <machine/smp.h>
#include <machine/apicreg.h>
#include <x86/apicreg.h>
#include <machine/apicvar.h>
#include <machine/mp_watchdog.h>

File diff suppressed because it is too large Load diff

View file

@ -1,704 +0,0 @@
/*-
* Copyright 1998 Massachusetts Institute of Technology
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that both the above copyright notice and this
* permission notice appear in all copies, that both the above
* copyright notice and this permission notice appear in all
* supporting documentation, and that the name of M.I.T. not be used
* in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission. M.I.T. makes
* no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
* ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
* SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* This code implements a `root nexus' for Intel Architecture
* machines. The function of the root nexus is to serve as an
* attachment point for both processors and buses, and to manage
* resources which are common to all of them. In particular,
* this code implements the core resource managers for interrupt
* requests, DMA requests (which rightfully should be a part of the
* ISA code but it's easier to do it here for now), I/O port addresses,
* and I/O memory address space.
*/
#include "opt_isa.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <machine/metadata.h>
#include <machine/nexusvar.h>
#include <machine/resource.h>
#include <machine/pc/bios.h>
#include "pcib_if.h"
#ifdef DEV_ISA
#include <isa/isavar.h>
#include <x86/isa/isa.h>
#endif
#include <sys/rtprio.h>
static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
struct rman irq_rman, drq_rman, port_rman, mem_rman;
static int nexus_probe(device_t);
static int nexus_attach(device_t);
static int nexus_print_all_resources(device_t dev);
static int nexus_print_child(device_t, device_t);
static device_t nexus_add_child(device_t bus, u_int order, const char *name,
int unit);
static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
u_long, u_long, u_long, u_int);
#ifdef SMP
static int nexus_bind_intr(device_t, device_t, struct resource *, int);
#endif
static int nexus_config_intr(device_t, int, enum intr_trigger,
enum intr_polarity);
static int nexus_describe_intr(device_t dev, device_t child,
struct resource *irq, void *cookie,
const char *descr);
static int nexus_activate_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_deactivate_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_release_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_setup_intr(device_t, device_t, struct resource *, int flags,
driver_filter_t filter, void (*)(void *), void *,
void **);
static int nexus_teardown_intr(device_t, device_t, struct resource *,
void *);
static struct resource_list *nexus_get_reslist(device_t dev, device_t child);
static int nexus_set_resource(device_t, device_t, int, int, u_long, u_long);
static int nexus_get_resource(device_t, device_t, int, int, u_long *, u_long *);
static void nexus_delete_resource(device_t, device_t, int, int);
static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs);
static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs);
static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq);
static int nexus_release_msix(device_t pcib, device_t dev, int irq);
static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data);
static device_method_t nexus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, nexus_probe),
DEVMETHOD(device_attach, nexus_attach),
DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
/* Bus interface */
DEVMETHOD(bus_print_child, nexus_print_child),
DEVMETHOD(bus_add_child, nexus_add_child),
DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
DEVMETHOD(bus_release_resource, nexus_release_resource),
DEVMETHOD(bus_activate_resource, nexus_activate_resource),
DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
DEVMETHOD(bus_setup_intr, nexus_setup_intr),
DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
#ifdef SMP
DEVMETHOD(bus_bind_intr, nexus_bind_intr),
#endif
DEVMETHOD(bus_config_intr, nexus_config_intr),
DEVMETHOD(bus_describe_intr, nexus_describe_intr),
DEVMETHOD(bus_get_resource_list, nexus_get_reslist),
DEVMETHOD(bus_set_resource, nexus_set_resource),
DEVMETHOD(bus_get_resource, nexus_get_resource),
DEVMETHOD(bus_delete_resource, nexus_delete_resource),
/* pcib interface */
DEVMETHOD(pcib_alloc_msi, nexus_alloc_msi),
DEVMETHOD(pcib_release_msi, nexus_release_msi),
DEVMETHOD(pcib_alloc_msix, nexus_alloc_msix),
DEVMETHOD(pcib_release_msix, nexus_release_msix),
DEVMETHOD(pcib_map_msi, nexus_map_msi),
{ 0, 0 }
};
DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1);
static devclass_t nexus_devclass;
DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
static int
nexus_probe(device_t dev)
{
device_quiet(dev); /* suppress attach message for neatness */
return (BUS_PROBE_GENERIC);
}
void
nexus_init_resources(void)
{
int irq;
/*
* XXX working notes:
*
* - IRQ resource creation should be moved to the PIC/APIC driver.
* - DRQ resource creation should be moved to the DMAC driver.
* - The above should be sorted to probe earlier than any child busses.
*
* - Leave I/O and memory creation here, as child probes may need them.
* (especially eg. ACPI)
*/
/*
* IRQ's are on the mainboard on old systems, but on the ISA part
* of PCI->ISA bridges. There would be multiple sets of IRQs on
* multi-ISA-bus systems. PCI interrupts are routed to the ISA
* component, so in a way, PCI can be a partial child of an ISA bus(!).
* APIC interrupts are global though.
*/
irq_rman.rm_start = 0;
irq_rman.rm_type = RMAN_ARRAY;
irq_rman.rm_descr = "Interrupt request lines";
irq_rman.rm_end = NUM_IO_INTS - 1;
if (rman_init(&irq_rman))
panic("nexus_init_resources irq_rman");
/*
* We search for regions of existing IRQs and add those to the IRQ
* resource manager.
*/
for (irq = 0; irq < NUM_IO_INTS; irq++)
if (intr_lookup_source(irq) != NULL)
if (rman_manage_region(&irq_rman, irq, irq) != 0)
panic("nexus_init_resources irq_rman add");
/*
* ISA DMA on PCI systems is implemented in the ISA part of each
* PCI->ISA bridge and the channels can be duplicated if there are
* multiple bridges. (eg: laptops with docking stations)
*/
drq_rman.rm_start = 0;
drq_rman.rm_end = 7;
drq_rman.rm_type = RMAN_ARRAY;
drq_rman.rm_descr = "DMA request lines";
/* XXX drq 0 not available on some machines */
if (rman_init(&drq_rman)
|| rman_manage_region(&drq_rman,
drq_rman.rm_start, drq_rman.rm_end))
panic("nexus_init_resources drq_rman");
/*
* However, IO ports and Memory truely are global at this level,
* as are APIC interrupts (however many IO APICS there turn out
* to be on large systems..)
*/
port_rman.rm_start = 0;
port_rman.rm_end = 0xffff;
port_rman.rm_type = RMAN_ARRAY;
port_rman.rm_descr = "I/O ports";
if (rman_init(&port_rman)
|| rman_manage_region(&port_rman, 0, 0xffff))
panic("nexus_init_resources port_rman");
mem_rman.rm_start = 0;
mem_rman.rm_end = ~0u;
mem_rman.rm_type = RMAN_ARRAY;
mem_rman.rm_descr = "I/O memory addresses";
if (rman_init(&mem_rman)
|| rman_manage_region(&mem_rman, 0, ~0))
panic("nexus_init_resources mem_rman");
}
static int
nexus_attach(device_t dev)
{
nexus_init_resources();
bus_generic_probe(dev);
/*
* Explicitly add the legacy0 device here. Other platform
* types (such as ACPI), use their own nexus(4) subclass
* driver to override this routine and add their own root bus.
*/
if (BUS_ADD_CHILD(dev, 10, "legacy", 0) == NULL)
panic("legacy: could not attach");
bus_generic_attach(dev);
return 0;
}
static int
nexus_print_all_resources(device_t dev)
{
struct nexus_device *ndev = DEVTONX(dev);
struct resource_list *rl = &ndev->nx_resources;
int retval = 0;
if (STAILQ_FIRST(rl))
retval += printf(" at");
retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
return retval;
}
static int
nexus_print_child(device_t bus, device_t child)
{
int retval = 0;
retval += bus_print_child_header(bus, child);
retval += nexus_print_all_resources(child);
if (device_get_flags(child))
retval += printf(" flags %#x", device_get_flags(child));
retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */
return (retval);
}
static device_t
nexus_add_child(device_t bus, u_int order, const char *name, int unit)
{
device_t child;
struct nexus_device *ndev;
ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
if (!ndev)
return(0);
resource_list_init(&ndev->nx_resources);
child = device_add_child_ordered(bus, order, name, unit);
/* should we free this in nexus_child_detached? */
device_set_ivars(child, ndev);
return(child);
}
/*
 * Allocate a resource on behalf of child.  NB: child is usually going to be a
 * child of one of our descendants, not a direct child of nexus0.
 *
 * Returns the reserved (and, if RF_ACTIVE was requested, activated)
 * resource, or NULL on failure.  Idiom fix relative to the original:
 * pointer results are compared against and returned as NULL rather
 * than the integer constant 0.
 */
static struct resource *
nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct nexus_device *ndev = DEVTONX(child);
	struct resource *rv;
	struct resource_list_entry *rle;
	struct rman *rm;
	int needactivate = flags & RF_ACTIVE;

	/*
	 * If this is an allocation of the "default" range for a given RID,
	 * and we know what the resources for this device are (ie. they
	 * aren't maintained by a child bus), then work out the start/end
	 * values.
	 */
	if ((start == 0UL) && (end == ~0UL) && (count == 1)) {
		if (ndev == NULL)
			return (NULL);
		rle = resource_list_find(&ndev->nx_resources, type, *rid);
		if (rle == NULL)
			return (NULL);
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Activation, if requested, is done explicitly below. */
	flags &= ~RF_ACTIVE;

	/* Select the resource manager that owns this resource type. */
	switch (type) {
	case SYS_RES_IRQ:
		rm = &irq_rman;
		break;
	case SYS_RES_DRQ:
		rm = &drq_rman;
		break;
	case SYS_RES_IOPORT:
		rm = &port_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &mem_rman;
		break;
	default:
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags, child);
	if (rv == NULL)
		return (NULL);
	rman_set_rid(rv, *rid);

	if (needactivate) {
		if (bus_activate_resource(child, type, *rid, rv)) {
			/* Undo the reservation if activation fails. */
			rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}
/*
 * Activate a resource: memory ranges are mapped into KVA and get a
 * bus tag/handle; I/O ports get the I/O space tag with the port number
 * as the handle.  Other types need no special setup here.
 */
static int
nexus_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	void *vaddr;

	switch (type) {
	case SYS_RES_MEMORY:
		/* Map the physical range so the handle is dereferenceable. */
		vaddr = pmap_mapdev(rman_get_start(r), rman_get_size(r));
		rman_set_virtual(r, vaddr);
		rman_set_bustag(r, AMD64_BUS_SPACE_MEM);
		rman_set_bushandle(r, (bus_space_handle_t)vaddr);
		break;
	case SYS_RES_IOPORT:
		rman_set_bustag(r, AMD64_BUS_SPACE_IO);
		rman_set_bushandle(r, rman_get_start(r));
		break;
	default:
		break;
	}
	return (rman_activate_resource(r));
}
/*
 * Deactivate a resource.  Memory resources were mapped into KVA at
 * activation time, so undo that mapping first.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == SYS_RES_MEMORY)
		pmap_unmapdev((vm_offset_t)rman_get_virtual(r),
		    rman_get_size(r));
	return (rman_deactivate_resource(r));
}
/*
 * Release a resource back to its resource manager.  An active resource
 * must be deactivated first; bail out if that fails.
 */
static int
nexus_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	int error;

	if ((rman_get_flags(r) & RF_ACTIVE) != 0) {
		error = bus_deactivate_resource(child, type, rid, r);
		if (error != 0)
			return (error);
	}
	return (rman_release_resource(r));
}
/*
 * Currently this uses the really grody interface from kern/kern_intr.c
 * (which really doesn't belong in kern/anything.c).  Eventually, all of
 * the code in kern_intr.c and machdep_intr.c should get moved here, since
 * this is going to be the official interface.
 *
 * Installs "filter"/"ihand" for the vector backing "irq" and returns the
 * handler cookie through "cookiep".
 */
static int
nexus_setup_intr(device_t bus, device_t child, struct resource *irq,
    int flags, driver_filter_t filter, void (*ihand)(void *),
    void *arg, void **cookiep)
{
	int error;

	/* Somebody tried to set up an irq that failed to allocate! */
	if (irq == NULL)
		panic("nexus_setup_intr: NULL irq resource!");

	*cookiep = NULL;

	/* A non-shareable allocation implies an exclusive handler. */
	if ((rman_get_flags(irq) & RF_SHAREABLE) == 0)
		flags |= INTR_EXCL;

	/*
	 * We depend here on rman_activate_resource() being idempotent.
	 */
	error = rman_activate_resource(irq);
	if (error != 0)
		return (error);

	error = intr_add_handler(device_get_nameunit(child),
	    rman_get_start(irq), filter, ihand, arg, flags, cookiep);
	return (error);
}
/*
 * Remove an interrupt handler; "ih" is the cookie returned by
 * nexus_setup_intr().
 */
static int
nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
{
	int error;

	error = intr_remove_handler(ih);
	return (error);
}
#ifdef SMP
/*
 * Bind the interrupt vector backing "irq" to a specific CPU.
 */
static int
nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
	u_long vector;

	vector = rman_get_start(irq);
	return (intr_bind(vector, cpu));
}
#endif
/*
 * Configure trigger mode and polarity for an interrupt vector.
 */
static int
nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	int error;

	error = intr_config_intr(irq, trig, pol);
	return (error);
}
/*
 * Attach a human-readable description to an installed interrupt handler.
 */
static int
nexus_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{
	u_long vector;

	vector = rman_get_start(irq);
	return (intr_describe(vector, cookie, descr));
}
/*
 * Return the per-child resource list stored in the child's ivars.
 */
static struct resource_list *
nexus_get_reslist(device_t dev, device_t child)
{

	return (&DEVTONX(child)->nx_resources);
}
/*
 * Record a resource range [start, start + count - 1] of the given type
 * in the child's resource list.
 */
static int
nexus_set_resource(device_t dev, device_t child, int type, int rid,
    u_long start, u_long count)
{
	struct nexus_device *ndev = DEVTONX(child);
	u_long end;

	end = start + count - 1;
	/* XXX this should return a success/failure indicator */
	resource_list_add(&ndev->nx_resources, type, rid, start, end, count);
	return (0);
}
/*
 * Look up a recorded resource range in the child's resource list and
 * report its start/count through the optional output pointers.
 * Returns ENOENT if no matching entry exists.
 */
static int
nexus_get_resource(device_t dev, device_t child, int type, int rid,
    u_long *startp, u_long *countp)
{
	struct nexus_device *ndev = DEVTONX(child);
	struct resource_list_entry *rle;

	rle = resource_list_find(&ndev->nx_resources, type, rid);
	if (rle == NULL)
		return (ENOENT);
	if (startp != NULL)
		*startp = rle->start;
	if (countp != NULL)
		*countp = rle->count;
	return (0);
}
/*
 * Remove a recorded resource range from the child's resource list.
 */
static void
nexus_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct nexus_device *ndev = DEVTONX(child);

	resource_list_delete(&ndev->nx_resources, type, rid);
}
/* Called from the MSI code to add new IRQs to the IRQ rman. */
void
nexus_add_irq(u_long irq)
{
	int error;

	error = rman_manage_region(&irq_rman, irq, irq);
	if (error != 0)
		panic("%s: failed", __func__);
}
/*
 * Allocate an MSI-X vector for "dev"; the IRQ number is returned
 * through "irq".
 */
static int
nexus_alloc_msix(device_t pcib, device_t dev, int *irq)
{
	int error;

	error = msix_alloc(dev, irq);
	return (error);
}
/*
 * Release a previously allocated MSI-X vector.
 */
static int
nexus_release_msix(device_t pcib, device_t dev, int irq)
{
	int error;

	error = msix_release(irq);
	return (error);
}
/*
 * Allocate "count" MSI vectors (never more than "maxcount") for "dev";
 * the IRQ numbers are returned through "irqs".
 */
static int
nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
{
	int error;

	error = msi_alloc(dev, count, maxcount, irqs);
	return (error);
}
/*
 * Release a previously allocated group of MSI vectors.
 */
static int
nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs)
{
	int error;

	error = msi_release(irqs, count);
	return (error);
}
/*
 * Map an allocated MSI vector to the address/data pair the device must
 * program into its MSI capability registers.
 */
static int
nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{
	int error;

	error = msi_map(irq, addr, data);
	return (error);
}
/* Placeholder for system RAM. */
static void
ram_identify(driver_t *driver, device_t parent)
{
	device_t child;

	/* Honor hints that disable the pseudo-device. */
	if (resource_disabled("ram", 0))
		return;
	child = BUS_ADD_CHILD(parent, 0, "ram", 0);
	if (child == NULL)
		panic("ram_identify");
}
/*
 * Probe always succeeds; the device is quieted so it does not clutter
 * the boot messages.
 */
static int
ram_probe(device_t dev)
{

	device_set_desc(dev, "System RAM");
	device_quiet(dev);
	return (0);
}
/*
 * Attach the RAM pseudo-device: walk the BIOS SMAP supplied by the
 * loader and reserve one SYS_RES_MEMORY resource per usable entry so
 * that system RAM shows up as owned in the memory rman.
 *
 * The original code dereferenced "kmdp" and "smapbase" without checking
 * them; if the loader provided no kernel metadata or no SMAP, that was
 * a NULL pointer dereference during boot.  Panic with a clear message
 * instead.
 */
static int
ram_attach(device_t dev)
{
	struct bios_smap *smapbase, *smap, *smapend;
	struct resource *res;
	caddr_t kmdp;
	uint32_t smapsize;
	int error, rid;

	/* Retrieve the system memory map from the loader. */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	if (kmdp == NULL)
		panic("ram_attach: no kernel metadata from loader");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		panic("ram_attach: no BIOS smap info from loader");
	/* The SMAP byte length immediately precedes the entries. */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	rid = 0;
	for (smap = smapbase; smap < smapend; smap++) {
		/* Skip non-RAM and empty entries. */
		if (smap->type != SMAP_TYPE_MEMORY || smap->length == 0)
			continue;
		error = bus_set_resource(dev, SYS_RES_MEMORY, rid, smap->base,
		    smap->length);
		if (error)
			panic("ram_attach: resource %d failed set with %d", rid,
			    error);
		res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0);
		if (res == NULL)
			panic("ram_attach: resource %d failed to attach", rid);
		rid++;
	}
	return (0);
}
/* newbus glue for the RAM placeholder device attached under nexus. */
static device_method_t ram_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, ram_identify),
DEVMETHOD(device_probe, ram_probe),
DEVMETHOD(device_attach, ram_attach),
{ 0, 0 }
};
static driver_t ram_driver = {
"ram",
ram_methods,
1, /* no softc */
};
static devclass_t ram_devclass;
/* Register the "ram" driver as a child of nexus. */
DRIVER_MODULE(ram, nexus, ram_driver, ram_devclass, 0, 0);
#ifdef DEV_ISA
/*
 * Placeholder which claims PnP 'devices' which describe system
 * resources.
 */
static struct isa_pnp_id sysresource_ids[] = {
{ 0x010cd041 /* PNP0c01 */, "System Memory" },
{ 0x020cd041 /* PNP0c02 */, "System Resource" },
{ 0 }
};
/*
 * Probe: claim the device only if it matches one of the PnP IDs above;
 * quiet it otherwise so failed probes stay silent.
 */
static int
sysresource_probe(device_t dev)
{
int result;
if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, sysresource_ids)) <= 0) {
device_quiet(dev);
}
return(result);
}
/* Attach is a no-op; the device exists only to claim the PnP IDs. */
static int
sysresource_attach(device_t dev)
{
return(0);
}
static device_method_t sysresource_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sysresource_probe),
DEVMETHOD(device_attach, sysresource_attach),
DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
{ 0, 0 }
};
static driver_t sysresource_driver = {
"sysresource",
sysresource_methods,
1, /* no softc */
};
static devclass_t sysresource_devclass;
/* Register the placeholder driver on the ISA bus. */
DRIVER_MODULE(sysresource, isa, sysresource_driver, sysresource_devclass, 0, 0);
#endif /* DEV_ISA */

View file

@ -180,14 +180,19 @@ static vm_paddr_t dmaplimit;
vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
pt_entry_t pg_nx;
static int pat_works = 0; /* Is page attribute table sane? */
SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
"Is page attribute table fully functional?");
static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
"Are large page mappings enabled?");
#define PAT_INDEX_SIZE 8
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
static u_int64_t KPTphys; /* phys addr of kernel level 1 */
static u_int64_t KPDphys; /* phys addr of kernel level 2 */
u_int64_t KPDPphys; /* phys addr of kernel level 3 */
@ -447,6 +452,8 @@ allocpages(vm_paddr_t *firstaddr, int n)
return (ret);
}
CTASSERT(powerof2(NDMPML4E));
static void
create_pagetables(vm_paddr_t *firstaddr)
{
@ -462,7 +469,7 @@ create_pagetables(vm_paddr_t *firstaddr)
if (ndmpdp < 4) /* Minimum 4GB of dirmap */
ndmpdp = 4;
DMPDPphys = allocpages(firstaddr, NDMPML4E);
if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0)
if ((amd_feature & AMDID_PAGE1GB) == 0)
DMPDphys = allocpages(firstaddr, ndmpdp);
dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
@ -494,11 +501,16 @@ create_pagetables(vm_paddr_t *firstaddr)
((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
}
/* Now set up the direct map space using either 2MB or 1GB pages */
/* Preset PG_M and PG_A because demotion expects it */
if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0) {
/*
* Now, set up the direct map region using either 2MB or 1GB pages.
* Later, if pmap_mapdev{_attr}() uses the direct map for non-write-
* back memory, pmap_change_attr() will demote any 2MB or 1GB page
* mappings that are partially used.
*/
if ((amd_feature & AMDID_PAGE1GB) == 0) {
for (i = 0; i < NPDEPG * ndmpdp; i++) {
((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
/* Preset PG_M and PG_A because demotion expects it. */
((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS |
PG_G | PG_M | PG_A;
}
@ -512,6 +524,7 @@ create_pagetables(vm_paddr_t *firstaddr)
for (i = 0; i < ndmpdp; i++) {
((pdp_entry_t *)DMPDPphys)[i] =
(vm_paddr_t)i << PDPSHIFT;
/* Preset PG_M and PG_A because demotion expects it. */
((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS |
PG_G | PG_M | PG_A;
}
@ -521,9 +534,12 @@ create_pagetables(vm_paddr_t *firstaddr)
((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
/* Connect the Direct Map slot up to the PML4 */
((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
/* Connect the Direct Map slot(s) up to the PML4. */
for (i = 0; i < NDMPML4E; i++) {
((pdp_entry_t *)KPML4phys)[DMPML4I + i] = DMPDPphys +
(i << PAGE_SHIFT);
((pdp_entry_t *)KPML4phys)[DMPML4I + i] |= PG_RW | PG_V | PG_U;
}
/* Connect the KVA slot up to the PML4 */
((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
@ -602,31 +618,24 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
void
pmap_init_pat(void)
{
int pat_table[PAT_INDEX_SIZE];
uint64_t pat_msr;
char *sysenv;
static int pat_tested = 0;
u_long cr0, cr4;
int i;
/* Bail if this CPU doesn't implement PAT. */
if (!(cpu_feature & CPUID_PAT))
if ((cpu_feature & CPUID_PAT) == 0)
panic("no PAT??");
/*
* Some Apple Macs based on nVidia chipsets cannot enter ACPI mode
* via SMI# when we use upper 4 PAT entries for unknown reason.
*/
if (!pat_tested) {
pat_works = 1;
sysenv = getenv("smbios.system.product");
if (sysenv != NULL) {
if (strncmp(sysenv, "MacBook5,1", 10) == 0 ||
strncmp(sysenv, "MacBookPro5,5", 13) == 0 ||
strncmp(sysenv, "Macmini3,1", 10) == 0 ||
strncmp(sysenv, "iMac9,1", 7) == 0)
pat_works = 0;
freeenv(sysenv);
}
pat_tested = 1;
}
/* Set default PAT index table. */
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_table[i] = -1;
pat_table[PAT_WRITE_BACK] = 0;
pat_table[PAT_WRITE_THROUGH] = 1;
pat_table[PAT_UNCACHEABLE] = 3;
pat_table[PAT_WRITE_COMBINING] = 3;
pat_table[PAT_WRITE_PROTECTED] = 3;
pat_table[PAT_UNCACHED] = 3;
/* Initialize default PAT entries. */
pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
@ -641,20 +650,48 @@ pmap_init_pat(void)
if (pat_works) {
/*
* Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
* Program 4 and 5 as WP and WC.
* Leave 6 and 7 as UC- and UC.
* Program 5 and 6 as WP and WC.
* Leave 4 and 7 as WB and UC.
*/
pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
PAT_VALUE(5, PAT_WRITE_COMBINING);
pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
PAT_VALUE(6, PAT_WRITE_COMBINING);
pat_table[PAT_UNCACHED] = 2;
pat_table[PAT_WRITE_PROTECTED] = 5;
pat_table[PAT_WRITE_COMBINING] = 6;
} else {
/*
* Just replace PAT Index 2 with WC instead of UC-.
*/
pat_msr &= ~PAT_MASK(2);
pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
pat_table[PAT_WRITE_COMBINING] = 2;
}
/* Disable PGE. */
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);
/* Disable caches (CD = 1, NW = 0). */
cr0 = rcr0();
load_cr0((cr0 & ~CR0_NW) | CR0_CD);
/* Flushes caches and TLBs. */
wbinvd();
invltlb();
/* Update PAT and index table. */
wrmsr(MSR_PAT, pat_msr);
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_index[i] = pat_table[i];
/* Flush caches and TLBs again. */
wbinvd();
invltlb();
/* Restore caches and PGE. */
load_cr0(cr0);
load_cr4(cr4);
}
/*
@ -805,63 +842,24 @@ SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
static int
pmap_cache_bits(int mode, boolean_t is_pde)
{
int pat_flag, pat_index, cache_bits;
int cache_bits, pat_flag, pat_idx;
if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
panic("Unknown caching mode %d\n", mode);
/* The PAT bit is different for PTE's and PDE's. */
pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
/* Map the caching mode to a PAT index. */
if (pat_works) {
switch (mode) {
case PAT_UNCACHEABLE:
pat_index = 3;
break;
case PAT_WRITE_THROUGH:
pat_index = 1;
break;
case PAT_WRITE_BACK:
pat_index = 0;
break;
case PAT_UNCACHED:
pat_index = 2;
break;
case PAT_WRITE_COMBINING:
pat_index = 5;
break;
case PAT_WRITE_PROTECTED:
pat_index = 4;
break;
default:
panic("Unknown caching mode %d\n", mode);
}
} else {
switch (mode) {
case PAT_UNCACHED:
case PAT_UNCACHEABLE:
case PAT_WRITE_PROTECTED:
pat_index = 3;
break;
case PAT_WRITE_THROUGH:
pat_index = 1;
break;
case PAT_WRITE_BACK:
pat_index = 0;
break;
case PAT_WRITE_COMBINING:
pat_index = 2;
break;
default:
panic("Unknown caching mode %d\n", mode);
}
}
pat_idx = pat_index[mode];
/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
cache_bits = 0;
if (pat_index & 0x4)
if (pat_idx & 0x4)
cache_bits |= pat_flag;
if (pat_index & 0x2)
if (pat_idx & 0x2)
cache_bits |= PG_NC_PCD;
if (pat_index & 0x1)
if (pat_idx & 0x1)
cache_bits |= PG_NC_PWT;
return (cache_bits);
}
@ -1596,6 +1594,7 @@ pmap_pinit(pmap_t pmap)
{
vm_page_t pml4pg;
static vm_pindex_t color;
int i;
PMAP_LOCK_INIT(pmap);
@ -1613,7 +1612,10 @@ pmap_pinit(pmap_t pmap)
/* Wire in kernel global address entries. */
pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
for (i = 0; i < NDMPML4E; i++) {
pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + (i << PAGE_SHIFT)) |
PG_RW | PG_V | PG_U;
}
/* install self-referential address mapping entry(s) */
pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
@ -1862,6 +1864,7 @@ void
pmap_release(pmap_t pmap)
{
vm_page_t m;
int i;
KASSERT(pmap->pm_stats.resident_count == 0,
("pmap_release: pmap resident count %ld != 0",
@ -1872,7 +1875,8 @@ pmap_release(pmap_t pmap)
m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
pmap->pm_pml4[KPML4I] = 0; /* KVA */
pmap->pm_pml4[DMPML4I] = 0; /* Direct Map */
for (i = 0; i < NDMPML4E; i++) /* Direct Map */
pmap->pm_pml4[DMPML4I + i] = 0;
pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
m->wire_count--;
@ -4947,6 +4951,54 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
return (error);
}
/*
* Demotes any mapping within the direct map region that covers more than the
* specified range of physical addresses. This range's size must be a power
* of two and its starting address must be a multiple of its size. Since the
* demotion does not change any attributes of the mapping, a TLB invalidation
* is not mandatory. The caller may, however, request a TLB invalidation.
*/
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
{
pdp_entry_t *pdpe;
pd_entry_t *pde;
vm_offset_t va;
boolean_t changed;
if (len == 0)
return;
KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
KASSERT((base & (len - 1)) == 0,
("pmap_demote_DMAP: base is not a multiple of len"));
if (len < NBPDP && base < dmaplimit) {
va = PHYS_TO_DMAP(base);
changed = FALSE;
PMAP_LOCK(kernel_pmap);
pdpe = pmap_pdpe(kernel_pmap, va);
if ((*pdpe & PG_V) == 0)
panic("pmap_demote_DMAP: invalid PDPE");
if ((*pdpe & PG_PS) != 0) {
if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
panic("pmap_demote_DMAP: PDPE failed");
changed = TRUE;
}
if (len < NBPDR) {
pde = pmap_pdpe_to_pde(pdpe, va);
if ((*pde & PG_V) == 0)
panic("pmap_demote_DMAP: invalid PDE");
if ((*pde & PG_PS) != 0) {
if (!pmap_demote_pde(kernel_pmap, pde, va))
panic("pmap_demote_DMAP: PDE failed");
changed = TRUE;
}
}
if (changed && invalidate)
pmap_invalidate_page(kernel_pmap, va);
PMAP_UNLOCK(kernel_pmap);
}
}
/*
* perform the pmap work for mincore
*/

View file

@ -699,25 +699,6 @@ ENTRY(longjmp)
ret
END(longjmp)
/*
* Support for BB-profiling (gcc -a). The kernbb program will extract
* the data from the kernel.
*/
.data
ALIGN_DATA
.globl bbhead
bbhead:
.quad 0
.text
NON_GPROF_ENTRY(__bb_init_func)
movq $1,(%rdi)
movq bbhead,%rax
movq %rax,32(%rdi)
movq %rdi,bbhead
NON_GPROF_RET
/*
* Support for reading MSRs in the safe manner.
*/

View file

@ -83,7 +83,7 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
@ -415,9 +415,8 @@ trap(struct trapframe *frame)
* This check also covers the images
* without the ABI-tag ELF note.
*/
if (SV_CURPROC_ABI() ==
SV_ABI_FREEBSD &&
p->p_osrel >= 700004) {
if (SV_CURPROC_ABI() == SV_ABI_FREEBSD
&& p->p_osrel >= P_OSREL_SIGSEGV) {
i = SIGSEGV;
ucode = SEGV_ACCERR;
} else {

View file

@ -146,6 +146,9 @@ tsc_levels_changed(void *arg, int unit)
int count, error;
uint64_t max_freq;
if (tsc_is_invariant)
return;
/* Only use values from the first CPU, assuming all are equal. */
if (unit != 0)
return;

View file

@ -145,13 +145,18 @@ set_regs32(struct thread *td, struct reg32 *regs)
int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{
struct save87 *sv_87 = (struct save87 *)regs;
struct env87 *penv_87 = &sv_87->sv_env;
struct savefpu *sv_fpu = &td->td_pcb->pcb_user_save;
struct envxmm *penv_xmm = &sv_fpu->sv_env;
struct savefpu *sv_fpu;
struct save87 *sv_87;
struct env87 *penv_87;
struct envxmm *penv_xmm;
int i;
bzero(regs, sizeof(*regs));
sv_87 = (struct save87 *)regs;
penv_87 = &sv_87->sv_env;
fpugetregs(td);
sv_fpu = &td->td_pcb->pcb_user_save;
penv_xmm = &sv_fpu->sv_env;
/* FPU control/status */
penv_87->en_cw = penv_xmm->en_cw;
@ -200,6 +205,7 @@ set_fpregs32(struct thread *td, struct fpreg32 *regs)
sv_fpu->sv_fp[i].fp_acc = sv_87->sv_ac[i];
for (i = 8; i < 16; ++i)
bzero(&sv_fpu->sv_fp[i].fp_acc, sizeof(sv_fpu->sv_fp[i].fp_acc));
fpuuserinited(td);
return (0);
}

View file

@ -99,8 +99,9 @@ ia32_get_fpcontext(struct thread *td, struct ia32_mcontext *mcp)
* 64bit instruction and data pointers. Ignore the difference
* for now, it should be irrelevant for most applications.
*/
mcp->mc_ownedfp = fpugetuserregs(td,
(struct savefpu *)&mcp->mc_fpstate);
mcp->mc_ownedfp = fpugetregs(td);
bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
sizeof(mcp->mc_fpstate));
mcp->mc_fpformat = fpuformat();
}
@ -117,7 +118,7 @@ ia32_set_fpcontext(struct thread *td, const struct ia32_mcontext *mcp)
fpstate_drop(td);
else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
fpusetuserregs(td, (struct savefpu *)&mcp->mc_fpstate);
fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate);
} else
return (EINVAL);
return (0);

View file

@ -1,53 +1,6 @@
/*-
* Copyright (c) 2002 David E. O'Brien. All rights reserved.
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department and Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)param.h 8.1 (Berkeley) 6/10/93
* $FreeBSD$
* This file is in the public domain.
*/
/* $FreeBSD$ */
#ifndef _AMD64_INCLUDE__ALIGN_H_
#define _AMD64_INCLUDE__ALIGN_H_
/*
* Round p (pointer or byte index) up to a correctly-aligned value
* for all data types (int, long, ...). The result is u_long and
* must be cast to any desired pointer type.
*/
#define _ALIGNBYTES (sizeof(long) - 1)
#define _ALIGN(p) (((u_long)(p) + _ALIGNBYTES) &~ _ALIGNBYTES)
#endif /* !_AMD64_INCLUDE__ALIGN_H_ */
#include <x86/_align.h>

View file

@ -1,264 +1,6 @@
/*-
* APM (Advanced Power Management) BIOS Device Driver
*
* Copyright (c) 1994-1995 by HOSOKAWA, Tatsumi <hosokawa@mt.cs.keio.ac.jp>
*
* This software may be used, modified, copied, and distributed, in
* both source and binary form provided that the above copyright and
* these terms are retained. Under no circumstances is the author
* responsible for the proper functioning of this software, nor does
* the author assume any responsibility for damages incurred with its
* use.
*
* Aug, 1994 Implemented on FreeBSD 1.1.5.1R (Toshiba AVS001WD)
*
* $FreeBSD$
* This file is in the public domain.
*/
/* $FreeBSD$ */
#ifndef _MACHINE_APM_BIOS_H_
#define _MACHINE_APM_BIOS_H_
#ifndef _KERNEL
#include <sys/types.h>
#endif
#include <sys/ioccom.h>
/* BIOS id */
#define APM_BIOS 0x53
#define APM_INT 0x15
/* APM flags */
#define APM_16BIT_SUPPORT 0x01
#define APM_32BIT_SUPPORT 0x02
#define APM_CPUIDLE_SLOW 0x04
#define APM_DISABLED 0x08
#define APM_DISENGAGED 0x10
/* APM initializer physical address */
#define APM_OURADDR 0x00080000
/* APM functions */
#define APM_INSTCHECK 0x00
#define APM_REALCONNECT 0x01
#define APM_PROT16CONNECT 0x02
#define APM_PROT32CONNECT 0x03
#define APM_DISCONNECT 0x04
#define APM_CPUIDLE 0x05
#define APM_CPUBUSY 0x06
#define APM_SETPWSTATE 0x07
#define APM_ENABLEDISABLEPM 0x08
#define APM_RESTOREDEFAULT 0x09
#define APM_GETPWSTATUS 0x0a
#define APM_GETPMEVENT 0x0b
#define APM_GETPWSTATE 0x0c
#define APM_ENABLEDISABLEDPM 0x0d
#define APM_DRVVERSION 0x0e
#define APM_ENGAGEDISENGAGEPM 0x0f
#define APM_GETCAPABILITIES 0x10
#define APM_RESUMETIMER 0x11
#define APM_RESUMEONRING 0x12
#define APM_TIMERREQUESTS 0x13
#define APM_OEMFUNC 0x80
/* error code */
#define APME_OK 0x00
#define APME_PMDISABLED 0x01
#define APME_REALESTABLISHED 0x02
#define APME_NOTCONNECTED 0x03
#define APME_PROT16ESTABLISHED 0x05
#define APME_PROT16NOTSUPPORTED 0x06
#define APME_PROT32ESTABLISHED 0x07
#define APME_PROT32NOTDUPPORTED 0x08
#define APME_UNKNOWNDEVICEID 0x09
#define APME_OUTOFRANGE 0x0a
#define APME_NOTENGAGED 0x0b
#define APME_CANTENTERSTATE 0x60
#define APME_NOPMEVENT 0x80
#define APME_NOAPMPRESENT 0x86
/* device code */
#define PMDV_APMBIOS 0x0000
#define PMDV_ALLDEV 0x0001
#define PMDV_DISP0 0x0100
#define PMDV_DISP1 0x0101
#define PMDV_DISPALL 0x01ff
#define PMDV_2NDSTORAGE0 0x0200
#define PMDV_2NDSTORAGE1 0x0201
#define PMDV_2NDSTORAGE2 0x0202
#define PMDV_2NDSTORAGE3 0x0203
#define PMDV_PARALLEL0 0x0300
#define PMDV_PARALLEL1 0x0301
#define PMDV_SERIAL0 0x0400
#define PMDV_SERIAL1 0x0401
#define PMDV_SERIAL2 0x0402
#define PMDV_SERIAL3 0x0403
#define PMDV_SERIAL4 0x0404
#define PMDV_SERIAL5 0x0405
#define PMDV_SERIAL6 0x0406
#define PMDV_SERIAL7 0x0407
#define PMDV_NET0 0x0500
#define PMDV_NET1 0x0501
#define PMDV_NET2 0x0502
#define PMDV_NET3 0x0503
#define PMDV_PCMCIA0 0x0600
#define PMDV_PCMCIA1 0x0601
#define PMDV_PCMCIA2 0x0602
#define PMDV_PCMCIA3 0x0603
/* 0x0700 - 0x7fff Reserved */
#define PMDV_BATT_BASE 0x8000
#define PMDV_BATT0 0x8001
#define PMDV_BATT1 0x8002
#define PMDV_BATT_ALL 0x80ff
/* 0x8100 - 0xdfff Reserved */
/* 0xe000 - 0xefff OEM-defined power device IDs */
/* 0xf000 - 0xffff Reserved */
/* Power state */
#define PMST_APMENABLED 0x0000
#define PMST_STANDBY 0x0001
#define PMST_SUSPEND 0x0002
#define PMST_OFF 0x0003
#define PMST_LASTREQNOTIFY 0x0004
#define PMST_LASTREQREJECT 0x0005
/* 0x0006 - 0x001f Reserved system states */
/* 0x0020 - 0x003f OEM-defined system states */
/* 0x0040 - 0x007f OEM-defined device states */
/* 0x0080 - 0xffff Reserved device states */
#if !defined(ASSEMBLER) && !defined(INITIALIZER)
/* C definitions */
struct apmhook {
struct apmhook *ah_next;
int (*ah_fun)(void *ah_arg);
void *ah_arg;
const char *ah_name;
int ah_order;
};
#define APM_HOOK_NONE (-1)
#define APM_HOOK_SUSPEND 0
#define APM_HOOK_RESUME 1
#define NAPM_HOOK 2
#ifdef _KERNEL
void apm_suspend(int state);
struct apmhook *apm_hook_establish (int apmh, struct apmhook *);
void apm_hook_disestablish (int apmh, struct apmhook *);
void apm_cpu_idle(void);
void apm_cpu_busy(void);
#endif
#endif /* !ASSEMBLER && !INITIALIZER */
#define APM_MIN_ORDER 0x00
#define APM_MID_ORDER 0x80
#define APM_MAX_ORDER 0xff
/* power management event code */
#define PMEV_NOEVENT 0x0000
#define PMEV_STANDBYREQ 0x0001
#define PMEV_SUSPENDREQ 0x0002
#define PMEV_NORMRESUME 0x0003
#define PMEV_CRITRESUME 0x0004
#define PMEV_BATTERYLOW 0x0005
#define PMEV_POWERSTATECHANGE 0x0006
#define PMEV_UPDATETIME 0x0007
#define PMEV_CRITSUSPEND 0x0008
#define PMEV_USERSTANDBYREQ 0x0009
#define PMEV_USERSUSPENDREQ 0x000a
#define PMEV_STANDBYRESUME 0x000b
#define PMEV_CAPABILITIESCHANGE 0x000c
/* 0x000d - 0x00ff Reserved system events */
/* 0x0100 - 0x01ff Reserved device events */
/* 0x0200 - 0x02ff OEM-defined APM events */
/* 0x0300 - 0xffff Reserved */
#define PMEV_DEFAULT 0xffffffff /* used for customization */
#if !defined(ASSEMBLER) && !defined(INITIALIZER)
/*
* Old apm_info structure, returned by the APMIO_GETINFO_OLD ioctl. This
* is for backward compatibility with old executables.
*/
typedef struct apm_info_old {
u_int ai_major; /* APM major version */
u_int ai_minor; /* APM minor version */
u_int ai_acline; /* AC line status */
u_int ai_batt_stat; /* Battery status */
u_int ai_batt_life; /* Remaining battery life */
u_int ai_status; /* Status of APM support (enabled/disabled) */
} *apm_info_old_t;
/*
* Structure returned by the APMIO_GETINFO ioctl.
*
* In the comments below, the parenthesized numbers indicate the minimum
* value of ai_infoversion for which each field is valid.
*/
typedef struct apm_info {
u_int ai_infoversion; /* Indicates which fields are valid */
u_int ai_major; /* APM major version (0) */
u_int ai_minor; /* APM minor version (0) */
u_int ai_acline; /* AC line status (0) */
u_int ai_batt_stat; /* Battery status (0) */
u_int ai_batt_life; /* Remaining battery life in percent (0) */
int ai_batt_time; /* Remaining battery time in seconds (0) */
u_int ai_status; /* True if enabled (0) */
u_int ai_batteries; /* Number of batteries (1) */
u_int ai_capabilities;/* APM Capabilities (1) */
u_int ai_spare[6]; /* For future expansion */
} *apm_info_t;
/* Battery flag */
#define APM_BATT_HIGH 0x01
#define APM_BATT_LOW 0x02
#define APM_BATT_CRITICAL 0x04
#define APM_BATT_CHARGING 0x08
#define APM_BATT_NOT_PRESENT 0x10
#define APM_BATT_NO_SYSTEM 0x80
typedef struct apm_pwstatus {
u_int ap_device; /* Device code of battery */
u_int ap_acline; /* AC line status (0) */
u_int ap_batt_stat; /* Battery status (0) */
u_int ap_batt_flag; /* Battery flag (0) */
u_int ap_batt_life; /* Remaining battery life in percent (0) */
int ap_batt_time; /* Remaining battery time in seconds (0) */
} *apm_pwstatus_t;
struct apm_bios_arg {
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
uint32_t esi;
uint32_t edi;
};
struct apm_event_info {
u_int type;
u_int index;
u_int spare[8];
};
#define APMIO_SUSPEND _IO('P', 1)
#define APMIO_GETINFO_OLD _IOR('P', 2, struct apm_info_old)
#define APMIO_ENABLE _IO('P', 5)
#define APMIO_DISABLE _IO('P', 6)
#define APMIO_HALTCPU _IO('P', 7)
#define APMIO_NOTHALTCPU _IO('P', 8)
#define APMIO_DISPLAY _IOW('P', 9, int)
#define APMIO_BIOS _IOWR('P', 10, struct apm_bios_arg)
#define APMIO_GETINFO _IOR('P', 11, struct apm_info)
#define APMIO_STANDBY _IO('P', 12)
#define APMIO_GETPWSTATUS _IOWR('P', 13, struct apm_pwstatus)
/* for /dev/apmctl */
#define APMIO_NEXTEVENT _IOR('A', 100, struct apm_event_info)
#define APMIO_REJECTLASTREQ _IO('P', 101)
#endif /* !ASSEMBLER && !INITIALIZER */
#endif /* !_MACHINE_APM_BIOS_H_ */
#include <x86/apm_bios.h>

View file

@ -112,12 +112,11 @@ void fpudna(void);
void fpudrop(void);
void fpuexit(struct thread *td);
int fpuformat(void);
int fpugetregs(struct thread *td, struct savefpu *addr);
int fpugetuserregs(struct thread *td, struct savefpu *addr);
int fpugetregs(struct thread *td);
void fpuinit(void);
void fpusetregs(struct thread *td, struct savefpu *addr);
void fpusetuserregs(struct thread *td, struct savefpu *addr);
int fputrap(void);
void fpuuserinited(struct thread *td);
int fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx,
u_int flags);
int fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx);

View file

@ -30,14 +30,14 @@
#define _MACHINE_MINIDUMP_H_ 1
#define MINIDUMP_MAGIC "minidump FreeBSD/amd64"
#define MINIDUMP_VERSION 1
#define MINIDUMP_VERSION 2
struct minidumphdr {
char magic[24];
uint32_t version;
uint32_t msgbufsize;
uint32_t bitmapsize;
uint32_t ptesize;
uint32_t pmapsize;
uint64_t kernbase;
uint64_t dmapbase;
uint64_t dmapend;

View file

@ -1,35 +0,0 @@
/*-
* Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Berkeley Software Design Inc's name may not be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
* $FreeBSD$
*/
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_
#endif /* __MACHINE_MUTEX_H */

View file

@ -125,15 +125,21 @@
#define NUPDPE (NUPML4E*NPDPEPG)/* number of userland PDP pages */
#define NUPDE (NUPDPE*NPDEPG) /* number of userland PD entries */
#define NDMPML4E 1 /* number of dmap PML4 slots */
/*
* NDMPML4E is the number of PML4 entries that are used to implement the
* direct map. It must be a power of two.
*/
#define NDMPML4E 2
/*
* The *PDI values control the layout of virtual memory
* The *PDI values control the layout of virtual memory. The starting address
* of the direct map, which is controlled by DMPML4I, must be a multiple of
* its size. (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
*/
#define PML4PML4I (NPML4EPG/2) /* Index of recursive pml4 mapping */
#define KPML4I (NPML4EPG-1) /* Top 512GB for KVM */
#define DMPML4I (KPML4I-1) /* Next 512GB down for direct map */
#define DMPML4I rounddown(KPML4I - NDMPML4E, NDMPML4E) /* Below KVM */
#define KPDPI (NPDPEPG-2) /* kernbase at -2GB */
@ -307,6 +313,7 @@ extern vm_offset_t virtual_end;
void pmap_bootstrap(vm_paddr_t *);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void pmap_init_pat(void);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);

View file

@ -77,17 +77,17 @@
#error this file needs to be ported to your compiler
#endif /* !__GNUCLIKE_ASM */
#else /* !GUPROF */
#define MCOUNT_DECL(s) u_long s;
#define MCOUNT_DECL(s) register_t s;
#ifdef SMP
extern int mcount_lock;
#define MCOUNT_ENTER(s) { s = read_rflags(); disable_intr(); \
#define MCOUNT_ENTER(s) { s = intr_disable(); \
while (!atomic_cmpset_acq_int(&mcount_lock, 0, 1)) \
/* nothing */ ; }
#define MCOUNT_EXIT(s) { atomic_store_rel_int(&mcount_lock, 0); \
write_rflags(s); }
intr_restore(s); }
#else
#define MCOUNT_ENTER(s) { s = read_rflags(); disable_intr(); }
#define MCOUNT_EXIT(s) (write_rflags(s))
#define MCOUNT_ENTER(s) { s = intr_disable(); }
#define MCOUNT_EXIT(s) (intr_restore(s))
#endif
#endif /* GUPROF */

View file

@ -135,6 +135,15 @@
#define CPUID2_POPCNT 0x00800000
#define CPUID2_AESNI 0x02000000
/*
* Important bits in the Thermal and Power Management flags
* CPUID.6 EAX and ECX.
*/
#define CPUTPM1_SENSOR 0x00000001
#define CPUTPM1_TURBO 0x00000002
#define CPUTPM1_ARAT 0x00000004
#define CPUTPM2_EFFREQ 0x00000001
/*
* Important bits in the AMD extended cpuid flags
*/
@ -206,6 +215,7 @@
#define AMDPM_100MHZ_STEPS 0x00000040
#define AMDPM_HW_PSTATE 0x00000080
#define AMDPM_TSC_INVARIANT 0x00000100
#define AMDPM_CPB 0x00000200
/*
* AMD extended function 8000_0008h ecx info
@ -239,6 +249,8 @@
#define MSR_BIOS_SIGN 0x08b
#define MSR_PERFCTR0 0x0c1
#define MSR_PERFCTR1 0x0c2
#define MSR_MPERF 0x0e7
#define MSR_APERF 0x0e8
#define MSR_IA32_EXT_CONFIG 0x0ee /* Undocumented. Core Solo/Duo only */
#define MSR_MTRRcap 0x0fe
#define MSR_BBL_CR_ADDR 0x116
@ -503,6 +515,7 @@
#define MSR_PERFCTR2 0xc0010006
#define MSR_PERFCTR3 0xc0010007
#define MSR_SYSCFG 0xc0010010
#define MSR_HWCR 0xc0010015
#define MSR_IORRBASE0 0xc0010016
#define MSR_IORRMASK0 0xc0010017
#define MSR_IORRBASE1 0xc0010018

View file

@ -163,8 +163,9 @@
* 0x0000000000000000 - 0x00007fffffffffff user map
* 0x0000800000000000 - 0xffff7fffffffffff does not exist (hole)
* 0xffff800000000000 - 0xffff804020100fff recursive page table (512GB slot)
* 0xffff804020101000 - 0xfffffeffffffffff unused
* 0xffffff0000000000 - 0xffffff7fffffffff 512GB direct map mappings
* 0xffff804020101000 - 0xfffffdffffffffff unused
* 0xfffffe0000000000 - 0xfffffeffffffffff 1TB direct map
* 0xffffff0000000000 - 0xffffff7fffffffff unused
* 0xffffff8000000000 - 0xffffffffffffffff 512GB kernel map
*
* Within the kernel map:
@ -176,7 +177,7 @@
#define VM_MIN_KERNEL_ADDRESS KVADDR(KPML4I, NPDPEPG-512, 0, 0)
#define DMAP_MIN_ADDRESS KVADDR(DMPML4I, 0, 0, 0)
#define DMAP_MAX_ADDRESS KVADDR(DMPML4I+1, 0, 0, 0)
#define DMAP_MAX_ADDRESS KVADDR(DMPML4I + NDMPML4E, 0, 0, 0)
#define KERNBASE KVADDR(KPML4I, KPDPI, 0, 0)

View file

@ -159,7 +159,7 @@ _startC(void)
#if defined(FLASHADDR) && defined(LOADERRAMADDR)
unsigned int pc;
__asm __volatile("adr %0, _start\n"
__asm __volatile("mov %0, pc\n"
: "=r" (pc));
if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) ||
(FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) {
@ -173,11 +173,13 @@ _startC(void)
*/
unsigned int target_addr;
unsigned int tmp_sp;
uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR
+ (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000;
target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR;
tmp_sp = target_addr + 0x100000 +
(unsigned int)&_end - (unsigned int)&_start;
memcpy((char *)target_addr, (char *)pc,
memcpy((char *)target_addr, (char *)src_addr,
(unsigned int)&_end - (unsigned int)&_start);
/* Temporary set the sp and jump to the new location. */
__asm __volatile(

View file

@ -493,11 +493,15 @@ void
spinlock_enter(void)
{
struct thread *td;
register_t cspr;
td = curthread;
if (td->td_md.md_spinlock_count == 0)
td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
td->td_md.md_spinlock_count++;
if (td->td_md.md_spinlock_count == 0) {
cspr = disable_interrupts(I32_bit | F32_bit);
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_cspr = cspr;
} else
td->td_md.md_spinlock_count++;
critical_enter();
}
@ -505,12 +509,14 @@ void
spinlock_exit(void)
{
struct thread *td;
register_t cspr;
td = curthread;
critical_exit();
cspr = td->td_md.md_saved_cspr;
td->td_md.md_spinlock_count--;
if (td->td_md.md_spinlock_count == 0)
restore_interrupts(td->td_md.md_saved_cspr);
restore_interrupts(cspr);
}
/*

View file

@ -87,6 +87,10 @@ device ad7418 # AD7418 on I2C bus
device avila_led
device gpio
device gpioled
device avila_gpio # GPIO pins on J8
device ata
device atadisk # ATA disk drives
device avila_ata # Gateworks CF/IDE support

View file

@ -41,6 +41,9 @@ hint.ata_avila.0.at="ixp0"
# Front Panel LED
hint.led_avila.0.at="ixp0"
# GPIO pins
hint.gpio_avila.0.at="ixp0"
# Analog Devices AD7418 temperature sensor
hint.ad7418.0.at="iicbus0"
hint.ad7418.0.addr=0x50

View file

@ -7,3 +7,7 @@ hint.ds1672_rtc.0.addr=0xd0
# National Semiconductor LM75 temperature sensor sitting on the I2C bus
hint.lm75.0.at="iicbus0"
hint.lm75.0.addr=0x9e
# Atmel SPIflash sitting on the spibus
hint.at45d.0.at="spibus0"
hint.at45d.0.addr=0x00

View file

@ -90,6 +90,10 @@ device ad7418 # AD7418 on I2C bus
device cambria_fled # Font Panel LED on I2C bus
device cambria_led # 8-LED latch
device gpio
device gpioled
device cambria_gpio # GPIO pins on J11
device ata
device atadisk # ATA disk drives
device avila_ata # Gateworks CF/IDE support

View file

@ -54,6 +54,10 @@ hint.fled.0.addr=0x5a
# Octal LED Latch
hint.led_cambria.0.at="ixp0"
# GPIO pins
hint.gpio_cambria.0.at="iicbus0"
hint.gpio_cambria.0.addr=0x56
# Analog Devices AD7418 temperature sensor
hint.ad7418.0.at="iicbus0"
hint.ad7418.0.addr=0x50

View file

@ -70,7 +70,7 @@
*/
#if (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_IXP12X0) || defined(CPU_XSCALE_IXP425) || defined(CPU_FA526))
defined(CPU_IXP12X0) || defined(CPU_FA526))
#define ARM_ARCH_4 1
#else
#define ARM_ARCH_4 0
@ -79,7 +79,8 @@
#if (defined(CPU_ARM9E) || defined(CPU_ARM10) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_FA626TE))
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_FA626TE))
#define ARM_ARCH_5 1
#else
#define ARM_ARCH_5 0
@ -141,9 +142,9 @@
#define ARM_MMU_SA1 0
#endif
#if(defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219)) || defined(CPU_XSCALE_81342)
#if (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342))
#define ARM_MMU_XSCALE 1
#else
#define ARM_MMU_XSCALE 0
@ -162,7 +163,7 @@
*/
#if (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_80219)) || defined(CPU_XSCALE_81342)
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342))
#define ARM_XSCALE_PMU 1
#else
#define ARM_XSCALE_PMU 0

View file

@ -1,32 +0,0 @@
/*-
* Copyright (c) 2001 Jake Burkholder.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_
#endif /* !_MACHINE_MUTEX_H_ */

View file

@ -0,0 +1,360 @@
/*-
* Copyright (c) 2009, Oleksandr Tymoshenko <gonzo@FreeBSD.org>
* Copyright (c) 2009, Luiz Otavio O Souza.
* Copyright (c) 2010, Andrew Thompson <thompsa@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* GPIO driver for Gateworks Avilia
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/gpio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include "gpio_if.h"
#define GPIO_SET_BITS(sc, reg, bits) \
GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, (reg)) | (bits))
#define GPIO_CLEAR_BITS(sc, reg, bits) \
GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, (reg)) & ~(bits))
struct avila_gpio_softc {
device_t sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_gpio_ioh;
uint32_t sc_valid;
struct gpio_pin sc_pins[IXP4XX_GPIO_PINS];
};
struct avila_gpio_pin {
const char *name;
int pin;
int caps;
};
#define GPIO_PIN_IO (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)
static struct avila_gpio_pin avila_gpio_pins[] = {
{ "GPIO0", 0, GPIO_PIN_IO },
{ "GPIO1", 1, GPIO_PIN_IO },
{ "GPIO2", 2, GPIO_PIN_IO },
{ "GPIO3", 3, GPIO_PIN_IO },
{ "GPIO4", 4, GPIO_PIN_IO },
/*
* The following pins are connected to system devices and should not
* really be frobbed.
*/
#if 0
{ "SER_ENA", 5, GPIO_PIN_IO },
{ "I2C_SCL", 6, GPIO_PIN_IO },
{ "I2C_SDA", 7, GPIO_PIN_IO },
{ "PCI_INTD", 8, GPIO_PIN_IO },
{ "PCI_INTC", 9, GPIO_PIN_IO },
{ "PCI_INTB", 10, GPIO_PIN_IO },
{ "PCI_INTA", 11, GPIO_PIN_IO },
{ "ATA_INT", 12, GPIO_PIN_IO },
{ "PCI_RST", 13, GPIO_PIN_IO },
{ "PCI_CLK", 14, GPIO_PIN_OUTPUT },
{ "EX_CLK", 15, GPIO_PIN_OUTPUT },
#endif
};
#undef GPIO_PIN_IO
/*
* Helpers
*/
static void avila_gpio_pin_configure(struct avila_gpio_softc *sc,
struct gpio_pin *pin, uint32_t flags);
static int avila_gpio_pin_flags(struct avila_gpio_softc *sc, uint32_t pin);
/*
* Driver stuff
*/
static int avila_gpio_probe(device_t dev);
static int avila_gpio_attach(device_t dev);
static int avila_gpio_detach(device_t dev);
/*
* GPIO interface
*/
static int avila_gpio_pin_max(device_t dev, int *maxpin);
static int avila_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps);
static int avila_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t
*flags);
static int avila_gpio_pin_getname(device_t dev, uint32_t pin, char *name);
static int avila_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags);
static int avila_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value);
static int avila_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val);
static int avila_gpio_pin_toggle(device_t dev, uint32_t pin);
/*
 * Return the current direction of a pin, GPIO_PIN_INPUT or
 * GPIO_PIN_OUTPUT.
 *
 * Direction is latched in the GPIO output-enable register (GPOER):
 * a set bit configures the line as an input, a clear bit as an
 * output (see avila_gpio_pin_configure()).  The original code read
 * GPINR here, which reflects the current *level* of the pin rather
 * than its direction; read GPOER instead.
 */
static int
avila_gpio_pin_flags(struct avila_gpio_softc *sc, uint32_t pin)
{
	uint32_t v;

	v = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOER) & (1 << pin);
	return (v ? GPIO_PIN_INPUT : GPIO_PIN_OUTPUT);
}
/*
 * Apply direction flags to a pin: update the cached gp_flags and
 * program the GPIO output-enable register (GPOER) accordingly.
 * In GPOER a cleared bit makes the line an output and a set bit
 * makes it an input.  Flags other than INPUT/OUTPUT are ignored.
 * Caller passes pre-filtered flags (see avila_gpio_pin_setflags()).
 */
static void
avila_gpio_pin_configure(struct avila_gpio_softc *sc, struct gpio_pin *pin,
    unsigned int flags)
{
	uint32_t mask;

	mask = 1 << pin->gp_pin;

	/*
	 * Manage input/output
	 */
	if (flags & (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)) {
		IXP4XX_GPIO_LOCK(sc);
		/* Clear both direction bits, then set the requested one. */
		pin->gp_flags &= ~(GPIO_PIN_INPUT|GPIO_PIN_OUTPUT);
		if (flags & GPIO_PIN_OUTPUT) {
			pin->gp_flags |= GPIO_PIN_OUTPUT;
			/* Clear GPOER bit: line becomes an output. */
			GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOER, mask);
		}
		else {
			pin->gp_flags |= GPIO_PIN_INPUT;
			/* Set GPOER bit: line becomes an input. */
			GPIO_SET_BITS(sc, IXP425_GPIO_GPOER, mask);
		}
		IXP4XX_GPIO_UNLOCK(sc);
	}
}
/*
 * Report the highest valid pin number; pins are numbered
 * 0 .. IXP4XX_GPIO_PINS - 1.  Always succeeds.
 */
static int
avila_gpio_pin_max(device_t dev, int *maxpin)
{
	int last;

	last = IXP4XX_GPIO_PINS - 1;
	*maxpin = last;
	return (0);
}
/*
 * Return the capability mask of a pin.  Fails with EINVAL for an
 * out-of-range pin or one not marked valid in sc_valid.
 */
static int
avila_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
{
	struct avila_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= IXP4XX_GPIO_PINS)
		return (EINVAL);
	if ((sc->sc_valid & (1 << pin)) == 0)
		return (EINVAL);
	*caps = sc->sc_pins[pin].gp_caps;
	return (0);
}
/*
 * Return the current flags (direction) of a pin.  The cached
 * gp_flags are re-read from hardware first because other drivers
 * also manipulate these GPIO lines.  EINVAL for an out-of-range
 * or invalid pin.
 */
static int
avila_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
{
	struct avila_gpio_softc *sc = device_get_softc(dev);

	if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin)))
		return (EINVAL);
	IXP4XX_GPIO_LOCK(sc);
	/* refresh since we do not own all the pins */
	sc->sc_pins[pin].gp_flags = avila_gpio_pin_flags(sc, pin);
	*flags = sc->sc_pins[pin].gp_flags;
	IXP4XX_GPIO_UNLOCK(sc);
	return (0);
}
/*
 * Copy the pin's name into the caller-supplied buffer, which must be
 * at least GPIOMAXNAME bytes.  EINVAL for an out-of-range or invalid
 * pin.
 */
static int
avila_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
{
	struct avila_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= IXP4XX_GPIO_PINS)
		return (EINVAL);
	if ((sc->sc_valid & (1 << pin)) == 0)
		return (EINVAL);
	memcpy(name, sc->sc_pins[pin].gp_name, GPIOMAXNAME);
	return (0);
}
/*
 * Change the flags of a pin: validate the request, then hand the
 * actual register programming to avila_gpio_pin_configure().
 * Returns EINVAL for a bad pin, a flag outside the pin's caps, or a
 * request for both INPUT and OUTPUT at once.
 */
static int
avila_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
	struct avila_gpio_softc *sc = device_get_softc(dev);
	uint32_t mask = 1 << pin;

	if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & mask))
		return (EINVAL);
	/* Filter out unwanted flags */
	if ((flags &= sc->sc_pins[pin].gp_caps) != flags)
		return (EINVAL);
	/* Can't mix input/output together */
	if ((flags & (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)) ==
	    (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT))
		return (EINVAL);
	avila_gpio_pin_configure(sc, &sc->sc_pins[pin], flags);
	return (0);
}
/*
 * Drive an output pin high (nonzero value) or low (zero) via the
 * GPIO output register.  EINVAL for an out-of-range or invalid pin.
 */
static int
avila_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
{
	struct avila_gpio_softc *sc;
	uint32_t bit;

	sc = device_get_softc(dev);
	if (pin >= IXP4XX_GPIO_PINS)
		return (EINVAL);
	bit = 1 << pin;
	if ((sc->sc_valid & bit) == 0)
		return (EINVAL);
	IXP4XX_GPIO_LOCK(sc);
	if (value == 0)
		GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOUTR, bit);
	else
		GPIO_SET_BITS(sc, IXP425_GPIO_GPOUTR, bit);
	IXP4XX_GPIO_UNLOCK(sc);
	return (0);
}
/*
 * Read the current level of a pin from the GPIO input register and
 * store 0 or 1 in *val.  EINVAL for an out-of-range or invalid pin.
 */
static int
avila_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
{
	struct avila_gpio_softc *sc = device_get_softc(dev);

	if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin)))
		return (EINVAL);
	IXP4XX_GPIO_LOCK(sc);
	*val = (GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR) & (1 << pin)) ? 1 : 0;
	IXP4XX_GPIO_UNLOCK(sc);
	return (0);
}
/*
 * Invert a pin: sample its current level from the input register and
 * write the opposite level to the output register, all under the GPIO
 * lock.  EINVAL for an out-of-range or invalid pin.
 */
static int
avila_gpio_pin_toggle(device_t dev, uint32_t pin)
{
	struct avila_gpio_softc *sc;
	uint32_t bit;

	sc = device_get_softc(dev);
	if (pin >= IXP4XX_GPIO_PINS)
		return (EINVAL);
	bit = 1 << pin;
	if ((sc->sc_valid & bit) == 0)
		return (EINVAL);
	IXP4XX_GPIO_LOCK(sc);
	if ((GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR) & bit) != 0)
		GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOUTR, bit);
	else
		GPIO_SET_BITS(sc, IXP425_GPIO_GPOUTR, bit);
	IXP4XX_GPIO_UNLOCK(sc);
	return (0);
}
/*
 * Newbus probe: this pseudo-device always matches; just set the
 * description.
 */
static int
avila_gpio_probe(device_t dev)
{
	static const char descr[] = "Gateworks Avila GPIO driver";

	device_set_desc(dev, descr);
	return (0);
}
/*
 * Newbus attach: borrow the bus tag and GPIO register handle from the
 * parent ixp425 softc, register the pins listed in avila_gpio_pins[]
 * (recording each one in the sc_valid bitmask), then create the
 * gpioc/gpiobus children and attach them.
 */
static int
avila_gpio_attach(device_t dev)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	struct avila_gpio_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	int i;

	sc->sc_dev = dev;
	/* Share the parent's bus space; registers are already mapped. */
	sc->sc_iot = sa->sc_iot;
	sc->sc_gpio_ioh = sa->sc_gpio_ioh;

	for (i = 0; i < N(avila_gpio_pins); i++) {
		struct avila_gpio_pin *p = &avila_gpio_pins[i];

		strncpy(sc->sc_pins[p->pin].gp_name, p->name, GPIOMAXNAME);
		sc->sc_pins[p->pin].gp_pin = p->pin;
		sc->sc_pins[p->pin].gp_caps = p->caps;
		/* Seed the flag cache from the current hardware state. */
		sc->sc_pins[p->pin].gp_flags = avila_gpio_pin_flags(sc, p->pin);
		sc->sc_valid |= 1 << p->pin;
	}

	device_add_child(dev, "gpioc", device_get_unit(dev));
	device_add_child(dev, "gpiobus", device_get_unit(dev));

	return (bus_generic_attach(dev));
#undef N
}
/*
 * Newbus detach: detach the gpioc/gpiobus children.  Propagate the
 * status from bus_generic_detach() instead of discarding it — the
 * original unconditionally returned 0, hiding a failed child detach
 * from the caller.
 */
static int
avila_gpio_detach(device_t dev)
{
	int error;

	error = bus_generic_detach(dev);
	return (error);
}
static device_method_t gpio_avila_methods[] = {
DEVMETHOD(device_probe, avila_gpio_probe),
DEVMETHOD(device_attach, avila_gpio_attach),
DEVMETHOD(device_detach, avila_gpio_detach),
/* GPIO protocol */
DEVMETHOD(gpio_pin_max, avila_gpio_pin_max),
DEVMETHOD(gpio_pin_getname, avila_gpio_pin_getname),
DEVMETHOD(gpio_pin_getflags, avila_gpio_pin_getflags),
DEVMETHOD(gpio_pin_getcaps, avila_gpio_pin_getcaps),
DEVMETHOD(gpio_pin_setflags, avila_gpio_pin_setflags),
DEVMETHOD(gpio_pin_get, avila_gpio_pin_get),
DEVMETHOD(gpio_pin_set, avila_gpio_pin_set),
DEVMETHOD(gpio_pin_toggle, avila_gpio_pin_toggle),
{0, 0},
};
static driver_t gpio_avila_driver = {
"gpio_avila",
gpio_avila_methods,
sizeof(struct avila_gpio_softc),
};
static devclass_t gpio_avila_devclass;
DRIVER_MODULE(gpio_avila, ixp, gpio_avila_driver, gpio_avila_devclass, 0, 0);

View file

@ -52,12 +52,14 @@ led_func(void *arg, int onoff)
struct led_avila_softc *sc = arg;
uint32_t reg;
IXP4XX_GPIO_LOCK();
reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOUTR);
if (onoff)
reg &= ~GPIO_LED_STATUS_BIT;
else
reg |= GPIO_LED_STATUS_BIT;
GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPOUTR, reg);
IXP4XX_GPIO_UNLOCK();
}
static int

View file

@ -0,0 +1,471 @@
/*-
* Copyright (c) 2010, Andrew Thompson <thompsa@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* GPIO driver for Gateworks Cambria
*
* Note:
* The Cambria PLD does not set the i2c ack bit after each write, if we used the
* regular iicbus interface it would abort the xfer after the address byte
* times out and not write our latch. To get around this we grab the iicbus and
* then do our own bit banging. This is a comprimise to changing all the iicbb
* device methods to allow a flag to be passed down and is similir to how Linux
* does it.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/gpio.h>
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixdp425reg.h>
#include <dev/iicbus/iiconf.h>
#include <dev/iicbus/iicbus.h>
#include "iicbb_if.h"
#include "gpio_if.h"
#define IIC_M_WR 0 /* write operation */
#define PLD_ADDR 0xac /* slave address */
#define I2C_DELAY 10
#define GPIO_CONF_CLR(sc, reg, mask) \
GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) &~ (mask))
#define GPIO_CONF_SET(sc, reg, mask) \
GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) | (mask))
#define GPIO_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define GPIO_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define GPIO_PINS 5
struct cambria_gpio_softc {
device_t sc_dev;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_gpio_ioh;
struct mtx sc_mtx;
struct gpio_pin sc_pins[GPIO_PINS];
uint8_t sc_latch;
};
struct cambria_gpio_pin {
const char *name;
int pin;
int flags;
};
extern struct ixp425_softc *ixp425_softc;
static struct cambria_gpio_pin cambria_gpio_pins[GPIO_PINS] = {
{ "GPIO0", 0, GPIO_PIN_OUTPUT },
{ "GPIO1", 1, GPIO_PIN_OUTPUT },
{ "GPIO2", 2, GPIO_PIN_OUTPUT },
{ "GPIO3", 3, GPIO_PIN_OUTPUT },
{ "GPIO4", 4, GPIO_PIN_OUTPUT },
};
/*
* Helpers
*/
static int cambria_gpio_read(struct cambria_gpio_softc *, uint32_t, unsigned int *);
static int cambria_gpio_write(struct cambria_gpio_softc *);
/*
* Driver stuff
*/
static int cambria_gpio_probe(device_t dev);
static int cambria_gpio_attach(device_t dev);
static int cambria_gpio_detach(device_t dev);
/*
* GPIO interface
*/
static int cambria_gpio_pin_max(device_t dev, int *maxpin);
static int cambria_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps);
static int cambria_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t
*flags);
static int cambria_gpio_pin_getname(device_t dev, uint32_t pin, char *name);
static int cambria_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags);
static int cambria_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value);
static int cambria_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val);
static int cambria_gpio_pin_toggle(device_t dev, uint32_t pin);
/*
 * Sample the bit-banged SDA line.  Setting the GPOER bit releases the
 * line (configures it as an input — see ixp425 GPOER usage), after
 * which the level is read from GPINR.  Returns nonzero (the raw
 * GPIO_I2C_SDA_BIT mask) when SDA is high, 0 when low.
 */
static int
i2c_getsda(struct cambria_gpio_softc *sc)
{
	uint32_t reg;

	IXP4XX_GPIO_LOCK();
	GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
	reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR);
	IXP4XX_GPIO_UNLOCK();
	return (reg & GPIO_I2C_SDA_BIT);
}
/*
 * Drive the bit-banged SDA line open-drain style: the output latch is
 * held at 0 and the line is toggled via the output-enable register —
 * enabling the driver pulls SDA low, tri-stating it (GPOER bit set)
 * lets the pull-up take it high.  Delays I2C_DELAY us afterwards to
 * satisfy bus timing.
 */
static void
i2c_setsda(struct cambria_gpio_softc *sc, int val)
{

	IXP4XX_GPIO_LOCK();
	GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SDA_BIT);
	if (val)
		GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
	else
		GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
	IXP4XX_GPIO_UNLOCK();
	DELAY(I2C_DELAY);
}
/*
 * Drive the bit-banged SCL line, same open-drain technique as
 * i2c_setsda(): latch held low, level controlled through the
 * output-enable register, followed by an I2C_DELAY us settle time.
 */
static void
i2c_setscl(struct cambria_gpio_softc *sc, int val)
{

	IXP4XX_GPIO_LOCK();
	GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SCL_BIT);
	if (val)
		GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT);
	else
		GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT);
	IXP4XX_GPIO_UNLOCK();
	DELAY(I2C_DELAY);
}
/*
 * Generate an I2C START condition: with both lines high, pull SDA low
 * while SCL is high, then take SCL low ready for the first data bit.
 */
static void
i2c_sendstart(struct cambria_gpio_softc *sc)
{

	i2c_setsda(sc, 1);
	i2c_setscl(sc, 1);
	i2c_setsda(sc, 0);
	i2c_setscl(sc, 0);
}
/*
 * Generate an I2C STOP condition: SDA rises while SCL is high.
 * NOTE(review): the trailing "SCL low, SDA low" differs from a
 * textbook STOP (which leaves both lines released/high) — presumably
 * deliberate for this PLD's bit-bang protocol; confirm against the
 * Cambria PLD documentation before changing.
 */
static void
i2c_sendstop(struct cambria_gpio_softc *sc)
{

	i2c_setscl(sc, 1);
	i2c_setsda(sc, 1);
	i2c_setscl(sc, 0);
	i2c_setsda(sc, 0);
}
/*
 * Shift one byte out on the bit-banged bus, MSB first, then clock the
 * ack slot.  The sampled ack is deliberately discarded: the Cambria
 * PLD does not ack writes (see the comment at the top of this file).
 */
static void
i2c_sendbyte(struct cambria_gpio_softc *sc, u_char data)
{
	int bit;

	for (bit = 7; bit >= 0; bit--) {
		i2c_setsda(sc, data & (1 << bit));
		i2c_setscl(sc, 1);
		i2c_setscl(sc, 0);
	}
	/* Ninth clock: ack slot, result ignored. */
	i2c_setscl(sc, 1);
	(void)i2c_getsda(sc);
	i2c_setscl(sc, 0);
}
static u_char
i2c_readbyte(struct cambria_gpio_softc *sc)
{
int i;
unsigned char data=0;
for (i=7; i>=0; i--)
{
i2c_setscl(sc, 1);
if (i2c_getsda(sc))
data |= (1<<i);
i2c_setscl(sc, 0);
}
return data;
}
/*
 * Read one pin's level from the PLD.  Grabs the iicbus (non-blocking)
 * so nothing else drives SCL/SDA, then bit-bangs a read transaction
 * itself — the regular iicbus transfer path cannot be used because
 * the PLD never acks (see file header).  *val gets 0 or 1.  Returns
 * the iicbus_request_bus() error, else 0.
 */
static int
cambria_gpio_read(struct cambria_gpio_softc *sc, uint32_t pin, unsigned int *val)
{
	device_t dev = sc->sc_dev;
	int error;

	error = iicbus_request_bus(device_get_parent(dev), dev,
	    IIC_DONTWAIT);
	if (error)
		return (error);

	i2c_sendstart(sc);
	/* Slave address with the read bit set. */
	i2c_sendbyte(sc, PLD_ADDR | LSB);
	*val = (i2c_readbyte(sc) & (1 << pin)) != 0;
	i2c_sendstop(sc);

	iicbus_release_bus(device_get_parent(dev), dev);
	return (0);
}
/*
 * Write the cached output latch (sc_latch) to the PLD as one byte,
 * bit-banged for the same no-ack reason as cambria_gpio_read().
 * Caller holds GPIO_LOCK so sc_latch is stable.  Returns the
 * iicbus_request_bus() error, else 0.
 */
static int
cambria_gpio_write(struct cambria_gpio_softc *sc)
{
	device_t dev = sc->sc_dev;
	int error;

	error = iicbus_request_bus(device_get_parent(dev), dev,
	    IIC_DONTWAIT);
	if (error)
		return (error);

	i2c_sendstart(sc);
	/* Slave address with the read bit clear: a write. */
	i2c_sendbyte(sc, PLD_ADDR & ~LSB);
	i2c_sendbyte(sc, sc->sc_latch);
	i2c_sendstop(sc);

	iicbus_release_bus(device_get_parent(dev), dev);
	return (0);
}
/*
 * Report the highest valid pin number; the PLD exposes pins
 * 0 .. GPIO_PINS - 1.  Always succeeds.
 */
static int
cambria_gpio_pin_max(device_t dev, int *maxpin)
{
	int last;

	last = GPIO_PINS - 1;
	*maxpin = last;
	return (0);
}
/*
 * Return the capability mask of a pin; EINVAL if out of range.
 */
static int
cambria_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
{
	struct cambria_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= GPIO_PINS)
		return (EINVAL);
	*caps = sc->sc_pins[pin].gp_caps;
	return (0);
}
/*
 * Return the cached flags (direction) of a pin; EINVAL if out of
 * range.  Unlike the Avila driver there is no hardware refresh —
 * direction lives only in software for the PLD pins.
 */
static int
cambria_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
{
	struct cambria_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= GPIO_PINS)
		return (EINVAL);
	*flags = sc->sc_pins[pin].gp_flags;
	return (0);
}
/*
 * Copy the pin's name into the caller's buffer, which must hold at
 * least GPIOMAXNAME bytes; EINVAL if the pin is out of range.
 */
static int
cambria_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
{
	struct cambria_gpio_softc *sc;

	sc = device_get_softc(dev);
	if (pin >= GPIO_PINS)
		return (EINVAL);
	memcpy(name, sc->sc_pins[pin].gp_name, GPIOMAXNAME);
	return (0);
}
/*
 * Change the flags of a pin: validate, update the software direction,
 * and rewrite the PLD latch.  Returns EINVAL for a bad pin/flag
 * combination, otherwise the cambria_gpio_write() status.
 *
 * NOTE(review): the latch bit is set unconditionally here —
 * presumably releasing the open-drain PLD line high so it can be
 * read back as an input; this also forces a pin configured as an
 * output to the high state.  Confirm against the PLD documentation.
 */
static int
cambria_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
	struct cambria_gpio_softc *sc = device_get_softc(dev);
	int error;

	if (pin >= GPIO_PINS)
		return (EINVAL);
	/* Filter out unwanted flags */
	if ((flags &= sc->sc_pins[pin].gp_caps) != flags)
		return (EINVAL);
	/* Can't mix input/output together */
	if ((flags & (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)) ==
	    (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT))
		return (EINVAL);
	GPIO_LOCK(sc);
	sc->sc_pins[pin].gp_flags = flags;
	sc->sc_latch |= (1 << pin);
	error = cambria_gpio_write(sc);
	GPIO_UNLOCK(sc);
	return (error);
}
/*
 * Set an output pin high (nonzero value) or low (zero) by updating
 * the cached latch and pushing it to the PLD.  EINVAL unless the pin
 * is in range and configured as an output; otherwise returns the
 * cambria_gpio_write() status.
 */
static int
cambria_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
{
	struct cambria_gpio_softc *sc;
	uint8_t bit;
	int error;

	sc = device_get_softc(dev);
	if (pin >= GPIO_PINS)
		return (EINVAL);
	if (sc->sc_pins[pin].gp_flags != GPIO_PIN_OUTPUT)
		return (EINVAL);
	bit = 1 << pin;
	GPIO_LOCK(sc);
	if (value == 0)
		sc->sc_latch &= ~bit;
	else
		sc->sc_latch |= bit;
	error = cambria_gpio_write(sc);
	GPIO_UNLOCK(sc);
	return (error);
}
/*
 * Get the level of a pin.  Output pins are answered from the cached
 * latch (the PLD latch is write-only from our point of view); input
 * pins require an actual bit-banged read from the PLD.  EINVAL for an
 * out-of-range pin, otherwise the cambria_gpio_read() status.
 */
static int
cambria_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
{
	struct cambria_gpio_softc *sc = device_get_softc(dev);
	int error = 0;

	if (pin >= GPIO_PINS)
		return (EINVAL);
	GPIO_LOCK(sc);
	if (sc->sc_pins[pin].gp_flags == GPIO_PIN_OUTPUT)
		*val = (sc->sc_latch & (1 << pin)) ? 1 : 0;
	else
		error = cambria_gpio_read(sc, pin, val);
	GPIO_UNLOCK(sc);
	return (error);
}
/*
 * Invert an output pin by XOR-ing its latch bit and rewriting the
 * latch.  EINVAL unless the pin is in range and configured as an
 * output; otherwise returns the cambria_gpio_write() status.
 */
static int
cambria_gpio_pin_toggle(device_t dev, uint32_t pin)
{
	struct cambria_gpio_softc *sc;
	int error;

	sc = device_get_softc(dev);
	if (pin >= GPIO_PINS)
		return (EINVAL);
	if (sc->sc_pins[pin].gp_flags != GPIO_PIN_OUTPUT)
		return (EINVAL);
	GPIO_LOCK(sc);
	sc->sc_latch ^= 1 << pin;
	error = cambria_gpio_write(sc);
	GPIO_UNLOCK(sc);
	return (error);
}
/*
 * Newbus probe: this pseudo-device always matches; just set the
 * description.
 */
static int
cambria_gpio_probe(device_t dev)
{
	static const char descr[] = "Gateworks Cambria GPIO driver";

	device_set_desc(dev, descr);
	return (0);
}
/*
 * Newbus attach: borrow the GPIO bus space from the global ixp425
 * softc (needed for the bit-banged SCL/SDA lines), initialize the
 * softc mutex, set up each PLD pin with its default direction from
 * cambria_gpio_pins[], then create and attach the gpioc/gpiobus
 * children.
 */
static int
cambria_gpio_attach(device_t dev)
{
	struct cambria_gpio_softc *sc = device_get_softc(dev);
	int pin;

	sc->sc_dev = dev;
	sc->sc_iot = ixp425_softc->sc_iot;
	sc->sc_gpio_ioh = ixp425_softc->sc_gpio_ioh;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	for (pin = 0; pin < GPIO_PINS; pin++) {
		struct cambria_gpio_pin *p = &cambria_gpio_pins[pin];

		strncpy(sc->sc_pins[pin].gp_name, p->name, GPIOMAXNAME);
		sc->sc_pins[pin].gp_pin = pin;
		sc->sc_pins[pin].gp_caps = GPIO_PIN_INPUT|GPIO_PIN_OUTPUT;
		sc->sc_pins[pin].gp_flags = 0;
		/* Program the default direction (also writes the latch). */
		cambria_gpio_pin_setflags(dev, pin, p->flags);
	}

	device_add_child(dev, "gpioc", device_get_unit(dev));
	device_add_child(dev, "gpiobus", device_get_unit(dev));

	return (bus_generic_attach(dev));
}
/*
 * Newbus detach: detach the children, then destroy the softc mutex.
 * NOTE(review): the bus_generic_detach() return value is ignored and
 * the mutex destroyed regardless; a failed child detach cannot be
 * reported.  Left as-is because propagating the error after
 * mtx_destroy() would be worse.
 */
static int
cambria_gpio_detach(device_t dev)
{
	struct cambria_gpio_softc *sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->sc_mtx), ("gpio mutex not initialized"));

	bus_generic_detach(dev);
	mtx_destroy(&sc->sc_mtx);

	return(0);
}
static device_method_t cambria_gpio_methods[] = {
DEVMETHOD(device_probe, cambria_gpio_probe),
DEVMETHOD(device_attach, cambria_gpio_attach),
DEVMETHOD(device_detach, cambria_gpio_detach),
/* GPIO protocol */
DEVMETHOD(gpio_pin_max, cambria_gpio_pin_max),
DEVMETHOD(gpio_pin_getname, cambria_gpio_pin_getname),
DEVMETHOD(gpio_pin_getflags, cambria_gpio_pin_getflags),
DEVMETHOD(gpio_pin_getcaps, cambria_gpio_pin_getcaps),
DEVMETHOD(gpio_pin_setflags, cambria_gpio_pin_setflags),
DEVMETHOD(gpio_pin_get, cambria_gpio_pin_get),
DEVMETHOD(gpio_pin_set, cambria_gpio_pin_set),
DEVMETHOD(gpio_pin_toggle, cambria_gpio_pin_toggle),
{0, 0},
};
/* Driver description: name, method table, and softc size for newbus. */
static driver_t cambria_gpio_driver = {
	"gpio_cambria",
	cambria_gpio_methods,
	sizeof(struct cambria_gpio_softc),
};

static devclass_t cambria_gpio_devclass;

/* Provided by the machine-independent gpiobus/gpioc framework. */
extern devclass_t gpiobus_devclass, gpioc_devclass;
extern driver_t gpiobus_driver, gpioc_driver;

/* Attach this driver under iicbus, then hang gpiobus/gpioc under it. */
DRIVER_MODULE(gpio_cambria, iicbus, cambria_gpio_driver, cambria_gpio_devclass, 0, 0);
DRIVER_MODULE(gpiobus, gpio_cambria, gpiobus_driver, gpiobus_devclass, 0, 0);
DRIVER_MODULE(gpioc, gpio_cambria, gpioc_driver, gpioc_devclass, 0, 0);
MODULE_VERSION(gpio_cambria, 1);
/* Require the iicbus module (min/preferred/max interface version 1). */
MODULE_DEPEND(gpio_cambria, iicbus, 1, 1, 1);

View file

@ -2,7 +2,9 @@
arm/xscale/ixp425/avila_machdep.c standard
arm/xscale/ixp425/avila_ata.c optional avila_ata
arm/xscale/ixp425/avila_led.c optional avila_led
arm/xscale/ixp425/avila_gpio.c optional avila_gpio
arm/xscale/ixp425/cambria_exp_space.c standard
arm/xscale/ixp425/cambria_fled.c optional cambria_fled
arm/xscale/ixp425/cambria_led.c optional cambria_led
arm/xscale/ixp425/cambria_gpio.c optional cambria_gpio
arm/xscale/ixp425/ixdp425_pci.c optional pci

View file

@ -66,6 +66,8 @@ uint32_t intr_steer2 = 0;
struct ixp425_softc *ixp425_softc = NULL;
struct mtx ixp425_gpio_mtx;
static int ixp425_probe(device_t);
static void ixp425_identify(driver_t *, device_t);
static int ixp425_attach(device_t);
@ -164,6 +166,7 @@ ixp425_set_gpio(struct ixp425_softc *sc, int pin, int type)
{
uint32_t gpiotr = GPIO_CONF_READ_4(sc, GPIO_TYPE_REG(pin));
IXP4XX_GPIO_LOCK();
/* clear interrupt type */
GPIO_CONF_WRITE_4(sc, GPIO_TYPE_REG(pin),
gpiotr &~ GPIO_TYPE(pin, GPIO_TYPE_MASK));
@ -176,6 +179,7 @@ ixp425_set_gpio(struct ixp425_softc *sc, int pin, int type)
/* configure gpio line as an input */
GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPOER,
GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOER) | (1<<pin));
IXP4XX_GPIO_UNLOCK();
}
static __inline void
@ -313,6 +317,7 @@ ixp425_attach(device_t dev)
}
arm_post_filter = ixp425_post_filter;
mtx_init(&ixp425_gpio_mtx, "gpio", NULL, MTX_DEF);
if (bus_space_map(sc->sc_iot, IXP425_GPIO_HWBASE, IXP425_GPIO_SIZE,
0, &sc->sc_gpio_ioh))
panic("%s: unable to map GPIO registers", __func__);

View file

@ -106,11 +106,11 @@ ixpiic_getscl(device_t dev)
struct ixpiic_softc *sc = ixpiic_sc;
uint32_t reg;
mtx_lock(&Giant);
IXP4XX_GPIO_LOCK();
GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT);
reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR);
mtx_unlock(&Giant);
IXP4XX_GPIO_UNLOCK();
return (reg & GPIO_I2C_SCL_BIT);
}
@ -120,11 +120,11 @@ ixpiic_getsda(device_t dev)
struct ixpiic_softc *sc = ixpiic_sc;
uint32_t reg;
mtx_lock(&Giant);
IXP4XX_GPIO_LOCK();
GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR);
mtx_unlock(&Giant);
IXP4XX_GPIO_UNLOCK();
return (reg & GPIO_I2C_SDA_BIT);
}
@ -133,13 +133,13 @@ ixpiic_setsda(device_t dev, int val)
{
struct ixpiic_softc *sc = ixpiic_sc;
mtx_lock(&Giant);
IXP4XX_GPIO_LOCK();
GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SDA_BIT);
if (val)
GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
else
GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT);
mtx_unlock(&Giant);
IXP4XX_GPIO_UNLOCK();
DELAY(I2C_DELAY);
}
@ -148,13 +148,13 @@ ixpiic_setscl(device_t dev, int val)
{
struct ixpiic_softc *sc = ixpiic_sc;
mtx_lock(&Giant);
IXP4XX_GPIO_LOCK();
GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SCL_BIT);
if (val)
GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT);
else
GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT);
mtx_unlock(&Giant);
IXP4XX_GPIO_UNLOCK();
DELAY(I2C_DELAY);
}

View file

@ -317,7 +317,7 @@ ixpqmgr_attach(device_t dev)
sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
ixpqmgr_rebuild(sc); /* build inital priority table */
ixpqmgr_rebuild(sc); /* build initial priority table */
aqm_reset(sc); /* reset h/w */
return (0);
}
@ -775,7 +775,7 @@ ixpqmgr_intr(void *arg)
*
* The search will end when all the bits of the interrupt
* register are cleared. There is no need to maintain
* a seperate value and test it at each iteration.
* a separate value and test it at each iteration.
*/
if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
priorityTableIndex = 0;

View file

@ -323,6 +323,8 @@
#define GPIO_TYPE(b,v) ((v) << (((b) & 0x7) * 3))
#define GPIO_TYPE_REG(b) (((b)&8)?IXP425_GPIO_GPIT2R:IXP425_GPIO_GPIT1R)
#define IXP4XX_GPIO_PINS 16
/*
* Expansion Bus Configuration Space.
*/

View file

@ -93,6 +93,9 @@ struct ixppcib_softc {
bus_space_write_4(sc->sc_iot, sc->sc_gpio_ioh, reg, data)
#define GPIO_CONF_READ_4(sc, reg) \
bus_space_read_4(sc->sc_iot, sc->sc_gpio_ioh, reg)
#define IXP4XX_GPIO_LOCK() mtx_lock(&ixp425_gpio_mtx)
#define IXP4XX_GPIO_UNLOCK() mtx_unlock(&ixp425_gpio_mtx)
extern struct mtx ixp425_gpio_mtx;
extern struct bus_space ixp425_bs_tag;
extern struct bus_space ixp425_a4x_bs_tag;

View file

@ -88,7 +88,7 @@ board_init(void)
/*
* This should be called just before starting the kernel. This is so
* that one can undo incompatable hardware settings.
* that one can undo incompatible hardware settings.
*/
void
clr_board(void)
@ -504,7 +504,7 @@ cfaltwait(u_int8_t mask)
while (tout <= 5000000) {
status = cfaltread8(CF_ALT_STATUS);
if (status == 0xff) {
printf("cfaltwait: master: no status, reselectin\n");
printf("cfaltwait: master: no status, reselecting\n");
cfwrite8(CF_DRV_HEAD, CF_D_IBM);
DELAY(1);
status = cfread8(CF_STATUS);

View file

@ -1,6 +1,5 @@
/* $FreeBSD$ */
OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS

View file

@ -453,7 +453,8 @@ __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, u_int64_t off)
}
result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size);
if (result < 0 || (size_t)result != shdr[i].sh_size) {
printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped!");
printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result,
(uintmax_t)shdr[i].sh_size);
lastaddr = ssym;
ssym = 0;
goto nosyms;

View file

@ -334,12 +334,6 @@ Some options may require a value,
while others define behavior just by being set.
Both types of builtin variables are described below.
.Bl -tag -width bootfile
.It Va acpi_load
Unset this to disable automatic loading of the ACPI module.
See also
.Va hint.acpi.0.disabled
in
.Xr device.hints 5 .
.It Va autoboot_delay
Number of seconds
.Ic autoboot

View file

@ -97,7 +97,7 @@
memory {
device_type = "memory";
reg = <0x0 0x8000000>; // 128M at 0x0
reg = <0x0 0x10000000>; // 256M at 0x0
};
localbus@e0005000 {

View file

@ -140,14 +140,16 @@ at-xy ." `--{__________) "
fbsdbw-logo
;
: acpienabled? ( -- flag )
s" acpi_load" getenv
: acpipresent? ( -- flag )
s" hint.acpi.0.rsdp" getenv
dup -1 = if
drop false exit
then
s" YES" compare-insensitive 0<> if
false exit
then
2drop
true
;
: acpienabled? ( -- flag )
s" hint.acpi.0.disabled" getenv
dup -1 <> if
s" 0" compare 0<> if
@ -182,11 +184,18 @@ at-xy ." `--{__________) "
printmenuitem ." Boot FreeBSD [default]" bootkey !
s" arch-i386" environment? if
drop
printmenuitem ." Boot FreeBSD with ACPI " bootacpikey !
acpienabled? if
." disabled"
acpipresent? if
printmenuitem ." Boot FreeBSD with ACPI " bootacpikey !
acpienabled? if
." disabled"
else
." enabled"
then
else
." enabled"
menuidx @
1+ dup
menuidx !
-2 bootacpikey !
then
else
-2 bootacpikey !
@ -254,11 +263,9 @@ set-current
dup bootkey @ = if 0 boot then
dup bootacpikey @ = if
acpienabled? if
s" acpi_load" unsetenv
s" 1" s" hint.acpi.0.disabled" setenv
s" 1" s" loader.acpi_disabled_by_user" setenv
else
s" YES" s" acpi_load" setenv
s" 0" s" hint.acpi.0.disabled" setenv
then
0 boot
@ -266,7 +273,6 @@ set-current
dup bootsafekey @ = if
s" arch-i386" environment? if
drop
s" acpi_load" unsetenv
s" 1" s" hint.acpi.0.disabled" setenv
s" 1" s" loader.acpi_disabled_by_user" setenv
s" 1" s" hint.apic.0.disabled" setenv

View file

@ -239,7 +239,7 @@ error: callw putstr # Display message
xorb %ah,%ah # BIOS: Get
int $0x16 # keypress
movw $0x1234, BDA_BOOT # Do a warm boot
ljmp $0xffff,$0x0 # reboot the machine
ljmp $0xf000,$0xfff0 # reboot the machine
/*
* Display a null-terminated string using the BIOS output.
*/

View file

@ -40,7 +40,7 @@ __FBSDID("$FreeBSD$");
#include "actbl.h"
/*
* Detect ACPI and export information about the APCI BIOS into the
* Detect ACPI and export information about the ACPI BIOS into the
* environment.
*/
@ -56,8 +56,6 @@ biosacpi_detect(void)
char buf[24];
int revision;
/* XXX check the BIOS datestamp */
/* locate and validate the RSDP */
if ((rsdp = biosacpi_find_rsdp()) == NULL)
return;
@ -82,9 +80,6 @@ biosacpi_detect(void)
sprintf(buf, "%d", rsdp->Length);
setenv("hint.acpi.0.xsdt_length", buf, 1);
}
/* XXX other tables? */
setenv("acpi_load", "YES", 1);
}
/*

View file

@ -32,37 +32,13 @@ __FBSDID("$FreeBSD$");
*
*/
#include <stand.h>
#include <string.h>
#include "bootstrap.h"
#include "libi386.h"
/*
* Use voodoo to load modules required by current hardware.
*/
int
i386_autoload(void)
{
int error;
int disabled;
char *rv;
/* XXX use PnP to locate stuff here */
/* autoload ACPI support */
/* XXX should be in 4th keyed off acpi_load */
disabled = 0;
rv = getenv("hint.acpi.0.disabled");
if (rv != NULL && strncmp(rv, "0", 1) != 0) {
disabled = 1;
}
if (getenv("acpi_load") && (!disabled)) {
error = mod_load("acpi", NULL, 0, NULL);
if (error != 0)
printf("ACPI autoload failed - %s\n", strerror(error));
}
return(0);
}

View file

@ -1,21 +1,3 @@
################################################################################
# TACPI DControl ACPI module behaviour
$acpi_load
If set, the ACPI module will be loaded. Clear it with
unset acpi_load
$hint.acpi.0.disabled="1"
If set, the ACPI module won't be loaded.
Note that the ACPI autodetection sets a number of hints to
pass information to the ACPI module. These should not be
overridden, or system behaviour will be undefined.
################################################################################
# Treboot DReboot the system

View file

@ -41,19 +41,23 @@ extern char bootprog_rev[];
extern char bootprog_date[];
extern char bootprog_maker[];
u_int32_t acells;
u_int32_t acells, scells;
static char bootargs[128];
#define HEAP_SIZE 0x80000
#define OF_puts(fd, text) OF_write(fd, text, strlen(text))
void
init_heap(void)
{
void *base;
ihandle_t stdout;
if ((base = ofw_alloc_heap(HEAP_SIZE)) == (void *)0xffffffff) {
printf("Heap memory claim failed!\n");
OF_getprop(chosen, "stdout", &stdout, sizeof(stdout));
OF_puts(stdout, "Heap memory claim failed!\n");
OF_enter();
}
@ -64,25 +68,20 @@ uint64_t
memsize(void)
{
phandle_t memoryp;
struct ofw_reg reg[4];
struct ofw_reg2 reg2[8];
int i;
u_int64_t sz, memsz;
cell_t reg[24];
int i, sz;
u_int64_t memsz;
memsz = 0;
memoryp = OF_instance_to_package(memory);
if (acells == 1) {
sz = OF_getprop(memoryp, "reg", &reg, sizeof(reg));
sz /= sizeof(struct ofw_reg);
sz = OF_getprop(memoryp, "reg", &reg, sizeof(reg));
sz /= sizeof(reg[0]);
for (i = 0, memsz = 0; i < sz; i++)
memsz += reg[i].size;
} else if (acells == 2) {
sz = OF_getprop(memoryp, "reg", &reg2, sizeof(reg2));
sz /= sizeof(struct ofw_reg2);
for (i = 0, memsz = 0; i < sz; i++)
memsz += reg2[i].size;
for (i = 0; i < sz; i += (acells + scells)) {
if (scells > 1)
memsz += (uint64_t)reg[i + acells] << 32;
memsz += reg[i + acells + scells - 1];
}
return (memsz);
@ -105,13 +104,9 @@ main(int (*openfirm)(void *))
root = OF_finddevice("/");
acells = 1;
scells = acells = 1;
OF_getprop(root, "#address-cells", &acells, sizeof(acells));
/*
* Set up console.
*/
cons_probe();
OF_getprop(root, "#size-cells", &scells, sizeof(scells));
/*
* Initialise the heap as early as possible. Once this is done,
@ -120,6 +115,11 @@ main(int (*openfirm)(void *))
*/
init_heap();
/*
* Set up console.
*/
cons_probe();
/*
* March through the device switch probing for things.
*/

View file

@ -91,16 +91,22 @@ ofw_mapmem(vm_offset_t dest, const size_t len)
return (ENOMEM);
}
if (OF_call_method("claim", mmu, 3, 1, destp, dlen, 0, &addr) == -1) {
printf("ofw_mapmem: virtual claim failed\n");
return (ENOMEM);
}
if (OF_call_method("map", mmu, 4, 0, destp, destp, dlen, 0) == -1) {
printf("ofw_mapmem: map failed\n");
return (ENOMEM);
}
/*
* We only do virtual memory management when real_mode is false.
*/
if (real_mode == 0) {
if (OF_call_method("claim", mmu, 3, 1, destp, dlen, 0, &addr)
== -1) {
printf("ofw_mapmem: virtual claim failed\n");
return (ENOMEM);
}
if (OF_call_method("map", mmu, 4, 0, destp, destp, dlen, 0)
== -1) {
printf("ofw_mapmem: map failed\n");
return (ENOMEM);
}
}
last_dest = (vm_offset_t) destp;
last_len = dlen;

View file

@ -118,13 +118,19 @@ ofw_memmap(int acells)
void *
ofw_alloc_heap(unsigned int size)
{
phandle_t memoryp;
struct ofw_reg available;
phandle_t memoryp, root;
cell_t available[4];
cell_t acells;
root = OF_finddevice("/");
acells = 1;
OF_getprop(root, "#address-cells", &acells, sizeof(acells));
memoryp = OF_instance_to_package(memory);
OF_getprop(memoryp, "available", &available, sizeof(available));
OF_getprop(memoryp, "available", available, sizeof(available));
heap_base = OF_claim((void *)available.base, size, sizeof(register_t));
heap_base = OF_claim((void *)available[acells-1], size,
sizeof(register_t));
if (heap_base != (void *)-1) {
heap_size = size;

View file

@ -90,11 +90,11 @@ ofwn_probe(struct netif *nif, void *machdep_hint)
static int
ofwn_put(struct iodesc *desc, void *pkt, size_t len)
{
struct ether_header *eh;
size_t sendlen;
ssize_t rv;
#if defined(NETIF_DEBUG)
struct ether_header *eh;
printf("netif_put: desc=0x%x pkt=0x%x len=%d\n", desc, pkt, len);
eh = pkt;
printf("dst: %s ", ether_sprintf(eh->ether_dhost));
@ -230,7 +230,10 @@ ofwn_init(struct iodesc *desc, void *machdep_hint)
static void
ofwn_end(struct netif *nif)
{
#ifdef BROKEN
/* dma-free freezes at least some Apple ethernet controllers */
OF_call_method("dma-free", netinstance, 2, 0, dmabuf, MAXPHYS);
#endif
OF_close(netinstance);
}

View file

@ -69,21 +69,38 @@ int (*openfirmware)(void *);
phandle_t chosen;
ihandle_t mmu;
ihandle_t memory;
int real_mode = 0;
/* Initialiser */
void
OF_init(int (*openfirm)(void *))
{
phandle_t options;
char mode[sizeof("true")];
openfirmware = openfirm;
if ((chosen = OF_finddevice("/chosen")) == -1)
OF_exit();
if (OF_getprop(chosen, "memory", &memory, sizeof(memory)) == -1)
OF_exit();
if (OF_getprop(chosen, "memory", &memory, sizeof(memory)) == -1) {
memory = OF_open("/memory");
if (memory == -1)
memory = OF_open("/memory@0");
if (memory == -1)
OF_exit();
}
if (OF_getprop(chosen, "mmu", &mmu, sizeof(mmu)) == -1)
OF_exit();
/*
* Check if we run in real mode. If so, we do not need to map
* memory later on.
*/
options = OF_finddevice("/options");
if (OF_getprop(options, "real-mode?", mode, sizeof(mode)) > 0 &&
strcmp(mode, "true") == 0)
real_mode = 1;
}
/*

View file

@ -72,6 +72,7 @@ typedef unsigned long int cell_t;
extern int (*openfirmware)(void *);
extern phandle_t chosen;
extern ihandle_t memory, mmu;
extern int real_mode;
/*
* This isn't actually an Open Firmware function, but it seemed like the right

View file

@ -1,35 +0,0 @@
/*-
* Copyright (c) 2003 TAKAHASHI Yoshihiro <nyan@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
int
i386_autoload(void)
{
return 0;
}

View file

@ -50,7 +50,20 @@ _start: \n\
addi %r1,%r1,stack@l \n\
addi %r1,%r1,8192 \n\
\n\
b startup \n\
/* Clear the .bss!!! */ \n\
li %r0,0 \n\
lis %r8,_edata@ha \n\
addi %r8,%r8,_edata@l\n\
lis %r9,_end@ha \n\
addi %r9,%r9,_end@l \n\
\n\
1: cmpw 0,%r8,%r9 \n\
bge 2f \n\
stw %r0,0(%r8) \n\
addi %r8,%r8,4 \n\
b 1b \n\
\n\
2: b startup \n\
");
void

View file

@ -811,15 +811,15 @@ main(int (*openfirm)(void *))
archsw.arch_autoload = sparc64_autoload;
archsw.arch_maphint = sparc64_maphint;
if (init_heap() == (vm_offset_t)-1)
OF_exit();
setheap((void *)heapva, (void *)(heapva + HEAPSZ));
/*
* Probe for a console.
*/
cons_probe();
if (init_heap() == (vm_offset_t)-1)
panic("%s: can't claim heap", __func__);
setheap((void *)heapva, (void *)(heapva + HEAPSZ));
if ((root = OF_peer(0)) == -1)
panic("%s: can't get root phandle", __func__);
OF_getprop(root, "compatible", compatible, sizeof(compatible));

View file

@ -963,6 +963,8 @@ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
xpt_action((union ccb *)&cts);
if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
caps &= cts.xport_specific.sata.caps;
else
caps = 0;
/* Store result to SIM. */
bzero(&cts, sizeof(cts));
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
@ -1103,6 +1105,8 @@ device_fail: if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
xpt_action((union ccb *)&cts);
if (cts.xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
caps &= cts.xport_specific.sata.caps;
else
caps = 0;
/* Store result to SIM. */
bzero(&cts, sizeof(cts));
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);

View file

@ -37,12 +37,14 @@ __FBSDID("$FreeBSD$");
#else /* _KERNEL */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <camlib.h>
#endif /* _KERNEL */
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/smp_all.h>
#include <sys/sbuf.h>
#ifdef _KERNEL
@ -83,6 +85,8 @@ const struct cam_status_entry cam_status_table[] = {
{ CAM_REQ_TOO_BIG, "The request was too large for this host" },
{ CAM_REQUEUE_REQ, "Unconditionally Re-queue Request", },
{ CAM_ATA_STATUS_ERROR, "ATA Status Error" },
{ CAM_SCSI_IT_NEXUS_LOST,"Initiator/Target Nexus Lost" },
{ CAM_SMP_STATUS_ERROR, "SMP Status Error" },
{ CAM_IDE, "Initiator Detected Error Message Received" },
{ CAM_RESRC_UNAVAIL, "Resource Unavailable" },
{ CAM_UNACKED_EVENT, "Unacknowledged Event by Host" },
@ -263,6 +267,21 @@ cam_error_string(struct cam_device *device, union ccb *ccb, char *str,
break;
}
break;
case XPT_SMP_IO:
switch (proto_flags & CAM_EPF_LEVEL_MASK) {
case CAM_EPF_NONE:
break;
case CAM_EPF_ALL:
proto_flags |= CAM_ESMF_PRINT_FULL_CMD;
/* FALLTHROUGH */
case CAM_EPF_NORMAL:
case CAM_EPF_MINIMAL:
proto_flags |= CAM_ESMF_PRINT_STATUS;
/* FALLTHROUGH */
default:
break;
}
break;
default:
break;
}
@ -289,6 +308,12 @@ cam_error_string(struct cam_device *device, union ccb *ccb, char *str,
#endif /* _KERNEL/!_KERNEL */
sbuf_printf(&sb, "\n");
break;
case XPT_SMP_IO:
smp_command_sbuf(&ccb->smpio, &sb, path_str, 79 -
strlen(path_str), (proto_flags &
CAM_ESMF_PRINT_FULL_CMD) ? 79 : 0);
sbuf_printf(&sb, "\n");
break;
default:
break;
}
@ -355,6 +380,19 @@ cam_error_string(struct cam_device *device, union ccb *ccb, char *str,
#endif /* _KERNEL/!_KERNEL */
}
break;
case XPT_SMP_IO:
if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
CAM_SMP_STATUS_ERROR)
break;
if (proto_flags & CAM_ESF_PRINT_STATUS) {
sbuf_cat(&sb, path_str);
sbuf_printf(&sb, "SMP status: %s (%#x)\n",
smp_error_desc(ccb->smpio.smp_response[2]),
ccb->smpio.smp_response[2]);
}
/* There is no SMP equivalent to SCSI sense. */
break;
default:
break;
}

View file

@ -147,6 +147,7 @@ typedef enum {
*/
CAM_ATA_STATUS_ERROR, /* ATA error, look at error code in CCB */
CAM_SCSI_IT_NEXUS_LOST, /* Initiator/Target Nexus lost. */
CAM_SMP_STATUS_ERROR, /* SMP error, look at error code in CCB */
CAM_IDE = 0x33, /* Initiator Detected Error */
CAM_RESRC_UNAVAIL, /* Resource Unavailable */
CAM_UNACKED_EVENT, /* Unacknowledged Event by Host */
@ -197,6 +198,12 @@ typedef enum {
CAM_ESF_PRINT_SENSE = 0x20
} cam_error_scsi_flags;
typedef enum {
CAM_ESMF_PRINT_NONE = 0x00,
CAM_ESMF_PRINT_STATUS = 0x10,
CAM_ESMF_PRINT_FULL_CMD = 0x20,
} cam_error_smp_flags;
typedef enum {
CAM_EAF_PRINT_NONE = 0x00,
CAM_EAF_PRINT_STATUS = 0x10,

View file

@ -66,7 +66,7 @@ typedef enum {
*/
CAM_SCATTER_VALID = 0x00000010,/* Scatter/gather list is valid */
CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */
CAM_DIR_RESV = 0x00000000,/* Data direction (00:reserved) */
CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */
CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */
CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */
CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */
@ -144,6 +144,8 @@ typedef enum {
/* Device statistics (error counts, etc.) */
XPT_FREEZE_QUEUE = 0x0d,
/* Freeze device queue */
XPT_GDEV_ADVINFO = 0x0e,
/* Advanced device information */
/* SCSI Control Functions: 0x10->0x1F */
XPT_ABORT = 0x10,
/* Abort the specified CCB */
@ -185,6 +187,9 @@ typedef enum {
* Set SIM specific knob values.
*/
XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED,
/* Serial Management Protocol */
XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB
| XPT_FC_XPT_ONLY,
/* Scan Target */
@ -608,6 +613,32 @@ struct ccb_pathstats {
struct timeval last_reset; /* Time of last bus reset/loop init */
};
typedef enum {
SMP_FLAG_NONE = 0x00,
SMP_FLAG_REQ_SG = 0x01,
SMP_FLAG_RSP_SG = 0x02
} ccb_smp_pass_flags;
/*
* Serial Management Protocol CCB
* XXX Currently the semantics for this CCB are that it is executed either
* by the addressed device, or that device's parent (i.e. an expander for
* any device on an expander) if the addressed device doesn't support SMP.
* Later, once we have the ability to probe SMP-only devices and put them
* in CAM's topology, the CCB will only be executed by the addressed device
* if possible.
*/
struct ccb_smpio {
struct ccb_hdr ccb_h;
uint8_t *smp_request;
int smp_request_len;
uint16_t smp_request_sglist_cnt;
uint8_t *smp_response;
int smp_response_len;
uint16_t smp_response_sglist_cnt;
ccb_smp_pass_flags flags;
};
typedef union {
u_int8_t *sense_ptr; /*
* Pointer to storage
@ -1053,6 +1084,26 @@ struct ccb_eng_exec { /* This structure must match SCSIIO size */
#define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */
/*
* CCB for getting advanced device information. This operates in a fashion
* similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer
* type requested, and provide a buffer size/buffer to write to. If the
* buffer is too small, the handler will set GDEVAI_FLAG_MORE.
*/
struct ccb_getdev_advinfo {
struct ccb_hdr ccb_h;
uint32_t flags;
#define CGDAI_FLAG_TRANSPORT 0x1
#define CGDAI_FLAG_PROTO 0x2
uint32_t buftype; /* IN: Type of data being requested */
/* NB: buftype is interpreted on a per-transport basis */
#define CGDAI_TYPE_SCSI_DEVID 1
off_t bufsiz; /* IN: Size of external buffer */
#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
off_t provsiz; /* OUT: Size required/used */
uint8_t *buf; /* IN/OUT: Buffer for requested data */
};
/*
* Union of all CCB types for kernel space allocation. This union should
* never be used for manipulating CCBs - its only use is for the allocation
@ -1087,9 +1138,11 @@ union ccb {
struct ccb_notify_acknowledge cna2;
struct ccb_eng_inq cei;
struct ccb_eng_exec cee;
struct ccb_smpio smpio;
struct ccb_rescan crcn;
struct ccb_debug cdbg;
struct ccb_ataio ataio;
struct ccb_getdev_advinfo cgdai;
};
__BEGIN_DECLS
@ -1115,6 +1168,13 @@ cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
u_int8_t *data_ptr, u_int32_t dxfer_len,
u_int32_t timeout);
static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
uint8_t *smp_request, int smp_request_len,
uint8_t *smp_response, int smp_response_len,
uint32_t timeout);
static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
@ -1172,6 +1232,32 @@ cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
ataio->tag_action = tag_action;
}
static __inline void
cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
uint8_t *smp_request, int smp_request_len,
uint8_t *smp_response, int smp_response_len,
uint32_t timeout)
{
#ifdef _KERNEL
KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH,
("direction != CAM_DIR_BOTH"));
KASSERT((smp_request != NULL) && (smp_response != NULL),
("need valid request and response buffers"));
KASSERT((smp_request_len != 0) && (smp_response_len != 0),
("need non-zero request and response lengths"));
#endif /*_KERNEL*/
smpio->ccb_h.func_code = XPT_SMP_IO;
smpio->ccb_h.flags = flags;
smpio->ccb_h.retry_count = retries;
smpio->ccb_h.cbfcnp = cbfcnp;
smpio->ccb_h.timeout = timeout;
smpio->smp_request = smp_request;
smpio->smp_request_len = smp_request_len;
smpio->smp_response = smp_response;
smpio->smp_response_len = smp_response_len;
}
void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
__END_DECLS

View file

@ -648,6 +648,21 @@ cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
numbufs = 1;
break;
case XPT_SMP_IO:
data_ptrs[0] = &ccb->smpio.smp_request;
lengths[0] = ccb->smpio.smp_request_len;
dirs[0] = CAM_DIR_OUT;
data_ptrs[1] = &ccb->smpio.smp_response;
lengths[1] = ccb->smpio.smp_response_len;
dirs[1] = CAM_DIR_IN;
numbufs = 2;
break;
case XPT_GDEV_ADVINFO:
data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
lengths[0] = ccb->cgdai.bufsiz;
dirs[0] = CAM_DIR_IN;
numbufs = 1;
break;
default:
return(EINVAL);
break; /* NOTREACHED */
@ -787,6 +802,15 @@ cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
data_ptrs[0] = &ccb->ataio.data_ptr;
numbufs = min(mapinfo->num_bufs_used, 1);
break;
case XPT_SMP_IO:
numbufs = min(mapinfo->num_bufs_used, 2);
data_ptrs[0] = &ccb->smpio.smp_request;
data_ptrs[1] = &ccb->smpio.smp_response;
break;
case XPT_GDEV_ADVINFO:
numbufs = min(mapinfo->num_bufs_used, 1);
data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
break;
default:
/* allow ourselves to be swapped once again */
PRELE(curproc);
@ -1055,6 +1079,7 @@ camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
saved_ccb->ccb_h.status |=
CAM_AUTOSENSE_FAIL;
}
saved_ccb->csio.sense_resid = done_ccb->csio.resid;
bcopy(saved_ccb, done_ccb, sizeof(union ccb));
xpt_free_ccb(saved_ccb);
break;
@ -1211,7 +1236,7 @@ camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
scsi_request_sense(&done_ccb->csio, /*retries*/1,
camperiphsensedone,
&save_ccb->csio.sense_data,
sizeof(save_ccb->csio.sense_data),
save_ccb->csio.sense_len,
CAM_TAG_ACTION_NONE,
/*sense_len*/SSD_FULL_SIZE,
/*timeout*/5000);
@ -1602,7 +1627,7 @@ camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
scsi_request_sense(&ccb->csio, /*retries*/1,
camperiphsensedone,
&orig_ccb->csio.sense_data,
sizeof(orig_ccb->csio.sense_data),
orig_ccb->csio.sense_len,
CAM_TAG_ACTION_NONE,
/*sense_len*/SSD_FULL_SIZE,
/*timeout*/5000);

View file

@ -2386,6 +2386,7 @@ xpt_action_default(union ccb *start_ccb)
/* FALLTHROUGH */
case XPT_RESET_DEV:
case XPT_ENG_EXEC:
case XPT_SMP_IO:
{
struct cam_path *path = start_ccb->ccb_h.path;
int frozen;

View file

@ -93,6 +93,10 @@ struct cam_ed {
cam_xport transport;
u_int transport_version;
struct scsi_inquiry_data inq_data;
uint8_t *supported_vpds;
uint8_t supported_vpds_len;
uint32_t device_id_len;
uint8_t *device_id;
struct ata_params ident_data;
u_int8_t inq_flags; /*
* Current settings for inquiry flags.

View file

@ -3552,6 +3552,34 @@ scsi_calc_syncparam(u_int period)
return (period/400);
}
uint8_t *
scsi_get_sas_addr(struct scsi_vpd_device_id *id, uint32_t len)
{
uint8_t *bufp, *buf_end;
struct scsi_vpd_id_descriptor *descr;
struct scsi_vpd_id_naa_basic *naa;
bufp = buf_end = (uint8_t *)id;
bufp += SVPD_DEVICE_ID_HDR_LEN;
buf_end += len;
while (bufp < buf_end) {
descr = (struct scsi_vpd_id_descriptor *)bufp;
bufp += SVPD_DEVICE_ID_DESC_HDR_LEN;
/* Right now, we only care about SAS NAA IEEE Reg addrs */
if (((descr->id_type & SVPD_ID_PIV) != 0)
&& (descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) ==
SCSI_PROTO_SAS
&& (descr->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_NAA){
naa = (struct scsi_vpd_id_naa_basic *)bufp;
if ((naa->naa >> 4) == SVPD_ID_NAA_IEEE_REG)
return bufp;
}
bufp += descr->length;
}
return NULL;
}
void
scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),

View file

@ -796,13 +796,29 @@ struct scsi_vpd_supported_page_list
{
u_int8_t device;
u_int8_t page_code;
#define SVPD_SUPPORTED_PAGE_LIST 0x00
#define SVPD_SUPPORTED_PAGE_LIST 0x00
#define SVPD_SUPPORTED_PAGES_HDR_LEN 4
u_int8_t reserved;
u_int8_t length; /* number of VPD entries */
#define SVPD_SUPPORTED_PAGES_SIZE 251
u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE];
};
/*
* This structure is more suited to target operation, because the
* number of supported pages is left to the user to allocate.
*/
struct scsi_vpd_supported_pages
{
u_int8_t device;
u_int8_t page_code;
u_int8_t reserved;
#define SVPD_SUPPORTED_PAGES 0x00
u_int8_t length;
u_int8_t page_list[0];
};
struct scsi_vpd_unit_serial_number
{
u_int8_t device;
@ -814,6 +830,148 @@ struct scsi_vpd_unit_serial_number
u_int8_t serial_num[SVPD_SERIAL_NUM_SIZE];
};
struct scsi_vpd_device_id
{
u_int8_t device;
u_int8_t page_code;
#define SVPD_DEVICE_ID 0x83
#define SVPD_DEVICE_ID_MAX_SIZE 0xffff
#define SVPD_DEVICE_ID_HDR_LEN 4
#define SVPD_DEVICE_ID_DESC_HDR_LEN 4
u_int8_t length[2];
u_int8_t desc_list[0];
};
struct scsi_vpd_id_descriptor
{
u_int8_t proto_codeset;
#define SCSI_PROTO_FC 0x00
#define SCSI_PROTO_SPI 0x01
#define SCSI_PROTO_SSA 0x02
#define SCSI_PROTO_1394 0x03
#define SCSI_PROTO_RDMA 0x04
#define SCSI_PROTO_iSCSI 0x05
#define SCSI_PROTO_SAS 0x06
#define SVPD_ID_PROTO_SHIFT 4
#define SVPD_ID_CODESET_BINARY 0x01
#define SVPD_ID_CODESET_ASCII 0x02
u_int8_t id_type;
#define SVPD_ID_PIV 0x80
#define SVPD_ID_ASSOC_LUN 0x00
#define SVPD_ID_ASSOC_PORT 0x10
#define SVPD_ID_ASSOC_TARGET 0x20
#define SVPD_ID_TYPE_VENDOR 0x00
#define SVPD_ID_TYPE_T10 0x01
#define SVPD_ID_TYPE_EUI64 0x02
#define SVPD_ID_TYPE_NAA 0x03
#define SVPD_ID_TYPE_RELTARG 0x04
#define SVPD_ID_TYPE_TPORTGRP 0x05
#define SVPD_ID_TYPE_LUNGRP 0x06
#define SVPD_ID_TYPE_MD5_LUN_ID 0x07
#define SVPD_ID_TYPE_SCSI_NAME 0x08
#define SVPD_ID_TYPE_MASK 0x0f
u_int8_t reserved;
u_int8_t length;
u_int8_t identifier[0];
};
struct scsi_vpd_id_t10
{
u_int8_t vendor[8];
u_int8_t vendor_spec_id[0];
};
struct scsi_vpd_id_eui64
{
u_int8_t ieee_company_id[3];
u_int8_t extension_id[5];
};
struct scsi_vpd_id_naa_basic
{
uint8_t naa;
/* big endian, packed:
uint8_t naa : 4;
uint8_t naa_desig : 4;
*/
#define SVPD_ID_NAA_IEEE_EXT 0x02
#define SVPD_ID_NAA_LOCAL_REG 0x03
#define SVPD_ID_NAA_IEEE_REG 0x05
#define SVPD_ID_NAA_IEEE_REG_EXT 0x06
uint8_t naa_data[0];
};
struct scsi_vpd_id_naa_ieee_extended_id
{
uint8_t naa;
uint8_t vendor_specific_id_a;
uint8_t ieee_company_id[3];
uint8_t vendor_specific_id_b[4];
};
struct scsi_vpd_id_naa_local_reg
{
uint8_t naa;
uint8_t local_value[7];
};
struct scsi_vpd_id_naa_ieee_reg
{
uint8_t naa;
uint8_t reg_value[7];
/* big endian, packed:
uint8_t naa_basic : 4;
uint8_t ieee_company_id_0 : 4;
uint8_t ieee_company_id_1[2];
uint8_t ieee_company_id_2 : 4;
uint8_t vendor_specific_id_0 : 4;
uint8_t vendor_specific_id_1[4];
*/
};
/*
 * NAA IEEE Registered Extended format (SVPD_ID_NAA_IEEE_REG_EXT),
 * 16 bytes total.  Bit-level breakdown of reg_value is shown below.
 */
struct scsi_vpd_id_naa_ieee_reg_extended
{
uint8_t naa;
uint8_t reg_value[15];
/* big endian, packed:
uint8_t naa_basic : 4;
uint8_t ieee_company_id_0 : 4;
uint8_t ieee_company_id_1[2];
uint8_t ieee_company_id_2 : 4;
uint8_t vendor_specific_id_0 : 4;
uint8_t vendor_specific_id_1[4];
uint8_t vendor_specific_id_ext[8];
*/
};
/* Relative target port identifier body (SVPD_ID_TYPE_RELTARG). */
struct scsi_vpd_id_rel_trgt_port_id
{
uint8_t obsolete[2];
uint8_t rel_trgt_port_id[2];
};
/* Target port group identifier body (SVPD_ID_TYPE_TPORTGRP). */
struct scsi_vpd_id_trgt_port_grp_id
{
uint8_t reserved[2];
uint8_t trgt_port_grp[2];
};
/* Logical unit group identifier body (SVPD_ID_TYPE_LUNGRP). */
struct scsi_vpd_id_lun_grp_id
{
uint8_t reserved[2];
uint8_t log_unit_grp[2];
};
/* MD5 logical unit identifier body (SVPD_ID_TYPE_MD5_LUN_ID), 16 bytes. */
struct scsi_vpd_id_md5_lun_id
{
uint8_t lun_id[16];
};
/* SCSI name string identifier body (SVPD_ID_TYPE_SCSI_NAME). */
struct scsi_vpd_id_scsi_name
{
uint8_t name_string[256];
};
struct scsi_read_capacity
{
u_int8_t opcode;
@ -1164,7 +1322,8 @@ void scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
u_int scsi_calc_syncsrate(u_int period_factor);
u_int scsi_calc_syncparam(u_int period);
uint8_t * scsi_get_sas_addr(struct scsi_vpd_device_id *id, uint32_t len);
void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *,
union ccb *),

View file

@ -524,8 +524,8 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
* We only attempt to map the user memory into kernel space
* if they haven't passed in a physical memory pointer,
* and if there is actually an I/O operation to perform.
* Right now cam_periph_mapmem() only supports SCSI and device
* match CCBs. For the SCSI CCBs, we only pass the CCB in if
* cam_periph_mapmem() supports SCSI, ATA, SMP, ADVINFO and device
* match CCBs. For the SCSI and ATA CCBs, we only pass the CCB in if
* there's actually data to map. cam_periph_mapmem() will do the
* right thing, even if there isn't data to map, but since CCBs
* without data are a reasonably common occurrence (e.g. test unit
@ -535,7 +535,9 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
&& (((ccb->ccb_h.func_code == XPT_SCSI_IO ||
ccb->ccb_h.func_code == XPT_ATA_IO)
&& ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))
|| (ccb->ccb_h.func_code == XPT_DEV_MATCH))) {
|| (ccb->ccb_h.func_code == XPT_DEV_MATCH)
|| (ccb->ccb_h.func_code == XPT_SMP_IO)
|| (ccb->ccb_h.func_code == XPT_GDEV_ADVINFO))) {
bzero(&mapinfo, sizeof(mapinfo));

View file

@ -68,7 +68,7 @@ struct scsi_quirk_entry {
struct scsi_inquiry_pattern inq_pat;
u_int8_t quirks;
#define CAM_QUIRK_NOLUNS 0x01
#define CAM_QUIRK_NOSERIAL 0x02
#define CAM_QUIRK_NOVPDS 0x02
#define CAM_QUIRK_HILUNS 0x04
#define CAM_QUIRK_NOHILUNS 0x08
#define CAM_QUIRK_NORPTLUNS 0x10
@ -134,8 +134,9 @@ typedef enum {
PROBE_FULL_INQUIRY,
PROBE_REPORT_LUNS,
PROBE_MODE_SENSE,
PROBE_SERIAL_NUM_0,
PROBE_SERIAL_NUM_1,
PROBE_SUPPORTED_VPD_LIST,
PROBE_DEVICE_ID,
PROBE_SERIAL_NUM,
PROBE_TUR_FOR_NEGOTIATION,
PROBE_INQUIRY_BASIC_DV1,
PROBE_INQUIRY_BASIC_DV2,
@ -149,8 +150,9 @@ static char *probe_action_text[] = {
"PROBE_FULL_INQUIRY",
"PROBE_REPORT_LUNS",
"PROBE_MODE_SENSE",
"PROBE_SERIAL_NUM_0",
"PROBE_SERIAL_NUM_1",
"PROBE_SUPPORTED_VPD_LIST",
"PROBE_DEVICE_ID",
"PROBE_SERIAL_NUM",
"PROBE_TUR_FOR_NEGOTIATION",
"PROBE_INQUIRY_BASIC_DV1",
"PROBE_INQUIRY_BASIC_DV2",
@ -463,7 +465,7 @@ static struct scsi_quirk_entry scsi_quirk_table[] =
T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
" TDC 3600", "U07:"
},
CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0
},
{
/*
@ -696,6 +698,21 @@ probeschedule(struct cam_periph *periph)
xpt_schedule(periph, CAM_PRIORITY_XPT);
}
/*
 * Return 1 if page_id appears in the device's cached list of supported
 * VPD pages, 0 otherwise.
 *
 * NOTE(review): assumes device->supported_vpds is non-NULL; when it is
 * NULL, supported_vpds_len is 0 here, so num_pages is negative and the
 * loop does not run -- confirm that invariant holds for all callers.
 */
static int
device_has_vpd(struct cam_ed *device, uint8_t page_id)
{
int i, num_pages;
struct scsi_vpd_supported_pages *vpds;
vpds = (struct scsi_vpd_supported_pages *)device->supported_vpds;
/* supported_vpds_len includes the page header; subtract it to count entries. */
num_pages = device->supported_vpds_len - SVPD_SUPPORTED_PAGES_HDR_LEN;
for (i = 0;i < num_pages;i++)
if (vpds->page_list[i] == page_id)
return 1;
return 0;
}
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
@ -810,7 +827,8 @@ probestart(struct cam_periph *periph, union ccb *start_ccb)
if (INQ_DATA_TQ_ENABLED(inq_buf))
PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
else
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
PROBE_SET_ACTION(softc,
PROBE_SUPPORTED_VPD_LIST);
goto again;
}
scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG,
@ -843,19 +861,20 @@ probestart(struct cam_periph *periph, union ccb *start_ccb)
}
xpt_print(periph->path, "Unable to mode sense control page - "
"malloc failure\n");
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
}
/* FALLTHROUGH */
case PROBE_SERIAL_NUM_0:
case PROBE_SUPPORTED_VPD_LIST:
{
struct scsi_vpd_supported_page_list *vpd_list = NULL;
struct scsi_vpd_supported_page_list *vpd_list;
struct cam_ed *device;
vpd_list = NULL;
device = periph->path->device;
if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOSERIAL) == 0) {
if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0)
vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
M_NOWAIT | M_ZERO);
}
if (vpd_list != NULL) {
scsi_inquiry(csio,
@ -878,7 +897,39 @@ probestart(struct cam_periph *periph, union ccb *start_ccb)
probedone(periph, start_ccb);
return;
}
case PROBE_SERIAL_NUM_1:
case PROBE_DEVICE_ID:
{
struct scsi_vpd_device_id *devid;
struct cam_ed *device;
devid = NULL;
device = periph->path->device;
if (device_has_vpd(device, SVPD_DEVICE_ID))
devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT,
M_NOWAIT | M_ZERO);
if (devid != NULL) {
scsi_inquiry(csio,
/*retries*/4,
probedone,
MSG_SIMPLE_Q_TAG,
(uint8_t *)devid,
SVPD_DEVICE_ID_MAX_SIZE,
/*evpd*/TRUE,
SVPD_DEVICE_ID,
SSD_MIN_SIZE,
/*timeout*/60 * 1000);
break;
}
/*
* We'll have to do without, let our probedone
* routine finish up for us.
*/
start_ccb->csio.data_ptr = NULL;
probedone(periph, start_ccb);
return;
}
case PROBE_SERIAL_NUM:
{
struct scsi_vpd_unit_serial_number *serial_buf;
struct cam_ed* device;
@ -891,8 +942,10 @@ probestart(struct cam_periph *periph, union ccb *start_ccb)
device->serial_num_len = 0;
}
serial_buf = (struct scsi_vpd_unit_serial_number *)
malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO);
if (device_has_vpd(device, SVPD_UNIT_SERIAL_NUMBER))
serial_buf = (struct scsi_vpd_unit_serial_number *)
malloc(sizeof(*serial_buf), M_CAMXPT,
M_NOWAIT|M_ZERO);
if (serial_buf != NULL) {
scsi_inquiry(csio,
@ -1046,6 +1099,8 @@ proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
return (1);
}
#define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP)
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
@ -1133,7 +1188,7 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
PROBE_MODE_SENSE);
else
PROBE_SET_ACTION(softc,
PROBE_SERIAL_NUM_0);
PROBE_SUPPORTED_VPD_LIST);
if (path->device->flags & CAM_DEV_UNCONFIGURED) {
path->device->flags &= ~CAM_DEV_UNCONFIGURED;
@ -1290,7 +1345,8 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
if (INQ_DATA_TQ_ENABLED(inq_buf))
PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
else
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
PROBE_SET_ACTION(softc,
PROBE_SUPPORTED_VPD_LIST);
xpt_release_ccb(done_ccb);
xpt_schedule(periph, priority);
return;
@ -1326,35 +1382,82 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
}
xpt_release_ccb(done_ccb);
free(mode_hdr, M_CAMXPT);
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0);
PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
xpt_schedule(periph, priority);
return;
}
case PROBE_SERIAL_NUM_0:
case PROBE_SUPPORTED_VPD_LIST:
{
struct ccb_scsiio *csio;
struct scsi_vpd_supported_page_list *page_list;
int length, serialnum_supported, i;
serialnum_supported = 0;
csio = &done_ccb->csio;
page_list =
(struct scsi_vpd_supported_page_list *)csio->data_ptr;
if (path->device->supported_vpds != NULL) {
free(path->device->supported_vpds, M_CAMXPT);
path->device->supported_vpds = NULL;
path->device->supported_vpds_len = 0;
}
if (page_list == NULL) {
/*
* Don't process the command as it was never sent
*/
} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
&& (page_list->length > 0)) {
length = min(page_list->length,
SVPD_SUPPORTED_PAGES_SIZE);
for (i = 0; i < length; i++) {
if (page_list->list[i] ==
SVPD_UNIT_SERIAL_NUMBER) {
serialnum_supported = 1;
break;
}
} else if (CCB_COMPLETED_OK(csio->ccb_h)) {
/* Got vpd list */
path->device->supported_vpds_len = page_list->length +
SVPD_SUPPORTED_PAGES_HDR_LEN;
path->device->supported_vpds = (uint8_t *)page_list;
xpt_release_ccb(done_ccb);
PROBE_SET_ACTION(softc, PROBE_DEVICE_ID);
xpt_schedule(periph, priority);
return;
} else if (cam_periph_error(done_ccb, 0,
SF_RETRY_UA|SF_NO_PRINT,
&softc->saved_ccb) == ERESTART) {
return;
} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
/* Don't wedge the queue */
xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/*run_queue*/TRUE);
}
if (page_list)
free(page_list, M_CAMXPT);
/* No VPDs available, skip to device check. */
csio->data_ptr = NULL;
goto probe_device_check;
}
case PROBE_DEVICE_ID:
{
struct scsi_vpd_device_id *devid;
struct ccb_scsiio *csio;
uint32_t length = 0;
csio = &done_ccb->csio;
devid = (struct scsi_vpd_device_id *)csio->data_ptr;
/* Clean up from previous instance of this device */
if (path->device->device_id != NULL) {
path->device->device_id_len = 0;
free(path->device->device_id, M_CAMXPT);
path->device->device_id = NULL;
}
if (devid == NULL) {
/* Don't process the command as it was never sent */
} else if (CCB_COMPLETED_OK(csio->ccb_h)) {
length = scsi_2btoul(devid->length);
if (length != 0) {
/*
* NB: device_id_len is actual response
* size, not buffer size.
*/
path->device->device_id_len = length +
SVPD_DEVICE_ID_HDR_LEN;
path->device->device_id = (uint8_t *)devid;
}
} else if (cam_periph_error(done_ccb, 0,
SF_RETRY_UA|SF_NO_PRINT,
@ -1366,21 +1469,17 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
/*run_queue*/TRUE);
}
if (page_list != NULL)
free(page_list, M_CAMXPT);
if (serialnum_supported) {
xpt_release_ccb(done_ccb);
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_1);
xpt_schedule(periph, priority);
return;
}
csio->data_ptr = NULL;
/* FALLTHROUGH */
/* Free the device id space if we don't use it */
if (devid && length == 0)
free(devid, M_CAMXPT);
xpt_release_ccb(done_ccb);
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM);
xpt_schedule(periph, priority);
return;
}
case PROBE_SERIAL_NUM_1:
probe_device_check:
case PROBE_SERIAL_NUM:
{
struct ccb_scsiio *csio;
struct scsi_vpd_unit_serial_number *serial_buf;
@ -1395,13 +1494,6 @@ probedone(struct cam_periph *periph, union ccb *done_ccb)
serial_buf =
(struct scsi_vpd_unit_serial_number *)csio->data_ptr;
/* Clean up from previous instance of this device */
if (path->device->serial_num != NULL) {
free(path->device->serial_num, M_CAMXPT);
path->device->serial_num = NULL;
path->device->serial_num_len = 0;
}
if (serial_buf == NULL) {
/*
* Don't process the command as it was never sent
@ -2227,6 +2319,10 @@ scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
device->queue_flags = 0;
device->serial_num = NULL;
device->serial_num_len = 0;
device->device_id = NULL;
device->device_id_len = 0;
device->supported_vpds = NULL;
device->supported_vpds_len = 0;
/*
* XXX should be limited by number of CCBs this bus can
@ -2336,6 +2432,31 @@ scsi_devise_transport(struct cam_path *path)
xpt_action((union ccb *)&cts);
}
/*
 * Service an XPT_GDEV_ADVINFO CCB: copy cached advanced device
 * information (currently only the SCSI device ID VPD data) into the
 * caller's buffer.  provsiz is always set to the full cached length so
 * the caller can detect truncation; at most bufsiz bytes are copied.
 * Unknown buftypes are ignored.  The CCB always completes CAM_REQ_CMP.
 */
static void
scsi_getdev_advinfo(union ccb *start_ccb)
{
struct cam_ed *device;
struct ccb_getdev_advinfo *cgdai;
off_t amt;
device = start_ccb->ccb_h.path->device;
cgdai = &start_ccb->cgdai;
switch(cgdai->buftype) {
case CGDAI_TYPE_SCSI_DEVID:
/* Report the full cached size even if the user buffer is smaller. */
cgdai->provsiz = device->device_id_len;
if (device->device_id_len == 0)
break;
amt = device->device_id_len;
if (cgdai->provsiz > cgdai->bufsiz)
amt = cgdai->bufsiz;
bcopy(device->device_id, cgdai->buf, amt);
break;
default:
break;
}
start_ccb->ccb_h.status = CAM_REQ_CMP;
}
static void
scsi_action(union ccb *start_ccb)
{
@ -2365,6 +2486,11 @@ scsi_action(union ccb *start_ccb)
(*(sim->sim_action))(sim, start_ccb);
break;
}
case XPT_GDEV_ADVINFO:
{
scsi_getdev_advinfo(start_ccb);
break;
}
default:
xpt_action_default(start_ccb);
break;

620
sys/cam/scsi/smp_all.c Normal file
View file

@ -0,0 +1,620 @@
/*-
* Copyright (c) 2010 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/users/kenm/FreeBSD-test/sys/cam/scsi/smp_all.c#4 $
*/
/*
* Serial Management Protocol helper functions.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/libkern.h>
#include <sys/kernel.h>
#else /* _KERNEL */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#endif /* _KERNEL */
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/scsi/smp_all.h>
#include <sys/sbuf.h>
#ifndef _KERNEL
#include <camlib.h>
#endif
static char *smp_yesno(int val);
/*
 * Render a boolean flag as "Yes" or "No" for sbuf output.
 */
static char *
smp_yesno(int val)
{
        return (val != 0 ? "Yes" : "No");
}
/* Maps one SMP function result code to its human-readable description. */
struct smp_error_table_entry {
uint8_t function_result;
const char *desc;
};
/* List current as of SPL Revision 7 */
/* Consumed by smp_error_desc(); codes not listed here are reserved. */
static struct smp_error_table_entry smp_error_table[] = {
{SMP_FR_ACCEPTED, "SMP Function Accepted"},
{SMP_FR_UNKNOWN_FUNC, "Unknown SMP Function"},
{SMP_FR_FUNCTION_FAILED, "SMP Function Failed"},
{SMP_FR_INVALID_REQ_FRAME_LEN, "Invalid Request Frame Length"},
{SMP_FR_INVALID_EXP_CHG_CNT, "Invalid Expander Change Count"},
{SMP_FR_BUSY, "Busy"},
{SMP_FR_INCOMPLETE_DESC_LIST, "Incomplete Descriptor List"},
{SMP_FR_PHY_DOES_NOT_EXIST, "Phy Does Not Exist"},
{SMP_FR_INDEX_DOES_NOT_EXIST, "Index Does Not Exist"},
{SMP_FR_PHY_DOES_NOT_SUP_SATA, "Phy Does Not Support SATA"},
{SMP_FR_UNKNOWN_PHY_OP, "Unknown Phy Operation"},
{SMP_FR_UNKNOWN_PHY_TEST_FUNC, "Unknown Phy Test Function"},
{SMP_FR_PHY_TEST_FUNC_INPROG, "Phy Test Function In Progress"},
{SMP_FR_PHY_VACANT, "Phy Vacant"},
{SMP_FR_UNKNOWN_PHY_EVENT_SRC, "Unknown Phy Event Source"},
{SMP_FR_UNKNOWN_DESC_TYPE, "Unknown Descriptor Type"},
{SMP_FR_UNKNOWN_PHY_FILTER, "Unknown Phy Filter"},
{SMP_FR_AFFILIATION_VIOLATION, "Affiliation Violation"},
{SMP_FR_SMP_ZONE_VIOLATION, "SMP Zone Violation"},
{SMP_FR_NO_MGMT_ACCESS_RIGHTS, "No Management Access Rights"},
{SMP_FR_UNKNOWN_ED_ZONING_VAL, "Unknown Enable Disable Zoning Value"},
{SMP_FR_ZONE_LOCK_VIOLATION, "Zone Lock Violation"},
{SMP_FR_NOT_ACTIVATED, "Not Activated"},
{SMP_FR_ZG_OUT_OF_RANGE, "Zone Group Out of Range"},
{SMP_FR_NO_PHYS_PRESENCE, "No Physical Presence"},
{SMP_FR_SAVING_NOT_SUP, "Saving Not Supported"},
{SMP_FR_SRC_ZONE_DNE, "Source Zone Group Does Not Exist"},
{SMP_FR_DISABLED_PWD_NOT_SUP, "Disabled Password Not Supported"}
};
const char *
smp_error_desc(int function_result)
{
int i;
for (i = 0; i < (sizeof(smp_error_table)/sizeof(smp_error_table[0]));
i++){
if (function_result == smp_error_table[i].function_result)
return (smp_error_table[i].desc);
}
return ("Reserved Function Result");
}
/* List current as of SPL Revision 7 */
struct smp_cmd_table_entry {
uint8_t cmd_num;
const char *desc;
} smp_cmd_table[] = {
{SMP_FUNC_REPORT_GENERAL, "REPORT GENERAL"},
{SMP_FUNC_REPORT_MANUF_INFO, "REPORT MANUFACTURER INFORMATION"},
{SMP_FUNC_REPORT_SC_STATUS, "REPORT SELF-CONFIGURATION STATUS"},
{SMP_FUNC_REPORT_ZONE_PERM_TBL, "REPORT ZONE PERMISSION TABLE"},
{SMP_FUNC_REPORT_BROADCAST, "REPORT BROADCAST"},
{SMP_FUNC_DISCOVER, "DISCOVER"},
{SMP_FUNC_REPORT_PHY_ERR_LOG, "REPORT PHY ERROR LOG"},
{SMP_FUNC_REPORT_PHY_SATA, "REPORT PHY SATA"},
{SMP_FUNC_REPORT_ROUTE_INFO, "REPORT ROUTE INFORMATION"},
{SMP_FUNC_REPORT_PHY_EVENT, "REPORT PHY EVENT"},
{SMP_FUNC_DISCOVER_LIST, "DISCOVER LIST"},
{SMP_FUNC_REPORT_PHY_EVENT_LIST, "REPORT PHY EVENT LIST"},
{SMP_FUNC_REPORT_EXP_RTL, "REPORT EXPANDER ROUTE TABLE LIST"},
{SMP_FUNC_CONFIG_GENERAL, "CONFIGURE GENERAL"},
{SMP_FUNC_ENABLE_DISABLE_ZONING, "ENABLE DISABLE ZONING"},
{SMP_FUNC_ZONED_BROADCAST, "ZONED BROADCAST"},
{SMP_FUNC_ZONE_LOCK, "ZONE LOCK"},
{SMP_FUNC_ZONE_ACTIVATE, "ZONE ACTIVATE"},
{SMP_FUNC_ZONE_UNLOCK, "ZONE UNLOCK"},
{SMP_FUNC_CONFIG_ZM_PWD, "CONFIGURE ZONE MANAGER PASSWORD"},
{SMP_FUNC_CONFIG_ZONE_PHY_INFO, "CONFIGURE ZONE PHY INFORMATION"},
{SMP_FUNC_CONFIG_ZONE_PERM_TBL, "CONFIGURE ZONE PERMISSION TABLE"},
{SMP_FUNC_CONFIG_ROUTE_INFO, "CONFIGURE ROUTE INFORMATION"},
{SMP_FUNC_PHY_CONTROL, "PHY CONTROL"},
{SMP_FUNC_PHY_TEST_FUNC, "PHY TEST FUNCTION"},
{SMP_FUNC_CONFIG_PHY_EVENT, "CONFIGURE PHY EVENT"}
};
/*
 * Translate an SMP function number into a command name.  Falls back to
 * identifying the vendor-specific ranges, or "Unknown SMP Command".
 *
 * The loop's second condition (table value <= cmd_num) exits early and
 * relies on smp_cmd_table being sorted in ascending cmd_num order.
 */
const char *
smp_command_desc(uint8_t cmd_num)
{
int i;
for (i = 0; i < (sizeof(smp_cmd_table)/sizeof(smp_cmd_table[0])) &&
smp_cmd_table[i].cmd_num <= cmd_num; i++) {
if (cmd_num == smp_cmd_table[i].cmd_num)
return (smp_cmd_table[i].desc);
}
/*
* 0x40 to 0x7f and 0xc0 to 0xff are the vendor specific SMP
* command ranges.
*/
if (((cmd_num >= 0x40) && (cmd_num <= 0x7f))
|| (cmd_num >= 0xc0)) {
return ("Vendor Specific SMP Command");
} else {
return ("Unknown SMP Command");
}
}
/*
 * Decode a SMP request buffer into a string of hexadecimal numbers.
 *
 * smp_request: SMP request
 * request_len: length of the SMP request buffer, may be reduced if the
 * caller only wants part of the buffer printed
 * sb: sbuf(9) buffer
 * line_prefix: prefix for new lines, or an empty string ("")
 * first_line_len: length left on first line
 * line_len: total length of subsequent lines, 0 for no additional lines
 * if there are no additional lines, first line will get ...
 * at the end if there is additional data
 *
 * NOTE(review): line_len is a signed int compared against a size_t sum
 * below; a negative line_len is promoted to a huge unsigned value --
 * callers should pass 0, not a negative number, to suppress wrapping.
 */
void
smp_command_decode(uint8_t *smp_request, int request_len, struct sbuf *sb,
char *line_prefix, int first_line_len, int line_len)
{
int i, cur_len;
/* cur_len tracks the space remaining on the current output line. */
for (i = 0, cur_len = first_line_len; i < request_len; i++) {
/*
* Each byte takes 3 characters. As soon as we go less
* than 6 (meaning we have at least 3 and at most 5
* characters left), check to see whether the subsequent
* line length (line_len) is long enough to bother with.
* If the user set it to 0, or some other length that isn't
* enough to hold at least the prefix and one byte, put ...
* on the first line to indicate that there is more data
* and bail out.
*/
if ((cur_len < 6)
&& (line_len < (strlen(line_prefix) + 3))) {
sbuf_printf(sb, "...");
return;
}
/* Out of room on this line: wrap and start a prefixed new line. */
if (cur_len < 3) {
sbuf_printf(sb, "\n%s", line_prefix);
cur_len = line_len - strlen(line_prefix);
}
sbuf_printf(sb, "%02x ", smp_request[i]);
cur_len = cur_len - 3;
}
}
/*
 * Print an SMP command name followed by a hex dump of the request,
 * wrapped according to the caller's line-length parameters.
 */
void
smp_command_sbuf(struct ccb_smpio *smpio, struct sbuf *sb,
                 char *line_prefix, int first_line_len, int line_len)
{
        const char *desc;

        desc = smp_command_desc(smpio->smp_request[1]);
        sbuf_printf(sb, "%s. ", desc);
        /*
         * Account for the command description and the period and space
         * after the command description.
         */
        first_line_len -= strlen(desc) + 2;
        smp_command_decode(smpio->smp_request, smpio->smp_request_len, sb,
            line_prefix, first_line_len, line_len);
}
/*
 * Print SMP error output. For userland commands, we need the cam_device
 * structure so we can get the path information from the CCB.
 *
 * Emits the decoded command (via smp_command_sbuf) and then the error
 * description for the function result byte (smp_response[2]), each
 * prefixed with the device path string.
 */
#ifdef _KERNEL
void
smp_error_sbuf(struct ccb_smpio *smpio, struct sbuf *sb)
#else /* !_KERNEL*/
void
smp_error_sbuf(struct cam_device *device, struct ccb_smpio *smpio,
struct sbuf *sb)
#endif /* _KERNEL/!_KERNEL */
{
char path_str[64];
/* Kernel and userland use different APIs to render the same path string. */
#ifdef _KERNEL
xpt_path_string(smpio->ccb_h.path, path_str, sizeof(path_str));
#else
cam_path_string(device, path_str, sizeof(path_str));
#endif
smp_command_sbuf(smpio, sb, path_str, 80 - strlen(path_str), 80);
sbuf_printf(sb, "\n");
sbuf_cat(sb, path_str);
sbuf_printf(sb, "SMP Error: %s (0x%x)\n",
smp_error_desc(smpio->smp_response[2]),
smpio->smp_response[2]);
}
/*
 * Decode the SMP REPORT GENERAL response. The format is current as of SPL
 * Revision 7, but the parsing should be backward compatible for older
 * versions of the spec.
 *
 * response:     decoded REPORT GENERAL response buffer
 * response_len: number of valid bytes in the buffer; the extended fields
 *               are only printed when the full structure (less CRC) fits
 * sb:           sbuf(9) buffer the text is appended to
 */
void
smp_report_general_sbuf(struct smp_report_general_response *response,
                        int response_len, struct sbuf *sb)
{
        sbuf_printf(sb, "Report General\n");
        sbuf_printf(sb, "Response Length: %d words (%d bytes)\n",
                    response->response_len,
                    response->response_len * SMP_WORD_LEN);
        sbuf_printf(sb, "Expander Change Count: %d\n",
                    scsi_2btoul(response->expander_change_count));
        sbuf_printf(sb, "Expander Route Indexes: %d\n",
                    scsi_2btoul(response->expander_route_indexes));
        sbuf_printf(sb, "Long Response: %s\n",
                    smp_yesno(response->long_response &
                              SMP_RG_LONG_RESPONSE));
        sbuf_printf(sb, "Number of Phys: %d\n", response->num_phys);
        sbuf_printf(sb, "Table to Table Supported: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_TABLE_TO_TABLE_SUP));
        sbuf_printf(sb, "Zone Configuring: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_ZONE_CONFIGURING));
        sbuf_printf(sb, "Self Configuring: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_SELF_CONFIGURING));
        sbuf_printf(sb, "STP Continue AWT: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_STP_CONTINUE_AWT));
        sbuf_printf(sb, "Open Reject Retry Supported: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_OPEN_REJECT_RETRY_SUP));
        sbuf_printf(sb, "Configures Others: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_CONFIGURES_OTHERS));
        sbuf_printf(sb, "Configuring: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_CONFIGURING));
        /*
         * NOTE(review): this tests SMP_RG_CONFIGURING again, the same bit
         * as the "Configuring" line above.  It looks like a copy/paste slip
         * and should probably use the externally-configurable-route-table
         * bit -- confirm against the SMP_RG_* defines in smp_all.h.
         */
        sbuf_printf(sb, "Externally Configurable Route Table: %s\n",
                    smp_yesno(response->config_bits0 &
                              SMP_RG_CONFIGURING));
        sbuf_printf(sb, "Enclosure Logical Identifier: 0x%016jx\n",
                    (uintmax_t)scsi_8btou64(response->encl_logical_id));
        /*
         * If the response->response_len is 0, then we don't have the
         * extended information. Also, if the user didn't allocate enough
         * space for the full request, don't try to parse it.
         */
        if ((response->response_len == 0)
         || (response_len < (sizeof(struct smp_report_general_response) -
             sizeof(response->crc))))
                return;
        sbuf_printf(sb, "STP Bus Inactivity Time Limit: %d\n",
                    scsi_2btoul(response->stp_bus_inact_time_limit));
        sbuf_printf(sb, "STP Maximum Connect Time Limit: %d\n",
                    scsi_2btoul(response->stp_max_conn_time_limit));
        sbuf_printf(sb, "STP SMP I_T Nexus Loss Time: %d\n",
                    scsi_2btoul(response->stp_smp_it_nexus_loss_time));
        sbuf_printf(sb, "Number of Zone Groups: %d\n",
                    (response->config_bits1 & SMP_RG_NUM_ZONE_GROUPS_MASK) >>
                    SMP_RG_NUM_ZONE_GROUPS_SHIFT);
        sbuf_printf(sb, "Zone Locked: %s\n",
                    smp_yesno(response->config_bits1 & SMP_RG_ZONE_LOCKED));
        sbuf_printf(sb, "Physical Presence Supported: %s\n",
                    smp_yesno(response->config_bits1 & SMP_RG_PP_SUPPORTED));
        sbuf_printf(sb, "Physical Presence Asserted: %s\n",
                    smp_yesno(response->config_bits1 & SMP_RG_PP_ASSERTED));
        sbuf_printf(sb, "Zoning Supported: %s\n",
                    smp_yesno(response->config_bits1 &
                              SMP_RG_ZONING_SUPPORTED));
        sbuf_printf(sb, "Zoning Enabled: %s\n",
                    smp_yesno(response->config_bits1 & SMP_RG_ZONING_ENABLED));
        sbuf_printf(sb, "Saving: %s\n",
                    smp_yesno(response->config_bits2 & SMP_RG_SAVING));
        sbuf_printf(sb, "Saving Zone Manager Password Supported: %s\n",
                    smp_yesno(response->config_bits2 &
                              SMP_RG_SAVING_ZM_PWD_SUP));
        sbuf_printf(sb, "Saving Zone Phy Information Supported: %s\n",
                    smp_yesno(response->config_bits2 &
                              SMP_RG_SAVING_PHY_INFO_SUP));
        sbuf_printf(sb, "Saving Zone Permission Table Supported: %s\n",
                    smp_yesno(response->config_bits2 &
                              SMP_RG_SAVING_ZPERM_TAB_SUP));
        sbuf_printf(sb, "Saving Zoning Enabled Supported: %s\n",
                    smp_yesno(response->config_bits2 &
                              SMP_RG_SAVING_ZENABLED_SUP));
        sbuf_printf(sb, "Maximum Number of Routed SAS Addresses: %d\n",
                    scsi_2btoul(response->max_num_routed_addrs));
        /*
         * Cast to uintmax_t to match %jx, consistent with the enclosure
         * logical identifier above (scsi_8btou64 returns a fixed 64-bit
         * type, which need not be uintmax_t).
         */
        sbuf_printf(sb, "Active Zone Manager SAS Address: 0x%016jx\n",
                    (uintmax_t)scsi_8btou64(response->active_zm_address));
        sbuf_printf(sb, "Zone Inactivity Time Limit: %d\n",
                    scsi_2btoul(response->zone_lock_inact_time_limit));
        sbuf_printf(sb, "First Enclosure Connector Element Index: %d\n",
                    response->first_encl_conn_el_index);
        sbuf_printf(sb, "Number of Enclosure Connector Element Indexes: %d\n",
                    response->num_encl_conn_el_indexes);
        sbuf_printf(sb, "Reduced Functionality: %s\n",
                    smp_yesno(response->reduced_functionality &
                              SMP_RG_REDUCED_FUNCTIONALITY));
        sbuf_printf(sb, "Time to Reduced Functionality: %d\n",
                    response->time_to_reduced_func);
        sbuf_printf(sb, "Initial Time to Reduced Functionality: %d\n",
                    response->initial_time_to_reduced_func);
        sbuf_printf(sb, "Maximum Reduced Functionality Time: %d\n",
                    response->max_reduced_func_time);
        sbuf_printf(sb, "Last Self-Configuration Status Descriptor Index: %d\n",
                    scsi_2btoul(response->last_sc_stat_desc_index));
        /* Fixed typo in user-visible output: "Storated" -> "Stored". */
        sbuf_printf(sb, "Maximum Number of Stored Self-Configuration "
                    "Status Descriptors: %d\n",
                    scsi_2btoul(response->max_sc_stat_descs));
        sbuf_printf(sb, "Last Phy Event List Descriptor Index: %d\n",
                    scsi_2btoul(response->last_phy_evl_desc_index));
        sbuf_printf(sb, "Maximum Number of Stored Phy Event List "
                    "Descriptors: %d\n",
                    scsi_2btoul(response->max_stored_pel_descs));
        sbuf_printf(sb, "STP Reject to Open Limit: %d\n",
                    scsi_2btoul(response->stp_reject_to_open_limit));
}
/*
 * Decode the SMP REPORT MANUFACTURER INFORMATION response. The format is
 * current as of SPL Revision 7, but the parsing should be backward
 * compatible for older versions of the spec.
 *
 * If the SAS 1.1 Format bit is clear, the component fields are not valid
 * and the region from comp_vendor up to (but not including) the CRC is
 * dumped as raw vendor-specific hex instead.
 */
void
smp_report_manuf_info_sbuf(struct smp_report_manuf_info_response *response,
int response_len, struct sbuf *sb)
{
char vendor[16], product[48], revision[16];
char comp_vendor[16];
sbuf_printf(sb, "Report Manufacturer Information\n");
sbuf_printf(sb, "Expander Change count: %d\n",
scsi_2btoul(response->expander_change_count));
sbuf_printf(sb, "SAS 1.1 Format: %s\n",
smp_yesno(response->sas_11_format & SMP_RMI_SAS11_FORMAT));
/* cam_strvis() sanitizes the fixed-width fields into NUL-terminated strings. */
cam_strvis(vendor, response->vendor, sizeof(response->vendor),
sizeof(vendor));
cam_strvis(product, response->product, sizeof(response->product),
sizeof(product));
cam_strvis(revision, response->revision, sizeof(response->revision),
sizeof(revision));
sbuf_printf(sb, "<%s %s %s>\n", vendor, product, revision);
if ((response->sas_11_format & SMP_RMI_SAS11_FORMAT) == 0) {
uint8_t *curbyte;
int line_start, line_cursor;
sbuf_printf(sb, "Vendor Specific Data:\n");
/*
* Print out the bytes roughly in the style of hd(1), but
* without the extra ASCII decoding. Hexadecimal line
* numbers on the left, and 16 bytes per line, with an
* extra space after the first 8 bytes.
*
* It would be nice if this sort of thing were available
* in a library routine.
*/
for (curbyte = (uint8_t *)&response->comp_vendor, line_start= 1,
line_cursor = 0; curbyte < (uint8_t *)&response->crc;
curbyte++, line_cursor++) {
if (line_start != 0) {
/* Offset of this byte from the start of the response. */
sbuf_printf(sb, "%08lx ",
(unsigned long)(curbyte -
(uint8_t *)response));
line_start = 0;
line_cursor = 0;
}
sbuf_printf(sb, "%02x", *curbyte);
if (line_cursor == 15) {
sbuf_printf(sb, "\n");
line_start = 1;
} else
/* Double space after the 8th byte, single space otherwise. */
sbuf_printf(sb, " %s", (line_cursor == 7) ?
" " : "");
}
/* Terminate a partial final line (a full line already ended in \n). */
if (line_cursor != 16)
sbuf_printf(sb, "\n");
return;
}
cam_strvis(comp_vendor, response->comp_vendor,
sizeof(response->comp_vendor), sizeof(comp_vendor));
sbuf_printf(sb, "Component Vendor: %s\n", comp_vendor);
sbuf_printf(sb, "Component ID: %#x\n", scsi_2btoul(response->comp_id));
sbuf_printf(sb, "Component Revision: %#x\n", response->comp_revision);
sbuf_printf(sb, "Vendor Specific: 0x%016jx\n",
(uintmax_t)scsi_8btou64(response->vendor_specific));
}
/*
 * Compose a SMP REPORT GENERAL request and put it into a CCB. This is
 * current as of SPL Revision 7.
 *
 * The CCB is filled for a bidirectional transfer; the CRC bytes are
 * excluded from the request length (presumably appended by the
 * controller hardware -- confirm for new SIM drivers).  long_response
 * requests the SPL long-format response when non-zero.
 */
void
smp_report_general(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
struct smp_report_general_request *request, int request_len,
uint8_t *response, int response_len, int long_response,
uint32_t timeout)
{
cam_fill_smpio(smpio,
retries,
cbfcnp,
/*flags*/CAM_DIR_BOTH,
(uint8_t *)request,
request_len - SMP_CRC_LEN,
response,
response_len,
timeout);
/* Zero the frame, then set only the fields REPORT GENERAL uses. */
bzero(request, sizeof(*request));
request->frame_type = SMP_FRAME_TYPE_REQUEST;
request->function = SMP_FUNC_REPORT_GENERAL;
request->response_len = long_response ? SMP_RG_RESPONSE_LEN : 0;
request->request_len = 0;
}
/*
 * Compose a SMP DISCOVER request and put it into a CCB. This is current
 * as of SPL Revision 7.
 *
 * phy selects the expander phy to discover; ignore_zone_group, when
 * non-zero, sets the corresponding request bit.  CRC bytes are excluded
 * from the request length, as in the other builders in this file.
 */
void
smp_discover(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
struct smp_discover_request *request, int request_len,
uint8_t *response, int response_len, int long_response,
int ignore_zone_group, int phy, uint32_t timeout)
{
cam_fill_smpio(smpio,
retries,
cbfcnp,
/*flags*/CAM_DIR_BOTH,
(uint8_t *)request,
request_len - SMP_CRC_LEN,
response,
response_len,
timeout);
bzero(request, sizeof(*request));
request->frame_type = SMP_FRAME_TYPE_REQUEST;
request->function = SMP_FUNC_DISCOVER;
/* Long (SPL) format advertises explicit request/response lengths. */
request->response_len = long_response ? SMP_DIS_RESPONSE_LEN : 0;
request->request_len = long_response ? SMP_DIS_REQUEST_LEN : 0;
if (ignore_zone_group != 0)
request->ignore_zone_group |= SMP_DIS_IGNORE_ZONE_GROUP;
request->phy = phy;
}
/*
 * Compose a SMP REPORT MANUFACTURER INFORMATION request and put it into a
 * CCB. This is current as of SPL Revision 7.
 *
 * Mirrors smp_report_general(): bidirectional CCB, CRC excluded from the
 * request length, long_response selects the SPL long-format lengths.
 */
void
smp_report_manuf_info(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
struct smp_report_manuf_info_request *request,
int request_len, uint8_t *response, int response_len,
int long_response, uint32_t timeout)
{
cam_fill_smpio(smpio,
retries,
cbfcnp,
/*flags*/CAM_DIR_BOTH,
(uint8_t *)request,
request_len - SMP_CRC_LEN,
response,
response_len,
timeout);
bzero(request, sizeof(*request));
request->frame_type = SMP_FRAME_TYPE_REQUEST;
request->function = SMP_FUNC_REPORT_MANUF_INFO;
request->response_len = long_response ? SMP_RMI_RESPONSE_LEN : 0;
request->request_len = long_response ? SMP_RMI_REQUEST_LEN : 0;
}
/*
 * Compose a SMP PHY CONTROL request and put it into a CCB. This is
 * current as of SPL Revision 7.
 *
 * phy/phy_op select the target phy and operation; prog_min_prl and
 * prog_max_prl are shifted and masked into the programmed min/max
 * physical link rate fields.  slumber_partial and pp_timeout_value are
 * stored as given -- callers are responsible for passing properly
 * encoded values.  CRC bytes are excluded from the request length.
 */
void
smp_phy_control(struct ccb_smpio *smpio, uint32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
struct smp_phy_control_request *request, int request_len,
uint8_t *response, int response_len, int long_response,
uint32_t expected_exp_change_count, int phy, int phy_op,
int update_pp_timeout_val, uint64_t attached_device_name,
int prog_min_prl, int prog_max_prl, int slumber_partial,
int pp_timeout_value, uint32_t timeout)
{
cam_fill_smpio(smpio,
retries,
cbfcnp,
/*flags*/CAM_DIR_BOTH,
(uint8_t *)request,
request_len - SMP_CRC_LEN,
response,
response_len,
timeout);
bzero(request, sizeof(*request));
request->frame_type = SMP_FRAME_TYPE_REQUEST;
request->function = SMP_FUNC_PHY_CONTROL;
request->response_len = long_response ? SMP_PC_RESPONSE_LEN : 0;
request->request_len = long_response ? SMP_PC_REQUEST_LEN : 0;
/* Expander rejects the request if this count is stale (see SMP_FR_INVALID_EXP_CHG_CNT). */
scsi_ulto2b(expected_exp_change_count, request->expected_exp_chg_cnt);
request->phy = phy;
request->phy_operation = phy_op;
if (update_pp_timeout_val != 0)
request->update_pp_timeout |= SMP_PC_UPDATE_PP_TIMEOUT;
scsi_u64to8b(attached_device_name, request->attached_device_name);
request->prog_min_phys_link_rate = (prog_min_prl <<
SMP_PC_PROG_MIN_PL_RATE_SHIFT) & SMP_PC_PROG_MIN_PL_RATE_MASK;
request->prog_max_phys_link_rate = (prog_max_prl <<
SMP_PC_PROG_MAX_PL_RATE_SHIFT) & SMP_PC_PROG_MAX_PL_RATE_MASK;
request->config_bits0 = slumber_partial;
request->pp_timeout_value = pp_timeout_value;
}

520
sys/cam/scsi/smp_all.h Normal file
View file

@ -0,0 +1,520 @@
/*-
* Copyright (c) 2010 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/users/kenm/FreeBSD-test/sys/cam/scsi/smp_all.h#4 $
* $FreeBSD$
*/
/*
* Serial Management Protocol definitions.
*/
#ifndef _SCSI_SMP_ALL_H
#define _SCSI_SMP_ALL_H 1
/* Frame type codes: the first byte of every SMP frame. */
#define SMP_FRAME_TYPE_REQUEST 0x40
#define SMP_FRAME_TYPE_RESPONSE 0x41
/* SMP frames are built from 4-byte words and end with a 4-byte CRC. */
#define SMP_WORD_LEN 4
#define SMP_CRC_LEN 4
/*
 * SMP Functions (current as of SPL Revision 7)
 */
/* 0x00 to 0x7f: SMP input functions */
/* 0x00 to 0x0f: General SMP input functions */
#define SMP_FUNC_REPORT_GENERAL 0x00
#define SMP_FUNC_REPORT_MANUF_INFO 0x01
#define SMP_FUNC_REPORT_SC_STATUS 0x03
#define SMP_FUNC_REPORT_ZONE_PERM_TBL 0x04
#define SMP_FUNC_REPORT_ZONE_MAN_PWD 0x05
#define SMP_FUNC_REPORT_BROADCAST 0x06
/* 0x10 to 0x1f: Phy-based SMP input functions */
#define SMP_FUNC_DISCOVER 0x10
#define SMP_FUNC_REPORT_PHY_ERR_LOG 0x11
#define SMP_FUNC_REPORT_PHY_SATA 0x12
#define SMP_FUNC_REPORT_ROUTE_INFO 0x13
#define SMP_FUNC_REPORT_PHY_EVENT 0x14
/* 0x20 to 0x2f: Descriptor list-based SMP input functions */
#define SMP_FUNC_DISCOVER_LIST 0x20
#define SMP_FUNC_REPORT_PHY_EVENT_LIST 0x21
#define SMP_FUNC_REPORT_EXP_RTL 0x22
/* 0x30 to 0x3f: Reserved for SMP input functions */
/* 0x40 to 0x7f: Vendor specific */
/* 0x80 to 0xff: SMP output functions */
/* 0x80 to 0x8f: General SMP output functions */
#define SMP_FUNC_CONFIG_GENERAL 0x80
#define SMP_FUNC_ENABLE_DISABLE_ZONING 0x81
#define SMP_FUNC_ZONED_BROADCAST 0x85
#define SMP_FUNC_ZONE_LOCK 0x86
#define SMP_FUNC_ZONE_ACTIVATE 0x87
#define SMP_FUNC_ZONE_UNLOCK 0x88
#define SMP_FUNC_CONFIG_ZM_PWD 0x89
#define SMP_FUNC_CONFIG_ZONE_PHY_INFO 0x8a
#define SMP_FUNC_CONFIG_ZONE_PERM_TBL 0x8b
/* 0x90 to 0x9f: Phy-based SMP output functions */
#define SMP_FUNC_CONFIG_ROUTE_INFO 0x90
#define SMP_FUNC_PHY_CONTROL 0x91
#define SMP_FUNC_PHY_TEST_FUNC 0x92
#define SMP_FUNC_CONFIG_PHY_EVENT 0x93
/* 0xa0 to 0xbf: Reserved for SMP output functions */
/* 0xc0 to 0xff: Vendor specific */
/*
 * Function Results (current as of SPL Revision 7)
 *
 * Returned in the function_result byte of every SMP response frame.
 */
#define SMP_FR_ACCEPTED 0x00
#define SMP_FR_UNKNOWN_FUNC 0x01
#define SMP_FR_FUNCTION_FAILED 0x02
#define SMP_FR_INVALID_REQ_FRAME_LEN 0x03
#define SMP_FR_INVALID_EXP_CHG_CNT 0x04
#define SMP_FR_BUSY 0x05
#define SMP_FR_INCOMPLETE_DESC_LIST 0x06
#define SMP_FR_PHY_DOES_NOT_EXIST 0x10
#define SMP_FR_INDEX_DOES_NOT_EXIST 0x11
#define SMP_FR_PHY_DOES_NOT_SUP_SATA 0x12
#define SMP_FR_UNKNOWN_PHY_OP 0x13
#define SMP_FR_UNKNOWN_PHY_TEST_FUNC 0x14
#define SMP_FR_PHY_TEST_FUNC_INPROG 0x15
#define SMP_FR_PHY_VACANT 0x16
#define SMP_FR_UNKNOWN_PHY_EVENT_SRC 0x17
#define SMP_FR_UNKNOWN_DESC_TYPE 0x18
#define SMP_FR_UNKNOWN_PHY_FILTER 0x19
#define SMP_FR_AFFILIATION_VIOLATION 0x1a
#define SMP_FR_SMP_ZONE_VIOLATION 0x20
#define SMP_FR_NO_MGMT_ACCESS_RIGHTS 0x21
#define SMP_FR_UNKNOWN_ED_ZONING_VAL 0x22
#define SMP_FR_ZONE_LOCK_VIOLATION 0x23
#define SMP_FR_NOT_ACTIVATED 0x24
#define SMP_FR_ZG_OUT_OF_RANGE 0x25
#define SMP_FR_NO_PHYS_PRESENCE 0x26
#define SMP_FR_SAVING_NOT_SUP 0x27
#define SMP_FR_SRC_ZONE_DNE 0x28
#define SMP_FR_DISABLED_PWD_NOT_SUP 0x29
/*
* REPORT GENERAL request and response, current as of SPL Revision 7.
*/
/*
 * Request frame: just the common SMP header plus CRC, no
 * function-specific payload.
 */
struct smp_report_general_request
{
	uint8_t frame_type;	/* SMP_FRAME_TYPE_REQUEST */
	uint8_t function;	/* SMP_FUNC_REPORT_GENERAL */
	uint8_t response_len;
	uint8_t request_len;
	uint8_t crc[4];
};
/*
 * Response frame layout.  Fields map 1:1 onto the wire format; do not
 * reorder or resize them.  Multi-byte fields are raw byte arrays
 * (big-endian on the wire; presumably decoded with the scsi_*b helpers
 * elsewhere — confirm against smp_all.c).
 */
struct smp_report_general_response
{
	uint8_t frame_type;		/* SMP_FRAME_TYPE_RESPONSE */
	uint8_t function;		/* SMP_FUNC_REPORT_GENERAL */
	uint8_t function_result;	/* one of the SMP_FR_* codes */
	uint8_t response_len;
#define SMP_RG_RESPONSE_LEN 0x11
	uint8_t expander_change_count[2];
	uint8_t expander_route_indexes[2];
	uint8_t long_response;
#define SMP_RG_LONG_RESPONSE 0x80
	uint8_t num_phys;
	uint8_t config_bits0;		/* flags below */
#define SMP_RG_TABLE_TO_TABLE_SUP 0x80
#define SMP_RG_ZONE_CONFIGURING 0x40
#define SMP_RG_SELF_CONFIGURING 0x20
#define SMP_RG_STP_CONTINUE_AWT 0x10
#define SMP_RG_OPEN_REJECT_RETRY_SUP 0x08
#define SMP_RG_CONFIGURES_OTHERS 0x04
#define SMP_RG_CONFIGURING 0x02
#define SMP_RG_EXT_CONFIG_ROUTE_TABLE 0x01
	uint8_t reserved0;
	uint8_t encl_logical_id[8];
	uint8_t reserved1[8];
	uint8_t reserved2[2];
	uint8_t stp_bus_inact_time_limit[2];
	uint8_t stp_max_conn_time_limit[2];
	uint8_t stp_smp_it_nexus_loss_time[2];
	uint8_t config_bits1;		/* zoning-related flags below */
#define SMP_RG_NUM_ZONE_GROUPS_MASK 0xc0
#define SMP_RG_NUM_ZONE_GROUPS_SHIFT 6
#define SMP_RG_ZONE_LOCKED 0x10
#define SMP_RG_PP_SUPPORTED 0x08
#define SMP_RG_PP_ASSERTED 0x04
#define SMP_RG_ZONING_SUPPORTED 0x02
#define SMP_RG_ZONING_ENABLED 0x01
	uint8_t config_bits2;		/* saving-capability flags below */
#define SMP_RG_SAVING 0x10
#define SMP_RG_SAVING_ZM_PWD_SUP 0x08
#define SMP_RG_SAVING_PHY_INFO_SUP 0x04
#define SMP_RG_SAVING_ZPERM_TAB_SUP 0x02
#define SMP_RG_SAVING_ZENABLED_SUP 0x01
	uint8_t max_num_routed_addrs[2];
	uint8_t active_zm_address[8];
	uint8_t zone_lock_inact_time_limit[2];
	uint8_t reserved3[2];
	uint8_t reserved4;
	uint8_t first_encl_conn_el_index;
	uint8_t num_encl_conn_el_indexes;
	uint8_t reserved5;
	uint8_t reduced_functionality;
#define SMP_RG_REDUCED_FUNCTIONALITY 0x80
	uint8_t time_to_reduced_func;
	uint8_t initial_time_to_reduced_func;
	uint8_t max_reduced_func_time;
	uint8_t last_sc_stat_desc_index[2];
	uint8_t max_sc_stat_descs[2];
	uint8_t last_phy_evl_desc_index[2];
	uint8_t max_stored_pel_descs[2];
	uint8_t stp_reject_to_open_limit[2];
	uint8_t reserved6[2];
	uint8_t crc[4];
};
/*
* REPORT MANUFACTURER INFORMATION request and response, current as of SPL
* Revision 7.
*/
/* Request frame: common SMP header plus CRC only. */
struct smp_report_manuf_info_request
{
	uint8_t frame_type;	/* SMP_FRAME_TYPE_REQUEST */
	uint8_t function;	/* SMP_FUNC_REPORT_MANUF_INFO */
	uint8_t response_len;
	uint8_t request_len;
#define SMP_RMI_REQUEST_LEN 0x00
	uint8_t crc[4];
};
/*
 * Response frame: expander identity strings (vendor/product/revision)
 * plus component information.  Fields map 1:1 onto the wire format.
 */
struct smp_report_manuf_info_response
{
	uint8_t frame_type;		/* SMP_FRAME_TYPE_RESPONSE */
	uint8_t function;		/* SMP_FUNC_REPORT_MANUF_INFO */
	uint8_t function_result;	/* one of the SMP_FR_* codes */
	uint8_t response_len;
#define SMP_RMI_RESPONSE_LEN 0x0e
	uint8_t expander_change_count[2];
	uint8_t reserved0[2];
	uint8_t sas_11_format;
#define SMP_RMI_SAS11_FORMAT 0x01
	uint8_t reserved1[3];
	uint8_t vendor[8];		/* ASCII, space padded */
	uint8_t product[16];
	uint8_t revision[4];
	uint8_t comp_vendor[8];
	uint8_t comp_id[2];
	uint8_t comp_revision;
	uint8_t reserved2;
	uint8_t vendor_specific[8];
	uint8_t crc[4];
};
/*
* DISCOVER request and response, current as of SPL Revision 7.
*/
/* Request frame: selects the phy to discover. */
struct smp_discover_request
{
	uint8_t frame_type;	/* SMP_FRAME_TYPE_REQUEST */
	uint8_t function;	/* SMP_FUNC_DISCOVER */
	uint8_t response_len;
	uint8_t request_len;
#define SMP_DIS_REQUEST_LEN 0x02
	uint8_t reserved0[4];
	uint8_t ignore_zone_group;
#define SMP_DIS_IGNORE_ZONE_GROUP 0x01
	uint8_t phy;		/* phy identifier to discover */
	uint8_t reserved1[2];
	uint8_t crc[4];
};
/*
 * Response frame: full per-phy state — attached device type, link
 * rates, SAS addresses, power management capabilities and the
 * current/default/saved/shadow zoning state.  Fields map 1:1 onto the
 * wire format; do not reorder or resize them.
 */
struct smp_discover_response
{
	uint8_t frame_type;		/* SMP_FRAME_TYPE_RESPONSE */
	uint8_t function;		/* SMP_FUNC_DISCOVER */
	uint8_t function_result;	/* one of the SMP_FR_* codes */
	uint8_t response_len;
#define SMP_DIS_RESPONSE_LEN 0x20
	uint8_t expander_change_count[2];
	uint8_t reserved0[3];
	uint8_t phy;			/* phy this response describes */
	uint8_t reserved1[2];
	uint8_t attached_device;	/* type in bits 6:4, reason in 3:0 */
#define SMP_DIS_AD_TYPE_MASK 0x70
#define SMP_DIS_AD_TYPE_NONE 0x00
#define SMP_DIS_AD_TYPE_SAS_SATA 0x10
#define SMP_DIS_AD_TYPE_EXP 0x20
#define SMP_DIS_AD_TYPE_EXP_OLD 0x30
#define SMP_DIS_ATTACH_REASON_MASK 0x0f
	uint8_t neg_logical_link_rate;	/* negotiated rate or status code */
#define SMP_DIS_LR_MASK 0x0f
#define SMP_DIS_LR_DISABLED 0x01
#define SMP_DIS_LR_PHY_RES_PROB 0x02
#define SMP_DIS_LR_SPINUP_HOLD 0x03
#define SMP_DIS_LR_PORT_SEL 0x04
#define SMP_DIS_LR_RESET_IN_PROG 0x05
#define SMP_DIS_LR_UNSUP_PHY_ATTACHED 0x06
#define SMP_DIS_LR_G1_15GBPS 0x08
#define SMP_DIS_LR_G2_30GBPS 0x09
#define SMP_DIS_LR_G3_60GBPS 0x0a
	uint8_t config_bits0;		/* attached initiator capabilities */
#define SMP_DIS_ATTACHED_SSP_INIT 0x08
#define SMP_DIS_ATTACHED_STP_INIT 0x04
#define SMP_DIS_ATTACHED_SMP_INIT 0x02
#define SMP_DIS_ATTACHED_SATA_HOST 0x01
	uint8_t config_bits1;		/* attached target capabilities */
#define SMP_DIS_ATTACHED_SATA_PORTSEL 0x80
#define SMP_DIS_STP_BUFFER_TOO_SMALL 0x10
#define SMP_DIS_ATTACHED_SSP_TARG 0x08
#define SMP_DIS_ATTACHED_STP_TARG 0x04
#define SMP_DIS_ATTACHED_SMP_TARG 0x02
#define SMP_DIS_ATTACHED_SATA_DEV 0x01
	uint8_t sas_address[8];
	uint8_t attached_sas_address[8];
	uint8_t attached_phy_id;
	uint8_t config_bits2;		/* attached-device power/zone bits */
#define SMP_DIS_ATT_SLUMB_CAP 0x10
#define SMP_DIS_ATT_PAR_CAP 0x08
#define SMP_DIS_ATT_IN_ZPSDS_PER 0x04
#define SMP_DIS_ATT_REQ_IN_ZPSDS 0x02
#define SMP_DIS_ATT_BREAK_RPL_CAP 0x01
	uint8_t reserved2[6];
	uint8_t link_rate0;		/* programmed/hw minimum link rate */
#define SMP_DIS_PROG_MIN_LR_MASK 0xf0
#define SMP_DIS_PROG_MIN_LR_SHIFT 4
#define SMP_DIS_HARD_MIN_LR_MASK 0x0f
	uint8_t link_rate1;		/* programmed/hw maximum link rate */
#define SMP_DIS_PROG_MAX_LR_MAX 0xf0
#define SMP_DIS_PROG_MAX_LR_SHIFT 4
#define SMP_DIS_HARD_MAX_LR_MASK 0x0f
	uint8_t phy_change_count;
	uint8_t pp_timeout;		/* also carries the virtual-phy bit */
#define SMP_DIS_VIRTUAL_PHY 0x80
#define SMP_DIS_PP_TIMEOUT_MASK 0x0f
	uint8_t routing_attr;
	uint8_t conn_type;
	uint8_t conn_el_index;
	uint8_t conn_phys_link;
	uint8_t config_bits3;		/* phy power condition/capabilities */
#define SMP_DIS_PHY_POW_COND_MASK 0xc0
#define SMP_DIS_PHY_POW_COND_SHIFT 6
#define SMP_DIS_SAS_SLUMB_CAP 0x08
#define SMP_DIS_SAS_PART_CAP 0x04
#define SMP_DIS_SATA_SLUMB_CAP 0x02
#define SMP_DIS_SATA_PART_CAP 0x01
	uint8_t config_bits4;		/* power management enabled bits */
#define SMP_DIS_SAS_SLUMB_ENB 0x08
#define SMP_DIS_SAS_PART_ENB 0x04
#define SMP_DIS_SATA_SLUMB_ENB 0x02
#define SMP_DIS_SATA_PART_ENB 0x01
	uint8_t vendor_spec[2];
	uint8_t attached_dev_name[8];
	uint8_t config_bits5;		/* current zoning state */
#define SMP_DIS_REQ_IN_ZPSDS_CHG 0x40
#define SMP_DIS_IN_ZPSDS_PER 0x20
#define SMP_DIS_REQ_IN_ZPSDS 0x10
#define SMP_DIS_ZG_PER 0x04
#define SMP_DIS_IN_ZPSDS 0x02
#define SMP_DIS_ZONING_ENB 0x01
	uint8_t reserved3[2];
	uint8_t zone_group;
	uint8_t self_config_status;
	uint8_t self_config_levels_comp;
	uint8_t reserved4[2];
	uint8_t self_config_sas_addr[8];
	uint8_t prog_phy_cap[4];
	uint8_t current_phy_cap[4];
	uint8_t attached_phy_cap[4];
	uint8_t reserved5[6];
	uint8_t neg_phys_link_rate;	/* reason in bits 7:4, rate in 3:0 */
#define SMP_DIS_REASON_MASK 0xf0
#define SMP_DIS_REASON_SHIFT 4
#define SMP_DIS_PHYS_LR_MASK 0x0f
	uint8_t config_bits6;
#define SMP_DIS_OPTICAL_MODE_ENB 0x04
#define SMP_DIS_NEG_SSC 0x02
#define SMP_DIS_HW_MUX_SUP 0x01
	uint8_t config_bits7;		/* default zoning state */
#define SMP_DIS_DEF_IN_ZPSDS_PER 0x20
#define SMP_DIS_DEF_REQ_IN_ZPSDS 0x10
#define SMP_DIS_DEF_ZG_PER 0x04
#define SMP_DIS_DEF_ZONING_ENB 0x01
	uint8_t reserved6;
	uint8_t reserved7;
	uint8_t default_zone_group;
	uint8_t config_bits8;		/* saved zoning state */
#define SMP_DIS_SAVED_IN_ZPSDS_PER 0x20
#define SMP_DIS_SAVED_REQ_IN_SPSDS 0x10
#define SMP_DIS_SAVED_ZG_PER 0x04
#define SMP_DIS_SAVED_ZONING_ENB 0x01
	uint8_t reserved8;
	uint8_t reserved9;
	uint8_t saved_zone_group;
	uint8_t config_bits9;		/* shadow zoning state */
#define SMP_DIS_SHADOW_IN_ZPSDS_PER 0x20
#define SMP_DIS_SHADOW_IN_REQ_IN_ZPSDS 0x10
#define SMP_DIS_SHADOW_ZG_PER 0x04
	uint8_t reserved10;
	uint8_t reserved11;
	uint8_t shadow_zone_group;
	uint8_t device_slot_num;
	uint8_t device_slot_group_num;
	uint8_t device_slot_group_out_conn[6];
	uint8_t stp_buffer_size[2];
	uint8_t reserved12;
	uint8_t reserved13;
	uint8_t crc[4];
};
/*
* PHY CONTROL request and response. Current as of SPL Revision 7.
*/
/*
 * Request frame: selects a phy, an operation to perform on it, and
 * optional link rate / power condition / attached device name settings.
 * Built by smp_phy_control() in smp_all.c.  Fields map 1:1 onto the
 * wire format; do not reorder or resize them.
 */
struct smp_phy_control_request
{
	uint8_t frame_type;	/* SMP_FRAME_TYPE_REQUEST */
	uint8_t function;	/* SMP_FUNC_PHY_CONTROL */
	uint8_t response_len;
#define SMP_PC_RESPONSE_LEN 0x00
	uint8_t request_len;
#define SMP_PC_REQUEST_LEN 0x09
	uint8_t expected_exp_chg_cnt[2];
	uint8_t reserved0[3];
	uint8_t phy;		/* phy identifier to operate on */
	uint8_t phy_operation;	/* one of the operation codes below */
#define SMP_PC_PHY_OP_NOP 0x00
#define SMP_PC_PHY_OP_LINK_RESET 0x01
#define SMP_PC_PHY_OP_HARD_RESET 0x02
#define SMP_PC_PHY_OP_DISABLE 0x03
#define SMP_PC_PHY_OP_CLEAR_ERR_LOG 0x05
/* NOTE: "AFFILIATON" spelling is part of the API; do not "fix" it. */
#define SMP_PC_PHY_OP_CLEAR_AFFILIATON 0x06
#define SMP_PC_PHY_OP_TRANS_SATA_PSS 0x07
#define SMP_PC_PHY_OP_CLEAR_STP_ITN_LS 0x08
#define SMP_PC_PHY_OP_SET_ATT_DEV_NAME 0x09
	uint8_t update_pp_timeout;
#define SMP_PC_UPDATE_PP_TIMEOUT 0x01
	uint8_t reserved1[12];
	uint8_t attached_device_name[8];
	uint8_t prog_min_phys_link_rate;	/* rate in the high nibble */
#define SMP_PC_PROG_MIN_PL_RATE_MASK 0xf0
#define SMP_PC_PROG_MIN_PL_RATE_SHIFT 4
	uint8_t prog_max_phys_link_rate;	/* rate in the high nibble */
#define SMP_PC_PROG_MAX_PL_RATE_MASK 0xf0
#define SMP_PC_PROG_MAX_PL_RATE_SHIFT 4
	uint8_t config_bits0;	/* power condition controls, 2 bits each */
#define SMP_PC_SP_NC 0x00
#define SMP_PC_SP_DISABLE 0x02
#define SMP_PC_SP_ENABLE 0x01
#define SMP_PC_SAS_SLUMBER_NC 0x00
#define SMP_PC_SAS_SLUMBER_DISABLE 0x80
#define SMP_PC_SAS_SLUMBER_ENABLE 0x40
#define SMP_PC_SAS_SLUMBER_MASK 0xc0
#define SMP_PC_SAS_SLUMBER_SHIFT 6
#define SMP_PC_SAS_PARTIAL_NC 0x00
#define SMP_PC_SAS_PARTIAL_DISABLE 0x20
#define SMP_PC_SAS_PARTIAL_ENABLE 0x10
#define SMP_PC_SAS_PARTIAL_MASK 0x30
#define SMP_PC_SAS_PARTIAL_SHIFT 4
#define SMP_PC_SATA_SLUMBER_NC 0x00
#define SMP_PC_SATA_SLUMBER_DISABLE 0x08
#define SMP_PC_SATA_SLUMBER_ENABLE 0x04
#define SMP_PC_SATA_SLUMBER_MASK 0x0c
#define SMP_PC_SATA_SLUMBER_SHIFT 2
#define SMP_PC_SATA_PARTIAL_NC 0x00
#define SMP_PC_SATA_PARTIAL_DISABLE 0x02
#define SMP_PC_SATA_PARTIAL_ENABLE 0x01
#define SMP_PC_SATA_PARTIAL_MASK 0x03
#define SMP_PC_SATA_PARTIAL_SHIFT 0
	uint8_t reserved2;
	uint8_t pp_timeout_value;	/* low nibble only */
#define SMP_PC_PP_TIMEOUT_MASK 0x0f
	uint8_t reserved3[3];
	uint8_t crc[4];
};
/*
 * Response frame: header and function result only; PHY CONTROL has no
 * response payload.
 */
struct smp_phy_control_response
{
	uint8_t frame_type;		/* SMP_FRAME_TYPE_RESPONSE */
	uint8_t function;		/* SMP_FUNC_PHY_CONTROL */
	uint8_t function_result;	/* one of the SMP_FR_* codes */
	uint8_t response_len;
/* Benign identical redefinition of the request-side value above. */
#define SMP_PC_RESPONSE_LEN 0x00
	uint8_t crc[4];
};
__BEGIN_DECLS
/*
 * Decode/print helpers and CCB constructors implemented in smp_all.c.
 * The smp_*() constructors fill in a ccb_smpio and build the matching
 * request frame; the *_sbuf()/*_desc() routines format results for
 * human consumption.  smp_error_sbuf() has a different signature in
 * userland, where a cam_device is needed.
 */
const char *smp_error_desc(int function_result);
const char *smp_command_desc(uint8_t cmd_num);
void smp_command_decode(uint8_t *smp_request, int request_len, struct sbuf *sb,
			char *line_prefix, int first_line_len, int line_len);
void smp_command_sbuf(struct ccb_smpio *smpio, struct sbuf *sb,
		      char *line_prefix, int first_line_len, int line_len);
#ifdef _KERNEL
void smp_error_sbuf(struct ccb_smpio *smpio, struct sbuf *sb);
#else /* !_KERNEL*/
void smp_error_sbuf(struct cam_device *device, struct ccb_smpio *smpio,
		    struct sbuf *sb);
#endif /* _KERNEL/!_KERNEL */
void smp_report_general_sbuf(struct smp_report_general_response *response,
			     int response_len, struct sbuf *sb);
void smp_report_manuf_info_sbuf(struct smp_report_manuf_info_response *response,
				int response_len, struct sbuf *sb);
void smp_report_general(struct ccb_smpio *smpio, uint32_t retries,
			void (*cbfcnp)(struct cam_periph *, union ccb *),
			struct smp_report_general_request *request,
			int request_len, uint8_t *response, int response_len,
			int long_response, uint32_t timeout);
void smp_discover(struct ccb_smpio *smpio, uint32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  struct smp_discover_request *request, int request_len,
		  uint8_t *response, int response_len, int long_response,
		  int ignore_zone_group, int phy, uint32_t timeout);
void smp_report_manuf_info(struct ccb_smpio *smpio, uint32_t retries,
			   void (*cbfcnp)(struct cam_periph *, union ccb *),
			   struct smp_report_manuf_info_request *request,
			   int request_len, uint8_t *response, int response_len,
			   int long_response, uint32_t timeout);
void smp_phy_control(struct ccb_smpio *smpio, uint32_t retries,
		     void (*cbfcnp)(struct cam_periph *, union ccb *),
		     struct smp_phy_control_request *request, int request_len,
		     uint8_t *response, int response_len, int long_response,
		     uint32_t expected_exp_change_count, int phy, int phy_op,
		     int update_pp_timeout_val, uint64_t attached_device_name,
		     int prog_min_prl, int prog_max_prl, int slumber_partial,
		     int pp_timeout_value, uint32_t timeout);
__END_DECLS
#endif /*_SCSI_SMP_ALL_H*/

View file

@ -105,7 +105,10 @@ acl_from_aces(struct acl *aclp, const ace_t *aces, int nentries)
struct acl_entry *entry;
const ace_t *ace;
KASSERT(nentries >= 1, ("empty ZFS ACL"));
if (nentries < 1) {
printf("acl_from_aces: empty ZFS ACL; returning EINVAL.\n");
return (EINVAL);
}
if (nentries > ACL_MAX_ENTRIES) {
/*

View file

@ -192,7 +192,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
ASSERT(length <= DMU_MAX_ACCESS);
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
dbuf_flags |= DB_RF_NOPREFETCH;
@ -209,6 +209,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
os_dsl_dataset->ds_object,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (EIO);
}
nblks = 1;
@ -231,9 +232,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
}
/* initiate async i/o */
if (read) {
rw_exit(&dn->dn_struct_rwlock);
(void) dbuf_read(db, zio, dbuf_flags);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
}
dbp[i] = &db->db;
}
@ -540,7 +539,7 @@ dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
{
dnode_t *dn;
dmu_buf_t **dbp;
int numbufs, i, err;
int numbufs, err;
err = dnode_hold(os->os, object, FTAG, &dn);
if (err)
@ -551,7 +550,7 @@ dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
* block. If we ever do the tail block optimization, we will need to
* handle that here as well.
*/
if (dn->dn_datablkshift == 0) {
if (dn->dn_maxblkid == 0) {
int newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
bzero((char *)buf + newsz, size - newsz);
@ -560,6 +559,7 @@ dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
while (size > 0) {
uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
int i;
/*
* NB: we could do this block-at-a-time, but it's nice

View file

@ -3627,6 +3627,14 @@ zfsdev_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
uint_t vec;
int error;
/*
* Check if we have sufficient kernel memory allocated
* for the zfs_cmd_t request. Bail out if not so we
* will not access undefined memory region.
*/
if (IOCPARM_LEN(cmd) < sizeof(zfs_cmd_t))
return (EINVAL);
vec = ZFS_IOC(cmd);
if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))

View file

@ -1218,12 +1218,14 @@ zfs_mount(vfs_t *vfsp)
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
#ifdef sun
/*
* Add an extra VFS_HOLD on our parent vfs so that it can't
* disappear due to a forced unmount.
*/
if (error == 0 && ((zfsvfs_t *)vfsp->vfs_data)->z_issnap)
VFS_HOLD(mvp->v_vfsp);
#endif /* sun */
out:
return (error);
@ -1555,7 +1557,7 @@ zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
int err;
/*
* zfs_zget() can't operate on virtual entires like .zfs/ or
* zfs_zget() can't operate on virtual entries like .zfs/ or
* .zfs/snapshot/ directories, that's why we return EOPNOTSUPP.
* This will make NFS to switch to LOOKUP instead of using VGET.
*/
@ -1766,12 +1768,14 @@ zfs_freevfs(vfs_t *vfsp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
#ifdef sun
/*
* If this is a snapshot, we have an extra VFS_HOLD on our parent
* from zfs_mount(). Release it here.
*/
if (zfsvfs->z_issnap)
VFS_RELE(zfsvfs->z_parent->z_vfs);
#endif /* sun */
zfsvfs_free(zfsvfs);

View file

@ -67,6 +67,7 @@
#include <sys/sf_buf.h>
#include <sys/sched.h>
#include <sys/acl.h>
#include <vm/vm_pageout.h>
/*
* Programming rules.
@ -481,7 +482,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
uiomove_fromphys(&m, off, bytes, uio);
VM_OBJECT_LOCK(obj);
vm_page_wakeup(m);
} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
} else if (uio->uio_segflg == UIO_NOCOPY) {
/*
* The code below is here to make sendfile(2) work
* correctly with ZFS. As pointed out by ups@
@ -491,7 +492,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
*/
KASSERT(off == 0,
("unexpected offset in mappedread for sendfile"));
if ((m->oflags & VPO_BUSY) != 0) {
if (m != NULL && (m->oflags & VPO_BUSY) != 0) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
@ -501,8 +502,17 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
vm_page_flag_set(m, PG_REFERENCED);
vm_page_sleep(m, "zfsmrb");
goto again;
} else if (m == NULL) {
m = vm_page_alloc(obj, OFF_TO_IDX(start),
VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL);
if (m == NULL) {
VM_OBJECT_UNLOCK(obj);
VM_WAIT;
VM_OBJECT_LOCK(obj);
goto again;
}
}
vm_page_busy(m);
vm_page_io_start(m);
VM_OBJECT_UNLOCK(obj);
if (dirbytes > 0) {
error = dmu_read_uio(os, zp->z_id, uio,
@ -520,7 +530,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
VM_OBJECT_LOCK(obj);
if (error == 0)
m->valid = VM_PAGE_BITS_ALL;
vm_page_wakeup(m);
vm_page_io_finish(m);
if (error == 0) {
uio->uio_resid -= bytes;
uio->uio_offset += bytes;
@ -1031,6 +1041,10 @@ zfs_get_done(dmu_buf_t *db, void *vzgd)
VFS_UNLOCK_GIANT(vfslocked);
}
#ifdef DEBUG
static int zil_fault_io = 0;
#endif
/*
* Get data to generate a TX_WRITE intent log record.
*/
@ -1112,7 +1126,21 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd->zgd_rl = rl;
zgd->zgd_zilog = zfsvfs->z_log;
zgd->zgd_bp = &lr->lr_blkptr;
VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db));
#ifdef DEBUG
if (zil_fault_io) {
error = EIO;
zil_fault_io = 0;
} else {
error = dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db);
}
#else
error = dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db);
#endif
if (error != 0) {
kmem_free(zgd, sizeof (zgd_t));
goto out;
}
ASSERT(boff == db->db_offset);
lr->lr_blkoff = off - boff;
error = dmu_sync(zio, db, &lr->lr_blkptr,

View file

@ -933,6 +933,10 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
}
error = zilog->zl_get_data(
itx->itx_private, lr, dbuf, lwb->lwb_zio);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
if (error) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);

View file

@ -240,6 +240,11 @@ struct prpsinfo32 {
char pr_psargs[PRARGSZ+1];
};
struct thrmisc32 {
char pr_tname[MAXCOMLEN+1];
u_int _pad;
};
struct mq_attr32 {
int mq_flags;
int mq_maxmsg;

View file

@ -2365,7 +2365,8 @@ freebsd32_nmount(struct thread *td,
* Filter out MNT_ROOTFS. We do not want clients of nmount() in
* userspace to set this flag, but we must filter it out if we want
* MNT_UPDATE on the root file system to work.
* MNT_ROOTFS should only be set in the kernel in vfs_mountroot_try().
* MNT_ROOTFS should only be set by the kernel when mounting its
* root file system.
*/
uap->flags &= ~MNT_ROOTFS;

View file

@ -29,6 +29,9 @@
* $FreeBSD$
*/
#ifndef _COMPAT_IA32_IA32_SIGNAL_H
#define _COMPAT_IA32_IA32_SIGNAL_H
struct ia32_mcontext {
u_int32_t mc_onstack; /* XXX - sigcontext compat. */
u_int32_t mc_gs; /* machine state (struct trapframe) */
@ -188,3 +191,5 @@ extern int sz_freebsd4_ia32_sigcode;
extern void ia32_sendsig(sig_t, struct ksiginfo *, sigset_t *);
extern void ia32_setregs(struct thread *td, struct image_params *imgp,
u_long stack);
#endif

View file

@ -28,6 +28,9 @@
* $FreeBSD$
*/
#ifndef _COMPAT_IA32_IA32_UTIL_H
#define _COMPAT_IA32_IA32_UTIL_H
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
@ -51,3 +54,5 @@
struct syscall_args;
int ia32_fetch_syscall_args(struct thread *td, struct syscall_args *sa);
void ia32_set_syscall_retval(struct thread *, int);
#endif

View file

@ -276,11 +276,12 @@ linprocfs_docpuinfo(PFS_FILL_ARGS)
sbuf_printf(sb,
"processor\t: %d\n"
"vendor_id\t: %.20s\n"
"cpu family\t: %d\n"
"model\t\t: %d\n"
"cpu family\t: %u\n"
"model\t\t: %u\n"
"model name\t: %s\n"
"stepping\t: %d\n\n",
i, cpu_vendor, class, cpu, model, cpu_id & 0xf);
"stepping\t: %u\n\n",
i, cpu_vendor, CPUID_TO_FAMILY(cpu_id),
CPUID_TO_MODEL(cpu_id), model, cpu_id & CPUID_STEPPING);
/* XXX per-cpu vendor / class / model / id? */
}

View file

@ -182,8 +182,8 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi, char
sprintf(host, "host%d", host_number++);
strcat(new_path, "/");
strcat(new_path, host);
sub_dir = pfs_create_dir(dir,
host, NULL, NULL, NULL, 0);
pfs_create_dir(dir, host,
NULL, NULL, NULL, 0);
scsi_host = malloc(sizeof(
struct scsi_host_queue),
M_DEVBUF, M_NOWAIT);

View file

@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
@ -155,7 +156,7 @@ void
linux_proc_exit(void *arg __unused, struct proc *p)
{
struct linux_emuldata *em;
int error;
int error, shared_flags, shared_xstat;
struct thread *td = FIRST_THREAD_IN_PROC(p);
int *child_clear_tid;
struct proc *q, *nq;
@ -187,6 +188,8 @@ linux_proc_exit(void *arg __unused, struct proc *p)
}
EMUL_SHARED_WLOCK(&emul_shared_lock);
shared_flags = em->shared->flags;
shared_xstat = em->shared->xstat;
LIST_REMOVE(em, threads);
em->shared->refs--;
@ -196,6 +199,9 @@ linux_proc_exit(void *arg __unused, struct proc *p)
} else
EMUL_SHARED_WUNLOCK(&emul_shared_lock);
if ((shared_flags & EMUL_SHARED_HASXSTAT) != 0)
p->p_xstat = shared_xstat;
if (child_clear_tid != NULL) {
struct linux_sys_futex_args cup;
int null = 0;
@ -257,6 +263,10 @@ linux_proc_exec(void *arg __unused, struct proc *p, struct image_params *imgp)
if (__predict_false(imgp->sysent == &elf_linux_sysvec
&& p->p_sysent != &elf_linux_sysvec))
linux_proc_init(FIRST_THREAD_IN_PROC(p), p->p_pid, 0);
if (__predict_false((p->p_sysent->sv_flags & SV_ABI_MASK) ==
SV_ABI_LINUX))
/* Kill threads regardless of imgp->sysent value */
linux_kill_threads(FIRST_THREAD_IN_PROC(p), SIGKILL);
if (__predict_false(imgp->sysent != &elf_linux_sysvec
&& p->p_sysent == &elf_linux_sysvec)) {
struct linux_emuldata *em;
@ -334,3 +344,29 @@ linux_set_tid_address(struct thread *td, struct linux_set_tid_address_args *args
EMUL_UNLOCK(&emul_lock);
return 0;
}
void
linux_kill_threads(struct thread *td, int sig)
{
struct linux_emuldata *em, *td_em, *tmp_em;
struct proc *sp;
td_em = em_find(td->td_proc, EMUL_DONTLOCK);
KASSERT(td_em != NULL, ("linux_kill_threads: emuldata not found.\n"));
EMUL_SHARED_RLOCK(&emul_shared_lock);
LIST_FOREACH_SAFE(em, &td_em->shared->threads, threads, tmp_em) {
if (em->pid == td_em->pid)
continue;
sp = pfind(em->pid);
if ((sp->p_flag & P_WEXIT) == 0)
psignal(sp, sig);
PROC_UNLOCK(sp);
#ifdef DEBUG
printf(LMSG("linux_kill_threads: kill PID %d\n"), em->pid);
#endif
}
EMUL_SHARED_RUNLOCK(&emul_shared_lock);
}

View file

@ -31,8 +31,12 @@
#ifndef _LINUX_EMUL_H_
#define _LINUX_EMUL_H_
#define EMUL_SHARED_HASXSTAT 0x01
struct linux_emuldata_shared {
int refs;
int flags;
int xstat;
pid_t group_pid;
LIST_HEAD(, linux_emuldata) threads; /* head of list of linux threads */
@ -76,6 +80,7 @@ int linux_proc_init(struct thread *, pid_t, int);
void linux_proc_exit(void *, struct proc *);
void linux_schedtail(void *, struct proc *);
void linux_proc_exec(void *, struct proc *, struct image_params *);
void linux_kill_threads(struct thread *, int);
extern struct sx emul_shared_lock;
extern struct mtx emul_lock;

View file

@ -416,7 +416,7 @@ futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
int op_ret, val, ret, nrwake;
int clockrt, nrwake, op_ret, ret, val;
struct linux_emuldata *em;
struct waiting_proc *wp;
struct futex *f, *f2 = NULL;
@ -429,7 +429,19 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
* in most cases (ie. when futexes are not shared on file descriptor
* or between different processes.).
*/
args->op = (args->op & ~LINUX_FUTEX_PRIVATE_FLAG);
args->op = args->op & ~LINUX_FUTEX_PRIVATE_FLAG;
/*
* Currently support for switching between CLOCK_MONOTONIC and
* CLOCK_REALTIME is not present. However Linux forbids the use of
* FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
* FUTEX_WAIT_REQUEUE_PI.
*/
clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
return (ENOSYS);
switch (args->op) {
case LINUX_FUTEX_WAIT:
@ -612,14 +624,23 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
case LINUX_FUTEX_LOCK_PI:
/* not yet implemented */
linux_msg(td,
"linux_sys_futex: "
"op LINUX_FUTEX_LOCK_PI not implemented\n");
return (ENOSYS);
case LINUX_FUTEX_UNLOCK_PI:
/* not yet implemented */
linux_msg(td,
"linux_sys_futex: "
"op LINUX_FUTEX_UNLOCK_PI not implemented\n");
return (ENOSYS);
case LINUX_FUTEX_TRYLOCK_PI:
/* not yet implemented */
linux_msg(td,
"linux_sys_futex: "
"op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
return (ENOSYS);
case LINUX_FUTEX_REQUEUE:
@ -632,15 +653,30 @@ linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
*/
em = em_find(td->td_proc, EMUL_DONTLOCK);
if (em->used_requeue == 0) {
printf("linux(%s (%d)) sys_futex: "
"unsupported futex_requeue op\n",
td->td_proc->p_comm, td->td_proc->p_pid);
em->used_requeue = 1;
linux_msg(td,
"linux_sys_futex: "
"unsupported futex_requeue op\n");
em->used_requeue = 1;
}
return (EINVAL);
case LINUX_FUTEX_WAIT_BITSET:
/* not yet implemented */
linux_msg(td,
"linux_sys_futex: "
"op FUTEX_WAIT_BITSET not implemented\n");
return (ENOSYS);
case LINUX_FUTEX_WAIT_REQUEUE_PI:
/* not yet implemented */
linux_msg(td,
"linux_sys_futex: "
"op FUTEX_WAIT_REQUEUE_PI not implemented\n");
return (ENOSYS);
default:
printf("linux_sys_futex: unknown op %d\n", args->op);
linux_msg(td,
"linux_sys_futex: unknown op %d\n", args->op);
return (ENOSYS);
}
@ -665,7 +701,7 @@ linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args
em->robust_futexes = args->head;
EMUL_UNLOCK(&emul_lock);
return (0);
return (0);
}
int
@ -683,7 +719,7 @@ linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args
if (!args->pid) {
em = em_find(td->td_proc, EMUL_DONTLOCK);
head = em->robust_futexes;
head = em->robust_futexes;
} else {
struct proc *p;
@ -693,14 +729,14 @@ linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args
em = em_find(p, EMUL_DONTLOCK);
/* XXX: ptrace? */
if (priv_check(td, PRIV_CRED_SETUID) ||
if (priv_check(td, PRIV_CRED_SETUID) ||
priv_check(td, PRIV_CRED_SETEUID) ||
p_candebug(td, p)) {
PROC_UNLOCK(p);
return (EPERM);
}
head = em->robust_futexes;
PROC_UNLOCK(p);
}

View file

@ -39,17 +39,20 @@
extern LIST_HEAD(futex_list, futex) futex_list;
extern struct mtx futex_mtx;
#define LINUX_FUTEX_WAIT 0
#define LINUX_FUTEX_WAKE 1
#define LINUX_FUTEX_FD 2 /* unused */
#define LINUX_FUTEX_REQUEUE 3
#define LINUX_FUTEX_CMP_REQUEUE 4
#define LINUX_FUTEX_WAKE_OP 5
#define LINUX_FUTEX_LOCK_PI 6
#define LINUX_FUTEX_UNLOCK_PI 7
#define LINUX_FUTEX_TRYLOCK_PI 8
#define LINUX_FUTEX_WAIT 0
#define LINUX_FUTEX_WAKE 1
#define LINUX_FUTEX_FD 2 /* unused */
#define LINUX_FUTEX_REQUEUE 3
#define LINUX_FUTEX_CMP_REQUEUE 4
#define LINUX_FUTEX_WAKE_OP 5
#define LINUX_FUTEX_LOCK_PI 6
#define LINUX_FUTEX_UNLOCK_PI 7
#define LINUX_FUTEX_TRYLOCK_PI 8
#define LINUX_FUTEX_WAIT_BITSET 9
#define LINUX_FUTEX_WAIT_REQUEUE_PI 11
#define LINUX_FUTEX_PRIVATE_FLAG 128
#define LINUX_FUTEX_CLOCK_REALTIME 256
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */

Some files were not shown because too many files have changed in this diff Show more