Merge ^/head r312207 through r312308.

This commit is contained in:
Dimitry Andric 2017-01-16 19:56:27 +00:00
commit 721fc9d8ec
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=312309
129 changed files with 1767 additions and 642 deletions

View file

@ -187,6 +187,7 @@ The following options are recognized in
.It Dv SO_LISTENQLEN Ta "get complete queue length of the socket (get only)"
.It Dv SO_LISTENINCQLEN Ta "get incomplete queue length of the socket (get only)"
.It Dv SO_USER_COOKIE Ta "set the 'so_user_cookie' value for the socket (uint32_t, set only)"
.It Dv SO_TS_CLOCK Ta "set specific format of timestamp returned by SO_TIMESTAMP"
.El
.Pp
.Dv SO_DEBUG
@ -435,7 +436,7 @@ for
.Dv SO_BINTIME .
The
.Vt cmsghdr
fields have the following values for TIMESTAMP:
fields have the following values for TIMESTAMP by default:
.Bd -literal
cmsg_len = CMSG_LEN(sizeof(struct timeval));
cmsg_level = SOL_SOCKET;
@ -450,6 +451,24 @@ and for
cmsg_type = SCM_BINTIME;
.Ed
.Pp
Additional timestamp types are available by following
.Dv SO_TIMESTAMP
with
.Dv SO_TS_CLOCK ,
which requests a specific timestamp format to be returned instead of
.Dv SCM_TIMESTAMP when
.Dv SO_TIMESTAMP is enabled.
The following
.Dv SO_TS_CLOCK
values are recognized in
.Fx :
.Bl -column SO_TS_CLOCK -offset indent
.It Dv SO_TS_REALTIME_MICRO Ta "realtime (SCM_TIMESTAMP, struct timeval), default"
.It Dv SO_TS_BINTIME Ta "realtime (SCM_BINTIME, struct bintime)"
.It Dv SO_TS_REALTIME Ta "realtime (SCM_REALTIME, struct timespec)"
.It Dv SO_TS_MONOTONIC Ta "monotonic time (SCM_MONOTONIC, struct timespec)"
.El
.Pp
.Dv SO_ACCEPTCONN ,
.Dv SO_TYPE ,
.Dv SO_PROTOCOL

View file

@ -55,10 +55,7 @@ TAP_TESTS_C+= ctrig_test
TAP_TESTS_C+= exponential_test
TAP_TESTS_C+= fenv_test
TAP_TESTS_C+= fma_test
# clang 3.8.0 always fails this test. See: bug 208703
.if ! (${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} >= 30800)
TAP_TESTS_C+= fmaxmin_test
.endif
TAP_TESTS_C+= ilogb2_test
TAP_TESTS_C+= invtrig_test
TAP_TESTS_C+= invctrig_test

View file

@ -86,6 +86,8 @@ testall_r(long double big, long double small)
return (ok);
}
const char *comment = NULL;
/*
* Test all the functions: fmaxf, fmax, fmaxl, fminf, fmin, and fminl,
* in all rounding modes and with the arguments in different orders.
@ -107,10 +109,17 @@ testall(int testnum, long double big, long double small)
break;
}
}
printf("%sok %d - big = %.20Lg, small = %.20Lg\n",
(i == 4) ? "" : "not ", testnum, big, small);
printf("%sok %d - big = %.20Lg, small = %.20Lg%s\n",
(i == 4) ? "" : "not ", testnum, big, small,
comment == NULL ? "" : comment);
}
/* Clang 3.8.0+ fails the invariants for testcase 6, 7, 10, and 11. */
#if defined(__clang__) && \
(__clang_major__ >= 3 && __clang_minor__ >= 8 && __clang_patchlevel__ >= 0)
#define affected_by_bug_208703
#endif
int
main(int argc, char *argv[])
{
@ -122,15 +131,23 @@ main(int argc, char *argv[])
testall(3, nextafterf(42.0, INFINITY), 42.0);
testall(4, -5.0, -5.0);
testall(5, -3.0, -4.0);
#ifdef affected_by_bug_208703
comment = "# TODO: testcase 6-7 fails invariant with clang 3.8+ (bug 208703)";
#endif
testall(6, 1.0, NAN);
testall(7, INFINITY, NAN);
comment = NULL;
testall(8, INFINITY, 1.0);
testall(9, -3.0, -INFINITY);
testall(10, 3.0, -INFINITY);
#ifdef affected_by_bug_208703
comment = "# TODO: testcase 11-12 fails invariant with clang 3.8+ (bug 208703)";
#endif
testall(11, NAN, NAN);
/* This test isn't strictly required to work by C99. */
testall(12, 0.0, -0.0);
comment = NULL;
return (0);
}

View file

@ -35,8 +35,8 @@
struct Struct_Obj_Entry;
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) \
((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC))
Elf_Dyn *rtld_dynamic_addr(void);
#define rtld_dynamic(obj) rtld_dynamic_addr()
/* Fixup the jump slot at "where" to transfer control to "target". */
static inline Elf_Addr

View file

@ -156,4 +156,16 @@ _rtld_bind_start:
.cfi_endproc
.size _rtld_bind_start, . - _rtld_bind_start
.align 4
.globl rtld_dynamic_addr
.type rtld_dynamic_addr,@function
rtld_dynamic_addr:
.cfi_startproc
.weak _DYNAMIC
.hidden _DYNAMIC
lea _DYNAMIC(%rip),%rax
ret
.cfi_endproc
.size rtld_dynamic_addr, . - rtld_dynamic_addr
.section .note.GNU-stack,"",%progbits

View file

@ -6,7 +6,7 @@
# Packages to install into the image we're creating. This is a deliberately
# minimalist set, providing only the packages necessary to bootstrap further
# package installation as specified via EC2 user-data.
export VM_EXTRA_PACKAGES="ec2-scripts firstboot-freebsd-update firstboot-pkgs"
export VM_EXTRA_PACKAGES="ec2-scripts firstboot-freebsd-update firstboot-pkgs dual-dhclient"
# Set to a list of third-party software to enable in rc.conf(5).
export VM_RC_LIST="ec2_configinit ec2_fetchkey ec2_ephemeralswap ec2_loghostkey firstboot_freebsd_update firstboot_pkgs"
@ -39,8 +39,9 @@ vm_extra_pre_umount() {
# time; expand our filesystem to fill the disk.
echo 'growfs_enable="YES"' >> ${DESTDIR}/etc/rc.conf
# EC2 instances use DHCP to get their network configuration.
echo 'ifconfig_DEFAULT="SYNCDHCP"' >> ${DESTDIR}/etc/rc.conf
# EC2 instances use DHCP to get their network configuration. IPv6
# requires accept_rtadv.
echo 'ifconfig_DEFAULT="SYNCDHCP accept_rtadv"' >> ${DESTDIR}/etc/rc.conf
# Unless the system has been configured via EC2 user-data, the user
# will need to SSH in to do anything.
@ -51,6 +52,10 @@ vm_extra_pre_umount() {
# via EC2 user-data.
echo 'firstboot_pkgs_list="awscli"' >> ${DESTDIR}/etc/rc.conf
# Enable IPv6 on all interfaces, and use DHCP on both IPv4 and IPv6.
echo 'ipv6_activate_all_interfaces="YES"' >> ${DESTDIR}/etc/rc.conf
echo 'dhclient_program="/usr/local/sbin/dual-dhclient"' >> ${DESTDIR}/etc/rc.conf
# The EC2 console is output-only, so while printing a backtrace can
# be useful, there's no point dropping into a debugger or waiting
# for a keypress.

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 6, 2017
.Dd January 15, 2017
.Dt CAMCONTROL 8
.Os
.Sh NAME
@ -565,12 +565,12 @@ start bit set and the load/eject bit set.
Send the SCSI Start/Stop Unit (0x1B) command to the given device with the
start bit cleared and the load/eject bit set.
.It Ic rescan
Tell the kernel to scan all busses in the system (with the
Tell the kernel to scan all buses in the system (with the
.Ar all
argument), the given bus (XPT_SCAN_BUS), or bus:target:lun
(XPT_SCAN_LUN) for new devices or devices that have gone away.
The user
may specify a scan of all busses, a single bus, or a lun.
may specify a scan of all buses, a single bus, or a lun.
Scanning all luns
on a target is not supported.
.It Ic reprobe
@ -580,7 +580,7 @@ notify the upper layer,
This includes sending the SCSI READ CAPACITY command and updating
the disk size visible to the rest of the system.
.It Ic reset
Tell the kernel to reset all busses in the system (with the
Tell the kernel to reset all buses in the system (with the
.Ar all
argument) or the given bus (XPT_RESET_BUS) by issuing a SCSI bus
reset for that bus, or to reset the given bus:target:lun
@ -2557,7 +2557,7 @@ write reallocation settings, among other things.
.Pp
.Dl camcontrol rescan all
.Pp
Rescan all SCSI busses in the system for devices that have been added,
Rescan all SCSI buses in the system for devices that have been added,
removed or changed.
.Pp
.Dl camcontrol rescan 0

View file

@ -3179,8 +3179,8 @@ rescan_or_reset_bus(path_id_t bus, int rescan)
/*
* The right way to handle this is to modify the xpt so that it can
* handle a wildcarded bus in a rescan or reset CCB. At the moment
* that isn't implemented, so instead we enumerate the busses and
* send the rescan or reset to those busses in the case where the
* that isn't implemented, so instead we enumerate the buses and
* send the rescan or reset to those buses in the case where the
* given bus is -1 (wildcard). We don't send a rescan or reset
* to the xpt bus; sending a rescan to the xpt bus is effectively a
* no-op, sending a rescan to the xpt bus would result in a status of
@ -4150,7 +4150,7 @@ scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt,
u_int8_t cdb[20];
u_int8_t atacmd[12];
struct get_hook hook;
int c, data_bytes = 0;
int c, data_bytes = 0, valid_bytes;
int cdb_len = 0;
int atacmd_len = 0;
int dmacmd = 0;
@ -4454,16 +4454,20 @@ scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt,
}
}
if (cdb_len)
valid_bytes = ccb->csio.dxfer_len - ccb->csio.resid;
else
valid_bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
&& (arglist & CAM_ARG_CMD_IN)
&& (data_bytes > 0)) {
&& (valid_bytes > 0)) {
if (fd_data == 0) {
buff_decode_visit(data_ptr, data_bytes, datastr,
buff_decode_visit(data_ptr, valid_bytes, datastr,
arg_put, NULL);
fprintf(stdout, "\n");
} else {
ssize_t amt_written;
int amt_to_write = data_bytes;
int amt_to_write = valid_bytes;
u_int8_t *buf_ptr = data_ptr;
for (amt_written = 0; (amt_to_write > 0) &&
@ -4478,7 +4482,7 @@ scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt,
} else if ((amt_written == 0)
&& (amt_to_write > 0)) {
warnx("only wrote %u bytes out of %u",
data_bytes - amt_to_write, data_bytes);
valid_bytes - amt_to_write, valid_bytes);
}
}
}
@ -8950,8 +8954,8 @@ usage(int printlong)
"load send a Start Unit command to the device with the load bit set\n"
"eject send a Stop Unit command to the device with the eject bit set\n"
"reprobe update capacity information of the given device\n"
"rescan rescan all busses, the given bus, or bus:target:lun\n"
"reset reset all busses, the given bus, or bus:target:lun\n"
"rescan rescan all buses, the given bus, or bus:target:lun\n"
"reset reset all buses, the given bus, or bus:target:lun\n"
"defects read the defect list of the specified device\n"
"modepage display or edit (-e) the given mode page\n"
"cmd send the given SCSI command, may need -i or -o as well\n"

View file

@ -124,6 +124,8 @@ Atheros AR8172 PCI Express Fast Ethernet controller
Killer E2200 Gigabit Ethernet controller
.It
Killer E2400 Gigabit Ethernet controller
.It
Killer E2500 Gigabit Ethernet controller
.El
.Sh LOADER TUNABLES
Tunables can be set at the

View file

@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 4, 2012
.Dd January 15, 2017
.Dt EHCI 4
.Os
.Sh NAME
@ -68,7 +68,7 @@ but can be noticed since
1.x and
.Tn USB
2.0 devices plugged in to the same
connector appear to connect to different USB busses.
connector appear to connect to different USB buses.
.Sh SEE ALSO
.Xr ohci 4 ,
.Xr uhci 4 ,

View file

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 17, 2014
.Dd January 15, 2017
.Dt IICBUS 4
.Os
.Sh NAME
@ -112,11 +112,11 @@ different speed.
A general purpose I2C bus, such as those found in many embedded systems,
will often support multiple bus frequencies.
.Pp
When a system supports multiple I2C busses, a different frequency can
When a system supports multiple I2C buses, a different frequency can
be configured for each bus by number, represented by the
.Va %d
in the variable names below.
Busses can be configured using any combination of device hints,
Buses can be configured using any combination of device hints,
Flattened Device Tree (FDT) data, tunables set via
.Xr loader 8 ,
or at runtime using

View file

@ -24,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.Dd June 7, 2012
.Dd January 15, 2017
.Dt CAM 4
.Os
.Sh NAME
@ -62,7 +62,7 @@ devices, and to utilize different
and
.Tn ATA
host adapters through host adapter drivers.
When the system probes busses, it attaches any devices it finds to the
When the system probes buses, it attaches any devices it finds to the
appropriate drivers.
The
.Xr pass 4
@ -150,7 +150,7 @@ In that case, the
will be reset to 100ms.
.El
.Pp
All devices and busses support dynamic allocation so that
All devices and buses support dynamic allocation so that
an upper number of devices and controllers does not need to be configured;
.Cd "device da"
will suffice for any number of disk drivers.
@ -297,7 +297,7 @@ see printfs for multiple debugging levels.
This allows the various debugging flags to be set from a kernel config file.
.It Dv CAM_DEBUG_BUS
Specify a bus to debug.
To debug all busses, set this to -1.
To debug all buses, set this to -1.
.It Dv CAM_DEBUG_TARGET
Specify a target to debug.
To debug all targets, set this to -1.

View file

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 6, 2005
.Dd January 15, 2017
.Dt BUS_CONFIG_INTR 9
.Os
.\"
@ -45,7 +45,7 @@
The
.Fn BUS_CONFIG_INTR
method allows bus or device drivers to provide interrupt polarity and trigger
mode to parent busses.
mode to parent buses.
This typically bubbles all the way up to the root bus (e.g.\& nexus) where the
necessary actions are taken to actually program the hardware.
Since the

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 6, 2005
.Dd January 15, 2017
.Dt DEVICE_ATTACH 9
.Os
.Sh NAME
@ -51,7 +51,7 @@ system resources (such as
.Xr devfs 5
entries).
.Pp
Devices which implement busses should use this method to probe for
Devices which implement buses should use this method to probe for
the existence of devices attached to the bus and add them as
children.
If this is combined with the use of

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd May 13, 2004
.Dd January 15, 2017
.Dt DEVICE_IDENTIFY 9
.Os
.Sh NAME
@ -40,7 +40,7 @@
.Ft void
.Fn DEVICE_IDENTIFY "driver_t *driver" "device_t parent"
.Sh DESCRIPTION
The identify function for a device is only needed for devices on busses
The identify function for a device is only needed for devices on buses
that cannot identify their children independently, e.g.\& the ISA bus.
It is used to recognize the device (usually done by accessing non-ambiguous
registers in the hardware) and to tell the kernel about it and thus

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd August 21, 2012
.Dd January 15, 2017
.Dt DRIVER_MODULE 9
.Os
.Sh NAME
@ -69,9 +69,9 @@ or
The identifier used in
.Fn DRIVER_MODULE
can be different from the driver name.
Also, the same driver identifier can exist on different busses,
Also, the same driver identifier can exist on different buses,
which is a pretty clean way of making front ends for different cards
using the same driver on the same or different busses.
using the same driver on the same or different buses.
For example, the following is allowed:
.Pp
.Fn DRIVER_MODULE foo isa foo_driver foo_devclass NULL NULL ;

View file

@ -28,14 +28,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt BUS_GENERIC_ATTACH 9
.Os
.Sh NAME
.Nm bus_generic_attach
.Nd generic implementation of
.Dv DEVICE_ATTACH
for busses
for buses
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h

View file

@ -28,14 +28,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt BUS_GENERIC_DETACH 9
.Os
.Sh NAME
.Nm bus_generic_detach
.Nd generic implementation of
.Dv DEVICE_DETACH
for busses
for buses
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 8, 2009
.Dd January 15, 2017
.Dt BUS_GENERIC_NEW_PASS 9
.Os
.Sh NAME
@ -47,7 +47,7 @@ It first invokes the
method for any drivers whose pass level is equal to the new pass level.
Then, for each attached child device it calls
.Xr BUS_NEW_PASS 9
to rescan child busses,
to rescan child buses,
and for each unattached child device it calls
.Xr device_probe_and_attach 9 .
.Sh SEE ALSO

View file

@ -28,14 +28,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt BUS_GENERIC_PRINT_CHILD 9
.Os
.Sh NAME
.Nm bus_generic_print_child
.Nd generic implementation of
.Dv DEVICE_PRINT_CHILD
for busses
for buses
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt BUS_GENERIC_READ_IVAR 9
.Os
.Sh NAME
@ -38,7 +38,7 @@
.Dv BUS_READ_IVAR
and
.Dv BUS_WRITE_IVAR
for busses
for buses
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h

View file

@ -28,14 +28,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt BUS_GENERIC_SHUTDOWN 9
.Os
.Sh NAME
.Nm bus_generic_shutdown
.Nd generic implementation of
.Dv DEVICE_SHUTDOWN
for busses
for buses
.Sh SYNOPSIS
.In sys/param.h
.In sys/bus.h

View file

@ -51,7 +51,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 13, 2005
.Dd January 15, 2017
.Dt BUS_SPACE 9
.Os
.Sh NAME
@ -601,7 +601,7 @@ of devices on different system architectures, and to allow a single driver
object file to manipulate a set of devices on multiple bus types on a
single architecture.
.Pp
Not all busses have to implement all functions described in this
Not all buses have to implement all functions described in this
document, though that is encouraged if the operations are logically
supported by the bus.
Unimplemented functions should cause
@ -625,7 +625,7 @@ machine-dependent code.
A given machine may have several different types
of bus space (e.g.\& memory space and I/O space), and thus may provide
multiple different bus space tags.
Individual busses or devices on a machine may use more than one bus space
Individual buses or devices on a machine may use more than one bus space
tag.
For instance, ISA devices are
given an ISA memory space tag and an ISA I/O space tag.
@ -639,7 +639,7 @@ The
bus address describes the start of the range in bus space.
The bus
size describes the size of the range in bytes.
Busses which are not byte
Buses which are not byte
addressable may require use of bus space ranges with appropriately
aligned addresses and properly rounded sizes.
.Pp
@ -656,7 +656,7 @@ argument, at least one handle argument, and at least one offset argument
The bus space tag specifies the space, each handle specifies a region in
the space, and each offset specifies the offset into the region of the
actual location(s) to be accessed.
Offsets are given in bytes, though busses
Offsets are given in bytes, though buses
may impose alignment constraints.
The offset used to access data
relative to a given handle must be such that all of the data being
@ -1049,7 +1049,7 @@ The
and
.Fn bus_space_write_N
families of functions provide
the ability to read and write 1, 2, 4, and 8 byte data items on busses
the ability to read and write 1, 2, 4, and 8 byte data items on buses
which support those access sizes.
.Ss Fn bus_space_read_1 space handle offset
.Ss Fn bus_space_read_2 space handle offset

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 16, 1998
.Dd January 15, 2017
.Dt DEVICE 9
.Os
.Sh NAME
@ -44,7 +44,7 @@ The system defines one device,
.Va root_bus
and all other devices are created dynamically during
autoconfiguration.
Normally devices representing top-level busses in
Normally devices representing top-level buses in
the system (ISA, PCI etc.) will be attached directly to
.Va root_bus
and other devices will be added as children of their relevant bus.

View file

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd October 28, 2015
.Dd January 15, 2017
.Dt DEVICE_ADD_CHILD 9
.Os
.Sh NAME
@ -63,10 +63,10 @@ If no name is given then all drivers for the owning bus are probed.
In any event, only the name of the device is stored so that one may
safely unload/load a driver bound to that name.
.Pp
This allows busses which can uniquely identify device instances (such
This allows buses which can uniquely identify device instances (such
as PCI) to allow each driver to check each device instance for a
match.
For busses which rely on supplied probe hints where only one
For buses which rely on supplied probe hints where only one
driver can have a chance of probing the device, the driver name should
be specified as the device name.
.Pp

View file

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd September 6, 2016
.Dd January 15, 2017
.Dt PCI 9
.Os
.Sh NAME
@ -583,14 +583,14 @@ full functionality to the user.
.It Dv PCI_POWERSTATE_D1
Class-specific low-power state in which device context may or
may not be lost.
Busses in this state cannot do anything to the bus, to
Buses in this state cannot do anything to the bus, to
force devices to lose context.
.It Dv PCI_POWERSTATE_D2
Class-specific low-power state in which device context may or
may not be lost.
Attains greater power savings than
.Dv PCI_POWERSTATE_D1 .
Busses in this state can cause devices to lose some context.
Buses in this state can cause devices to lose some context.
Devices
.Em must
be prepared for the bus to be in this state or higher.

View file

@ -13,10 +13,6 @@
#
# umask 022
# Enable the builtin emacs(1) command line editor in sh(1),
# e.g. C-a -> beginning-of-line.
set -o emacs
# Uncomment this and comment the above to enable the builtin vi(1) command
# line editor in sh(1), e.g. ESC to go into visual mode.
# set -o vi

View file

@ -30,7 +30,7 @@
#define _VMM_HOST_H_
#ifndef _KERNEL
#error "no user-servicable parts inside"
#error "no user-serviceable parts inside"
#endif
struct xsave_limits {

View file

@ -36,7 +36,6 @@
*/
#include <machine/asm.h>
#include <machine/cpuconf.h>
__FBSDID("$FreeBSD$");
/*

View file

@ -57,7 +57,6 @@ __FBSDID("$FreeBSD$");
#include <vm/pmap.h>
#include <vm/uma.h>
#include <machine/cpuconf.h>
#include <machine/cpufunc.h>
#if defined(CPU_XSCALE_81342)

View file

@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/endian.h>
#include <machine/cpuconf.h>
#include <machine/md_var.h>
char machine[] = "arm";

View file

@ -37,7 +37,6 @@
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>
#include <machine/pte-v4.h>
__FBSDID("$FreeBSD$");

View file

@ -34,7 +34,6 @@
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#include <machine/cpuconf.h>
#include <machine/pte-v6.h>
__FBSDID("$FreeBSD$");

View file

@ -50,7 +50,7 @@ OF_decode_addr(phandle_t dev, int regno, bus_space_tag_t *tag,
return (res);
/*
* Nothing special to do for PCI busses right now.
* Nothing special to do for PCI buses right now.
* This may need to be handled per-platform when it does come up.
*/
#ifdef notyet

View file

@ -139,11 +139,7 @@ static const struct data_abort data_aborts[] = {
{dab_align, "Alignment Fault 3"},
{dab_buserr, "External Linefetch Abort (S)"},
{NULL, "Translation Fault (S)"},
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
{NULL, "Translation Flag Fault"},
#else
{dab_buserr, "External Linefetch Abort (P)"},
#endif
{NULL, "Translation Fault (P)"},
{dab_buserr, "External Non-Linefetch Abort (S)"},
{NULL, "Domain Fault (S)"},

View file

@ -90,7 +90,7 @@ ccm_init_gates(struct ccm_softc *sc)
{
uint32_t reg;
/* ahpbdma, aipstz 1 & 2 busses */
/* ahpbdma, aipstz 1 & 2 buses */
reg = CCGR0_AIPS_TZ1 | CCGR0_AIPS_TZ2 | CCGR0_ABPHDMA;
WR4(sc, CCM_CCGR0, reg);

View file

@ -117,7 +117,7 @@ fdt_pic_decode_t fdt_pic_table[] = {
* - GIC node exists and is its own interrupt parent.
*
* This applies to all models of imx6. Luckily all of them have the devices
* involved at the same addresses on the same busses, so we don't need any
* involved at the same addresses on the same buses, so we don't need any
* per-soc logic. We handle this at platform attach time rather than via the
* fdt_fixup_table, because the latter requires matching on the FDT "model"
* property, and this applies to all boards including those not yet invented.

View file

@ -35,7 +35,7 @@
* Note that the hardware is capable of running as both a master and a slave.
* This driver currently implements only master-mode operations.
*
* This driver supports multi-master i2c busses, by detecting bus arbitration
* This driver supports multi-master i2c buses, by detecting bus arbitration
* loss and returning IIC_EBUSBSY status. Notably, it does not do any kind of
* retries if some other master jumps onto the bus and interrupts one of our
* transfer cycles resulting in arbitration loss in mid-transfer. The caller

View file

@ -44,8 +44,6 @@
#ifndef _KERNEL
#include <machine/sysarch.h>
#else
#include <machine/cpuconf.h>
#endif
#if __ARM_ARCH >= 6

View file

@ -1,195 +0,0 @@
/* $NetBSD: cpuconf.h,v 1.8 2003/09/06 08:55:42 rearnsha Exp $ */
/*-
* Copyright (c) 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _MACHINE_CPUCONF_H_
#define _MACHINE_CPUCONF_H_
/*
* IF YOU CHANGE THIS FILE, MAKE SURE TO UPDATE THE DEFINITION OF
* "PMAP_NEEDS_PTE_SYNC" IN <arm/arm32/pmap.h> FOR THE CPU TYPE
* YOU ARE ADDING SUPPORT FOR.
*/
/*
* Step 1: Count the number of CPU types configured into the kernel.
*/
#define CPU_NTYPES (defined(CPU_ARM9) + \
defined(CPU_ARM9E) + \
defined(CPU_ARM1176) + \
defined(CPU_XSCALE_PXA2X0) + \
defined(CPU_FA526) + \
defined(CPU_XSCALE_IXP425)) + \
defined(CPU_CORTEXA8) + \
defined(CPU_CORTEXA_MP) + \
defined(CPU_KRAIT) + \
defined(CPU_MV_PJ4B)
/*
* Step 2: Determine which ARM architecture versions are configured.
*/
#if defined(CPU_ARM9) || defined(CPU_FA526)
#define ARM_ARCH_4 1
#else
#define ARM_ARCH_4 0
#endif
#if (defined(CPU_ARM9E) || \
defined(CPU_XSCALE_81342) || \
defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425))
#define ARM_ARCH_5 1
#else
#define ARM_ARCH_5 0
#endif
#if !defined(ARM_ARCH_6)
#if defined(CPU_ARM1176)
#define ARM_ARCH_6 1
#else
#define ARM_ARCH_6 0
#endif
#endif
#if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || \
defined(CPU_KRAIT) || defined(CPU_MV_PJ4B)
#define ARM_ARCH_7A 1
#else
#define ARM_ARCH_7A 0
#endif
#define ARM_NARCH (ARM_ARCH_4 + ARM_ARCH_5 + ARM_ARCH_6 | ARM_ARCH_7A)
/*
* Compatibility for userland builds that have no CPUTYPE defined. Use the ARCH
* constants predefined by the compiler to define our old-school arch constants.
* This is a stopgap measure to tide us over until the conversion of all code
* to the newer ACLE constants defined by ARM (see acle-compat.h).
*/
#if ARM_NARCH == 0
#if defined(__ARM_ARCH_4T__)
#undef ARM_ARCH_4
#undef ARM_NARCH
#define ARM_ARCH_4 1
#define ARM_NARCH 1
#define CPU_ARM9 1
#elif defined(__ARM_ARCH_6ZK__)
#undef ARM_ARCH_6
#undef ARM_NARCH
#define ARM_ARCH_6 1
#define ARM_NARCH 1
#define CPU_ARM1176 1
#endif
#endif
#if ARM_NARCH == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NARCH is 0
#endif
#if ARM_ARCH_5 || ARM_ARCH_6 || ARM_ARCH_7A
/*
* We could support Thumb code on v4T, but the lack of clean interworking
* makes that hard.
*/
#define THUMB_CODE
#endif
/*
* Step 3: Define which MMU classes are configured:
*
* ARM_MMU_MEMC Prehistoric, external memory controller
* and MMU for ARMv2 CPUs.
*
* ARM_MMU_GENERIC Generic ARM MMU, compatible with ARMv4 and v5.
*
* ARM_MMU_V6 ARMv6 MMU.
*
* ARM_MMU_V7 ARMv7 MMU.
*
* ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
* MMU, but also has several extensions which
* require different PTE layout to use.
*/
#if (defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_FA526))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0
#endif
#if defined(CPU_ARM1176)
#define ARM_MMU_V6 1
#else
#define ARM_MMU_V6 0
#endif
#if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || \
defined(CPU_KRAIT) || defined(CPU_MV_PJ4B)
#define ARM_MMU_V7 1
#else
#define ARM_MMU_V7 0
#endif
#if (defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_81342))
#define ARM_MMU_XSCALE 1
#else
#define ARM_MMU_XSCALE 0
#endif
#define ARM_NMMUS (ARM_MMU_GENERIC + ARM_MMU_V6 + \
ARM_MMU_V7 + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NMMUS is 0
#endif
/*
* Step 4: Define features that may be present on a subset of CPUs
*
* ARM_XSCALE_PMU Performance Monitoring Unit on 81342
*/
#if (defined(CPU_XSCALE_81342))
#define ARM_XSCALE_PMU 1
#else
#define ARM_XSCALE_PMU 0
#endif
#if defined(CPU_XSCALE_81342)
#define CPU_XSCALE_CORE3
#endif
#endif /* _MACHINE_CPUCONF_H_ */

View file

@ -48,7 +48,6 @@
#include <sys/types.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>
static __inline void
breakpoint(void)

View file

@ -32,8 +32,6 @@
#ifdef _KERNEL
#include <machine/cpuconf.h>
#define ALT_STACK_SIZE 128
struct vmspace;

View file

@ -51,7 +51,30 @@
#define _MACHINE_PMAP_V4_H_
#include <machine/pte-v4.h>
#include <machine/cpuconf.h>
/*
* Define the MMU types we support based on the cpu types. While the code has
* some theoretical support for multiple MMU types in a single kernel, there are
* no actual working configurations that use that feature.
*/
#if (defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_FA526))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0
#endif
#if (defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_XSCALE_81342))
#define ARM_MMU_XSCALE 1
#else
#define ARM_MMU_XSCALE 0
#endif
#define ARM_NMMUS (ARM_MMU_GENERIC + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0 && !defined(KLD_MODULE) && defined(_KERNEL)
#error ARM_NMMUS is 0
#endif
/*
* Pte related macros
*/

View file

@ -147,7 +147,7 @@ mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space,
/*
* Initialize the ranges so that we don't have to worry about
* having them all defined in the FDT. In particular, it is
* perfectly fine not to want I/O space on PCI busses.
* perfectly fine not to want I/O space on PCI buses.
*/
bzero(io_space, sizeof(*io_space));
bzero(mem_space, sizeof(*mem_space));

View file

@ -219,20 +219,19 @@ find_currdev(EFI_LOADED_IMAGE *img, struct devsw **dev, int *unit,
if (h == NULL)
break;
if (efi_handle_lookup(h, dev, unit, extra) == 0) {
if (copy != NULL)
free(copy);
return (0);
}
free(copy);
copy = NULL;
if (efi_handle_lookup(h, dev, unit, extra) == 0)
return (0);
if (copy != NULL)
free(copy);
devpath = efi_lookup_devpath(h);
if (devpath != NULL) {
copy = efi_devpath_trim(devpath);
devpath = copy;
}
}
free(copy);
/* Try to fallback on first device */
if (devsw[0] != NULL) {

View file

@ -31,7 +31,7 @@
#ifndef _CAM_CAM_COMPAT_H
#define _CAM_CAM_COMPAT_H
/* No user-servicable parts in here. */
/* No user-serviceable parts in here. */
#ifdef _KERNEL
int cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,

View file

@ -31,7 +31,7 @@
#ifndef _CAM_CAM_IOSCHED_H
#define _CAM_CAM_IOSCHED_H
/* No user-servicable parts in here. */
/* No user-serviceable parts in here. */
#ifdef _KERNEL
/* Forward declare all structs to keep interface thin */

View file

@ -106,7 +106,12 @@ struct xpt_softc {
int buses_to_config;
int buses_config_done;
/* Registered busses */
/*
* Registered buses
*
* N.B., "busses" is an archaic spelling of "buses". In new code
* "buses" is preferred.
*/
TAILQ_HEAD(,cam_eb) xpt_busses;
u_int bus_generation;
@ -892,7 +897,7 @@ xpt_init(void *dummy)
/*
* The xpt layer is, itself, the equivalent of a SIM.
* Allow 16 ccbs in the ccb pool for it. This should
* give decent parallelism when we probe busses and
* give decent parallelism when we probe buses and
* perform other XPT functions.
*/
devq = cam_simq_alloc(16);
@ -1627,7 +1632,7 @@ xptedtbusfunc(struct cam_eb *bus, void *arg)
}
/*
* If the user is only interested in busses, there's no
* If the user is only interested in buses, there's no
* reason to descend to the next level in the tree.
*/
if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
@ -2070,7 +2075,7 @@ xptperiphlistmatch(struct ccb_dev_match *cdm)
/*
* At this point in the edt traversal function, we check the bus
* list generation to make sure that no busses have been added or
* list generation to make sure that no buses have been added or
* removed since the user last sent a XPT_DEV_MATCH ccb through.
* For the peripheral driver list traversal function, however, we
* don't have to worry about new peripheral driver types coming or
@ -2816,7 +2821,7 @@ xpt_action_default(union ccb *start_ccb)
/*
* There are two ways of getting at information in the EDT.
* The first way is via the primary EDT tree. It starts
* with a list of busses, then a list of targets on a bus,
* with a list of buses, then a list of targets on a bus,
* then devices/luns on a target, and then peripherals on a
* device/lun. The "other" way is by the peripheral driver
* lists. The peripheral driver lists are organized by
@ -3356,7 +3361,7 @@ xpt_run_devq(struct cam_devq *devq)
/*
* Device queues can be shared among multiple SIM instances
* that reside on different busses. Use the SIM from the
* that reside on different buses. Use the SIM from the
* queued device, rather than the one from the calling bus.
*/
sim = device->sim;
@ -3865,7 +3870,7 @@ CAM_XPT_XPORT(xport_default);
* A sim structure, listing the SIM entry points and instance
* identification info is passed to xpt_bus_register to hook the SIM
* into the CAM framework. xpt_bus_register creates a cam_eb entry
* for this new bus and places it in the array of busses and assigns
* for this new bus and places it in the array of buses and assigns
* it a path_id. The path_id may be influenced by "hard wiring"
* information specified by the user. Once interrupt services are
* available, the bus will be probed.
@ -5040,7 +5045,7 @@ xpt_release_boot(void)
xsoftc.buses_config_done = 1;
xpt_unlock_buses();
/* Call manually because we don't have any busses */
/* Call manually because we don't have any buses */
task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
if (task != NULL) {
TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
@ -5138,7 +5143,7 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
(csa.event_enable & AC_PATH_REGISTERED)) {
/*
* Get this peripheral up to date with all
* the currently existing busses.
* the currently existing buses.
*/
xpt_for_all_busses(xptsetasyncbusfunc, &csa);
}

View file

@ -88,7 +88,7 @@ SET_DECLARE(cam_xpt_proto_set, struct xpt_proto);
/*
* The CAM EDT (Existing Device Table) contains the device information for
* all devices for all busses in the system. The table contains a
* all devices for all buses in the system. The table contains a
* cam_ed structure for each device on the bus.
*/
struct cam_ed {

View file

@ -5053,18 +5053,13 @@ ctl_config_move_done(union ctl_io *io)
if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
/*
* For hardware error sense keys, the sense key
* specific value is defined to be a retry count,
* but we use it to pass back an internal FETD
* error code. XXX KDM Hopefully the FETD is only
* using 16 bits for an error code, since that's
* all the space we have in the sks field.
*/
ctl_set_internal_failure(&io->scsiio,
/*sks_valid*/ 1,
/*retry_count*/
io->io_hdr.port_status);
ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
/*retry_count*/ io->io_hdr.port_status);
} else if (io->scsiio.kern_data_resid != 0 &&
(io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
ctl_set_invalid_field_ciu(&io->scsiio);
}
if (ctl_debug & CTL_DEBUG_CDB_DATA)
@ -5462,7 +5457,6 @@ ctl_format(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
ctsio->kern_data_len = length;
ctsio->kern_total_len = length;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -5588,7 +5582,6 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
}
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctl_set_success(ctsio);
@ -5634,7 +5627,6 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -5742,7 +5734,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -5788,7 +5779,6 @@ ctl_unmap(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -6278,7 +6268,6 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -6508,7 +6497,6 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (total_len < alloc_len) {
ctsio->residual = alloc_len - total_len;
@ -6861,7 +6849,6 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (total_len < alloc_len) {
ctsio->residual = alloc_len - total_len;
@ -6929,7 +6916,6 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
ctsio->residual = 0;
ctsio->kern_data_len = sizeof(*data);
ctsio->kern_total_len = sizeof(*data);
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -6995,7 +6981,6 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -7050,7 +7035,6 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -7112,7 +7096,6 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -7211,7 +7194,6 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (ext) {
@ -7412,7 +7394,6 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
switch (cdb->options & RSO_OPTIONS_MASK) {
@ -7526,7 +7507,6 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
@ -7574,7 +7554,6 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
@ -7647,7 +7626,6 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -8225,7 +8203,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
ctsio->kern_data_len = param_len;
ctsio->kern_total_len = param_len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -9207,7 +9184,6 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9270,7 +9246,6 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = cdb->length;
ctsio->kern_total_len = cdb->length;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9409,7 +9384,6 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9478,7 +9452,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9537,7 +9510,6 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9613,7 +9585,6 @@ ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9678,7 +9649,6 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9807,7 +9777,6 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9883,7 +9852,6 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -9959,7 +9927,6 @@ ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -10016,7 +9983,6 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -10151,7 +10117,6 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (data_len < alloc_len) {
@ -10379,7 +10344,6 @@ ctl_get_config(struct ctl_scsiio *ctsio)
sizeof(struct scsi_get_config_feature) + 4;
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
@ -10585,7 +10549,6 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
data_len = sizeof(struct scsi_get_event_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (data_len < alloc_len) {
@ -10623,7 +10586,6 @@ ctl_mechanism_status(struct ctl_scsiio *ctsio)
data_len = sizeof(struct scsi_mechanism_status_header);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (data_len < alloc_len) {
@ -10683,7 +10645,6 @@ ctl_read_toc(struct ctl_scsiio *ctsio)
data_len += sizeof(struct scsi_read_toc_type01_descr);
ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
ctsio->kern_sg_entries = 0;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
if (data_len < alloc_len) {
@ -12585,6 +12546,9 @@ ctl_datamove(union ctl_io *io)
CTL_DEBUG_PRINT(("ctl_datamove\n"));
/* No data transferred yet. Frontend must update this when done. */
io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
char str[256];

View file

@ -419,6 +419,16 @@ ctl_be_block_move_done(union ctl_io *io)
*/
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
} else if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
/*retry_count*/ io->io_hdr.port_status);
} else if (io->scsiio.kern_data_resid != 0 &&
(io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
lbalen = ARGS(beio->io);
@ -428,21 +438,6 @@ ctl_be_block_move_done(union ctl_io *io)
/* We have two data blocks ready for comparison. */
ctl_be_block_compare(io);
}
} else if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
/*
* For hardware error sense keys, the sense key
* specific value is defined to be a retry count,
* but we use it to pass back an internal FETD
* error code. XXX KDM Hopefully the FETD is only
* using 16 bits for an error code, since that's
* all the space we have in the sks field.
*/
ctl_set_internal_failure(&io->scsiio,
/*sks_valid*/ 1,
/*retry_count*/
io->io_hdr.port_status);
}
/*
@ -1634,7 +1629,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
else
io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
io->scsiio.kern_data_len = beio->io_len;
io->scsiio.kern_data_resid = 0;
io->scsiio.kern_sg_entries = beio->num_segs;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;

View file

@ -231,6 +231,16 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
;
} else if (io->io_hdr.port_status != 0 &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
/*retry_count*/ io->io_hdr.port_status);
} else if (io->scsiio.kern_data_resid != 0 &&
(io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
ctl_set_invalid_field_ciu(&io->scsiio);
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
@ -243,21 +253,6 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
return (0);
}
ctl_set_success(&io->scsiio);
} else if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
/*
* For hardware error sense keys, the sense key
* specific value is defined to be a retry count,
* but we use it to pass back an internal FETD
* error code. XXX KDM Hopefully the FETD is only
* using 16 bits for an error code, since that's
* all the space we have in the sks field.
*/
ctl_set_internal_failure(&io->scsiio,
/*sks_valid*/ 1,
/*retry_count*/
io->io_hdr.port_status);
}
ctl_data_submit_done(io);
return(0);
@ -318,7 +313,6 @@ ctl_backend_ramdisk_continue(union ctl_io *io)
#endif /* CTL_RAMDISK_PAGES */
io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
io->scsiio.kern_data_resid = 0;
io->scsiio.kern_data_len = len_filled;
io->scsiio.kern_sg_entries = sg_filled;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;

View file

@ -641,6 +641,18 @@ ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
/*data*/ sks,
SSD_ELEM_NONE);
}
void
ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
{
/* "Invalid field in command information unit" */
ctl_set_sense(ctsio,
/*current_error*/ 1,
/*sense_key*/ SSD_KEY_ABORTED_COMMAND,
/*ascq*/ 0x0E,
/*ascq*/ 0x03,
SSD_ELEM_NONE);
}
void
ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)

View file

@ -66,6 +66,7 @@ void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
int field, int bit_valid, int bit);
void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio);
void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
void ctl_set_already_locked(struct ctl_scsiio *ctsio);

View file

@ -300,14 +300,10 @@ cfcs_datamove(union ctl_io *io)
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
int cam_sg_count, ctl_sg_count, cam_sg_start;
int cam_sg_offset;
int len_to_copy, len_copied;
int len_to_copy;
int ctl_watermark, cam_watermark;
int i, j;
cam_sg_offset = 0;
cam_sg_start = 0;
ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
/*
@ -330,6 +326,8 @@ cfcs_datamove(union ctl_io *io)
cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
cam_sg_count = ccb->csio.sglist_cnt;
cam_sg_start = cam_sg_count;
cam_sg_offset = 0;
for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
if ((len_seen + cam_sglist[i].ds_len) >=
@ -367,7 +365,6 @@ cfcs_datamove(union ctl_io *io)
ctl_watermark = 0;
cam_watermark = cam_sg_offset;
len_copied = 0;
for (i = cam_sg_start, j = 0;
i < cam_sg_count && j < ctl_sg_count;) {
uint8_t *cam_ptr, *ctl_ptr;
@ -389,9 +386,6 @@ cfcs_datamove(union ctl_io *io)
ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
ctl_ptr = ctl_ptr + ctl_watermark;
ctl_watermark += len_to_copy;
cam_watermark += len_to_copy;
if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
@ -407,24 +401,27 @@ cfcs_datamove(union ctl_io *io)
bcopy(cam_ptr, ctl_ptr, len_to_copy);
}
len_copied += len_to_copy;
io->scsiio.ext_data_filled += len_to_copy;
io->scsiio.kern_data_resid -= len_to_copy;
cam_watermark += len_to_copy;
if (cam_sglist[i].ds_len == cam_watermark) {
i++;
cam_watermark = 0;
}
ctl_watermark += len_to_copy;
if (ctl_sglist[j].len == ctl_watermark) {
j++;
ctl_watermark = 0;
}
}
io->scsiio.ext_data_filled += len_copied;
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
ccb->csio.resid = ccb->csio.dxfer_len -
io->scsiio.ext_data_filled;
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_REQ_CMP;
xpt_done(ccb);
@ -453,6 +450,10 @@ cfcs_done(union ctl_io *io)
/*
* Translate CTL status to CAM status.
*/
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
ccb->csio.resid = ccb->csio.dxfer_len -
io->scsiio.ext_data_filled;
}
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (io->io_hdr.status & CTL_STATUS_MASK) {
case CTL_SUCCESS:

View file

@ -138,22 +138,20 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sglen, ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
int len_to_copy, len_copied;
int len_to_copy;
int kern_watermark, ext_watermark;
int ext_sglist_malloced;
int i, j;
ext_sglist_malloced = 0;
ext_sg_start = 0;
ext_offset = 0;
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
/*
* If this flag is set, fake the data transfer.
*/
if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
ctsio->ext_data_filled = ctsio->ext_data_len;
ext_sglist_malloced = 0;
ctsio->ext_data_filled += ctsio->kern_data_len;
ctsio->kern_data_resid = 0;
goto bailout;
}
@ -165,7 +163,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
int len_seen;
ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
M_WAITOK);
ext_sglist_malloced = 1;
@ -174,6 +171,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
goto bailout;
}
ext_sg_entries = ctsio->ext_sg_entries;
ext_sg_start = ext_sg_entries;
ext_offset = 0;
len_seen = 0;
for (i = 0; i < ext_sg_entries; i++) {
if ((len_seen + ext_sglist[i].len) >=
@ -186,6 +185,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
} else {
ext_sglist = &ext_entry;
ext_sglist_malloced = 0;
ext_sglist->addr = ctsio->ext_data_ptr;
ext_sglist->len = ctsio->ext_data_len;
ext_sg_entries = 1;
@ -203,10 +203,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_sg_entries = 1;
}
kern_watermark = 0;
ext_watermark = ext_offset;
len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@ -228,9 +226,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
kern_watermark += len_to_copy;
ext_watermark += len_to_copy;
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
@ -252,21 +247,22 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
}
len_copied += len_to_copy;
ctsio->ext_data_filled += len_to_copy;
ctsio->kern_data_resid -= len_to_copy;
ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
ctsio->ext_data_filled += len_copied;
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
"kern_sg_entries: %d\n", ext_sg_entries,
kern_sg_entries));
@ -274,10 +270,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
"kern_data_len = %d\n", ctsio->ext_data_len,
ctsio->kern_data_len));
/* XXX KDM set residual?? */
bailout:
if (ext_sglist_malloced != 0)
free(ext_sglist, M_CTL);
@ -397,7 +390,7 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
union ctl_io *io;
void *pool_tmp;
void *pool_tmp, *sc_tmp;
int retval = 0;
/*
@ -414,8 +407,10 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
* spammed by the user's ctl_io.
*/
pool_tmp = io->io_hdr.pool;
sc_tmp = CTL_SOFTC(io);
memcpy(io, (void *)addr, sizeof(*io));
io->io_hdr.pool = pool_tmp;
CTL_SOFTC(io) = sc_tmp;
/*
* No status yet, so make sure the status is set properly.

View file

@ -769,6 +769,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
cdw->cdw_sg_len -= copy_len;
off += copy_len;
io->scsiio.ext_data_filled += copy_len;
io->scsiio.kern_data_resid -= copy_len;
if (cdw->cdw_sg_len == 0) {
/*
@ -2514,6 +2515,7 @@ cfiscsi_datamove_in(union ctl_io *io)
}
sg_addr += len;
sg_len -= len;
io->scsiio.kern_data_resid -= len;
KASSERT(buffer_offset + response->ip_data_len <= expected_len,
("buffer_offset %zd + ip_data_len %zd > expected_len %zd",
@ -2599,7 +2601,7 @@ cfiscsi_datamove_out(union ctl_io *io)
struct iscsi_bhs_r2t *bhsr2t;
struct cfiscsi_data_wait *cdw;
struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
uint32_t expected_len, r2t_off, r2t_len;
uint32_t expected_len, datamove_len, r2t_off, r2t_len;
uint32_t target_transfer_tag;
bool done;
@ -2618,16 +2620,15 @@ cfiscsi_datamove_out(union ctl_io *io)
PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
/*
* Report write underflow as error since CTL and backends don't
* really support it, and SCSI does not tell how to do it right.
* Complete write underflow. Not a single byte to read. Return.
*/
expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len >
expected_len) {
io->scsiio.io_hdr.port_status = 43;
if (io->scsiio.kern_rel_offset > expected_len) {
io->scsiio.be_move_done(io);
return;
}
datamove_len = MIN(io->scsiio.kern_data_len,
expected_len - io->scsiio.kern_rel_offset);
target_transfer_tag =
atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
@ -2650,7 +2651,7 @@ cfiscsi_datamove_out(union ctl_io *io)
cdw->cdw_ctl_io = io;
cdw->cdw_target_transfer_tag = target_transfer_tag;
cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
cdw->cdw_r2t_end = io->scsiio.kern_data_len;
cdw->cdw_r2t_end = datamove_len;
cdw->cdw_datasn = 0;
/* Set initial data pointer for the CDW respecting ext_data_filled. */
@ -2659,7 +2660,7 @@ cfiscsi_datamove_out(union ctl_io *io)
} else {
ctl_sglist = &ctl_sg_entry;
ctl_sglist->addr = io->scsiio.kern_data_ptr;
ctl_sglist->len = io->scsiio.kern_data_len;
ctl_sglist->len = datamove_len;
}
cdw->cdw_sg_index = 0;
cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
@ -2690,7 +2691,7 @@ cfiscsi_datamove_out(union ctl_io *io)
}
r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled,
r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled,
cs->cs_max_burst_length);
cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;

View file

@ -293,7 +293,6 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
@ -470,7 +469,6 @@ ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
@ -568,7 +566,6 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
@ -646,7 +643,6 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
@ -718,7 +714,6 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
@ -1730,7 +1725,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -1885,7 +1879,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -2083,7 +2076,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -2247,7 +2239,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
ctsio->kern_data_len = len;
ctsio->kern_total_len = len;
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
ctsio->kern_sg_entries = 0;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -2423,7 +2414,6 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
@ -2504,7 +2494,6 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
ctsio->kern_data_len = alloc_len;
ctsio->kern_total_len = alloc_len;
}
ctsio->kern_data_resid = 0;
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;

View file

@ -137,7 +137,7 @@ tpcl_datamove(union ctl_io *io)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
int len_to_copy, len_copied;
int len_to_copy;
int kern_watermark, ext_watermark;
struct ctl_scsiio *ctsio;
int i, j;
@ -196,7 +196,6 @@ tpcl_datamove(union ctl_io *io)
kern_watermark = 0;
ext_watermark = ext_offset;
len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@ -218,9 +217,6 @@ tpcl_datamove(union ctl_io *io)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
kern_watermark += len_to_copy;
ext_watermark += len_to_copy;
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
@ -236,27 +232,27 @@ tpcl_datamove(union ctl_io *io)
memcpy(kern_ptr, ext_ptr, len_to_copy);
}
len_copied += len_to_copy;
ctsio->ext_data_filled += len_to_copy;
ctsio->kern_data_resid -= len_to_copy;
ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
ctsio->ext_data_filled += len_copied;
CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
__func__, ext_sg_entries, kern_sg_entries));
CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
__func__, ctsio->ext_data_len, ctsio->kern_data_len));
/* XXX KDM set residual?? */
bailout:
io->scsiio.be_move_done(io);
}

View file

@ -697,7 +697,6 @@ ctl_scsi_free_io(union ctl_io *io)
free(io);
}
#endif /* !_KERNEL */
void
ctl_scsi_zero_io(union ctl_io *io)
{
@ -707,11 +706,10 @@ ctl_scsi_zero_io(union ctl_io *io)
return;
pool_ref = io->io_hdr.pool;
memset(io, 0, sizeof(*io));
io->io_hdr.pool = pool_ref;
}
#endif /* !_KERNEL */
const char *
ctl_scsi_task_string(struct ctl_taskio *taskio)

View file

@ -96,8 +96,10 @@ void ctl_scsi_maintenance_in(union ctl_io *io, uint8_t *data_ptr,
#ifndef _KERNEL
union ctl_io *ctl_scsi_alloc_io(uint32_t initid);
void ctl_scsi_free_io(union ctl_io *io);
#endif /* !_KERNEL */
void ctl_scsi_zero_io(union ctl_io *io);
#else
#define ctl_scsi_zero_io(io) ctl_zero_io(io)
#endif /* !_KERNEL */
const char *ctl_scsi_task_string(struct ctl_taskio *taskio);
void ctl_io_sbuf(union ctl_io *io, struct sbuf *sb);
void ctl_io_error_sbuf(union ctl_io *io,

View file

@ -721,15 +721,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
idx = cmd_info->cur_transfer_index;
off = cmd_info->cur_transfer_off;
cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
if (io->scsiio.kern_sg_entries == 0) {
/* No S/G list. */
if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. */
/* One time shift for SRR offset. */
off += io->scsiio.ext_data_filled;
io->scsiio.ext_data_filled = 0;
*data_ptr = io->scsiio.kern_data_ptr + off;
if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
*dxfer_len = io->scsiio.kern_data_len - off;
} else {
*dxfer_len = bus_softc->maxio;
cmd_info->cur_transfer_index = -1;
cmd_info->cur_transfer_off = bus_softc->maxio;
cmd_info->cur_transfer_off += bus_softc->maxio;
cmd_info->flags |= CTLFE_CMD_PIECEWISE;
}
*sglist_cnt = 0;
@ -738,9 +741,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
*flags |= CAM_DATA_PADDR;
else
*flags |= CAM_DATA_VADDR;
} else {
/* S/G list with physical or virtual pointers. */
} else { /* S/G list with physical or virtual pointers. */
ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
/* One time shift for SRR offset. */
while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
idx++;
off = 0;
}
off += io->scsiio.ext_data_filled;
io->scsiio.ext_data_filled = 0;
cam_sglist = cmd_info->cam_sglist;
*dxfer_len = 0;
for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
@ -818,18 +830,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
/*
* Datamove call, we need to setup the S/G list.
*/
scsi_status = 0;
csio->cdb_len = atio->cdb_len;
ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
&csio->sglist_cnt);
io->scsiio.ext_data_filled += dxfer_len;
if (io->scsiio.ext_data_filled > io->scsiio.kern_total_len) {
xpt_print(periph->path, "%s: tag 0x%04x "
"fill len %u > total %u\n",
__func__, io->scsiio.tag_num,
io->scsiio.ext_data_filled,
io->scsiio.kern_total_len);
}
} else {
/*
* We're done, send status back.
@ -891,8 +893,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
data_ptr = NULL;
dxfer_len = 0;
csio->sglist_cnt = 0;
scsi_status = 0;
}
scsi_status = 0;
if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
(cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
@ -1246,13 +1248,36 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
| (done_ccb->csio.msg_ptr[6]);
}
/*
* If we have an SRR and we're still sending data, we
* should be able to adjust offsets and cycle again.
* It is possible only if offset is from this datamove.
*/
if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
srr_off >= io->scsiio.kern_rel_offset &&
srr_off < io->scsiio.kern_rel_offset +
io->scsiio.kern_data_len) {
io->scsiio.kern_data_resid =
io->scsiio.kern_rel_offset +
io->scsiio.kern_data_len - srr_off;
io->scsiio.ext_data_filled = srr_off;
io->scsiio.io_hdr.status = CTL_STATUS_NONE;
io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
softc->ccbs_freed++;
xpt_release_ccb(done_ccb);
TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
periph_links.tqe);
xpt_schedule(periph, /*priority*/ 1);
break;
}
/*
* If status was being sent, the back end data is now history.
* Hack it up and resubmit a new command with the CDB adjusted.
* If the SIM does the right thing, all of the resid math
* should work.
*/
if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
/*
* If status was being sent, the back end data is now
* history. Hack it up and resubmit a new command with
* the CDB adjusted. If the SIM does the right thing,
* all of the resid math should work.
*/
softc->ccbs_freed++;
xpt_release_ccb(done_ccb);
if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
@ -1262,22 +1287,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
/*
* Fall through to doom....
*/
} else if (srr) {
/*
* If we have an srr and we're still sending data, we
* should be able to adjust offsets and cycle again.
*/
io->scsiio.kern_rel_offset =
io->scsiio.ext_data_filled = srr_off;
io->scsiio.ext_data_len = io->scsiio.kern_total_len -
io->scsiio.kern_rel_offset;
softc->ccbs_freed++;
io->scsiio.io_hdr.status = CTL_STATUS_NONE;
xpt_release_ccb(done_ccb);
TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
periph_links.tqe);
xpt_schedule(periph, /*priority*/ 1);
break;
}
if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
@ -1320,16 +1329,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
io->scsiio.ext_data_len += csio->dxfer_len;
if (io->scsiio.ext_data_len >
io->scsiio.kern_total_len) {
xpt_print(periph->path, "%s: tag 0x%04x "
"done len %u > total %u sent %u\n",
__func__, io->scsiio.tag_num,
io->scsiio.ext_data_len,
io->scsiio.kern_total_len,
io->scsiio.ext_data_filled);
}
/*
* Translate CAM status to CTL status. Success
* does not change the overall, ctl_io status. In
@ -1339,6 +1338,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
*/
switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
case CAM_REQ_CMP:
io->scsiio.kern_data_resid -= csio->dxfer_len;
io->io_hdr.port_status = 0;
break;
default:
@ -1368,7 +1368,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
&& (io->io_hdr.port_status == 0)) {
ccb_flags flags;
uint8_t scsi_status;
uint8_t *data_ptr;
uint32_t dxfer_len;
@ -1379,8 +1378,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
ctlfedata(softc, io, &flags, &data_ptr,
&dxfer_len, &csio->sglist_cnt);
scsi_status = 0;
if (((flags & CAM_SEND_STATUS) == 0)
&& (dxfer_len == 0)) {
printf("%s: tag %04x no status or "
@ -1400,7 +1397,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
MSG_SIMPLE_Q_TAG : 0,
atio->tag_id,
atio->init_id,
scsi_status,
0,
/*data_ptr*/ data_ptr,
/*dxfer_len*/ dxfer_len,
/*timeout*/ 5 * 1000);
@ -2003,6 +2000,7 @@ ctlfe_datamove(union ctl_io *io)
KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
io->scsiio.ext_data_filled = 0;
ccb = PRIV_CCB(io);
periph = xpt_path_periph(ccb->ccb_h.path);
cam_periph_lock(periph);

View file

@ -1371,7 +1371,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */
"Information unit too long") },
/* DT P R MAEBK F */
{ SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */
{ SST(0x0E, 0x03, SS_FATAL | EINVAL,
"Invalid field in command information unit") },
/* D W O BK */
{ SST(0x10, 0x00, SS_RDEF,

View file

@ -1714,6 +1714,7 @@ dev/gpio/gpioled_fdt.c optional gpioled fdt
dev/gpio/gpiopower.c optional gpiopower fdt
dev/gpio/gpioregulator.c optional gpioregulator fdt ext_resources
dev/gpio/gpiospi.c optional gpiospi
dev/gpio/gpioths.c optional gpioths
dev/gpio/gpio_if.m optional gpio
dev/gpio/gpiobus_if.m optional gpio
dev/gpio/gpiopps.c optional gpiopps

View file

@ -123,6 +123,8 @@ static struct alc_ident alc_ident_table[] = {
"Killer E2200 Gigabit Ethernet" },
{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
"Killer E2400 Gigabit Ethernet" },
{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2500, 9 * 1024,
"Killer E2500 Gigabit Ethernet" },
{ 0, 0, 0, NULL}
};
@ -1083,6 +1085,7 @@ alc_phy_down(struct alc_softc *sc)
case DEVICEID_ATHEROS_AR8161:
case DEVICEID_ATHEROS_E2200:
case DEVICEID_ATHEROS_E2400:
case DEVICEID_ATHEROS_E2500:
case DEVICEID_ATHEROS_AR8162:
case DEVICEID_ATHEROS_AR8171:
case DEVICEID_ATHEROS_AR8172:
@ -1402,6 +1405,7 @@ alc_attach(device_t dev)
switch (sc->alc_ident->deviceid) {
case DEVICEID_ATHEROS_E2200:
case DEVICEID_ATHEROS_E2400:
case DEVICEID_ATHEROS_E2500:
sc->alc_flags |= ALC_FLAG_E2X00;
/* FALLTHROUGH */
case DEVICEID_ATHEROS_AR8161:
@ -1480,7 +1484,8 @@ alc_attach(device_t dev)
if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
sc->alc_dma_wr_burst = 3;
/*
* Force maximum payload size to 128 bytes for E2200/E2400.
* Force maximum payload size to 128 bytes for
* E2200/E2400/E2500.
* Otherwise it triggers DMA write error.
*/
if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)

View file

@ -50,6 +50,7 @@
#define DEVICEID_ATHEROS_AR8172 0x10A0
#define DEVICEID_ATHEROS_E2200 0xE091
#define DEVICEID_ATHEROS_E2400 0xE0A1
#define DEVICEID_ATHEROS_E2500 0xE0B1
#define ATHEROS_AR8152_B_V10 0xC0
#define ATHEROS_AR8152_B_V11 0xC1

View file

@ -77,7 +77,7 @@ enum bhnd_device_vars {
*/
enum {
BHND_PROBE_ROOT = 0, /**< Nexus or host bridge */
BHND_PROBE_BUS = 1000, /**< Busses and bridges */
BHND_PROBE_BUS = 1000, /**< Buses and bridges */
BHND_PROBE_CPU = 2000, /**< CPU devices */
BHND_PROBE_INTERRUPT = 3000, /**< Interrupt controllers. */
BHND_PROBE_TIMER = 4000, /**< Timers and clocks. */

View file

@ -845,6 +845,7 @@ static int
arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style)
{
int shift;
int err;
if (phy < 0 || phy > sc->numphys)
return EINVAL;
@ -852,10 +853,15 @@ arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style)
if (style < 0 || style > ETHERSWITCH_PORT_LED_MAX)
return (EINVAL);
ARSWITCH_LOCK(sc);
shift = ar8327_led_mapping[phy][led].shift;
return (arswitch_modifyreg(sc->sc_dev,
err = (arswitch_modifyreg(sc->sc_dev,
ar8327_led_mapping[phy][led].reg,
0x03 << shift, led_pattern_table[style] << shift));
ARSWITCH_UNLOCK(sc);
return (err);
}
static void

View file

@ -81,6 +81,8 @@ static int
ar7240_hw_global_setup(struct arswitch_softc *sc)
{
ARSWITCH_LOCK(sc);
/* Enable CPU port; disable mirror port */
arswitch_writereg(sc->sc_dev, AR8X16_REG_CPU_PORT,
AR8X16_CPU_PORT_EN | AR8X16_CPU_MIRROR_DIS);
@ -103,6 +105,8 @@ ar7240_hw_global_setup(struct arswitch_softc *sc)
arswitch_modifyreg(sc->sc_dev, AR8X16_REG_SERVICE_TAG,
AR8X16_SERVICE_TAG_MASK, 0);
ARSWITCH_UNLOCK(sc);
return (0);
}

View file

@ -127,6 +127,8 @@ static int
ar8316_hw_global_setup(struct arswitch_softc *sc)
{
ARSWITCH_LOCK(sc);
arswitch_writereg(sc->sc_dev, 0x38, AR8X16_MAGIC);
/* Enable CPU port and disable mirror port. */
@ -156,6 +158,7 @@ ar8316_hw_global_setup(struct arswitch_softc *sc)
arswitch_modifyreg(sc->sc_dev, AR8X16_REG_SERVICE_TAG,
AR8X16_SERVICE_TAG_MASK, 0);
ARSWITCH_UNLOCK(sc);
return (0);
}

View file

@ -708,6 +708,8 @@ ar8327_hw_global_setup(struct arswitch_softc *sc)
{
uint32_t t;
ARSWITCH_LOCK(sc);
/* enable CPU port and disable mirror port */
t = AR8327_FWD_CTRL0_CPU_PORT_EN |
AR8327_FWD_CTRL0_MIRROR_PORT;
@ -741,6 +743,7 @@ ar8327_hw_global_setup(struct arswitch_softc *sc)
/* GMAC0 (CPU), GMAC1..5 (PHYs), GMAC6 (CPU) */
sc->info.es_nports = 7;
ARSWITCH_UNLOCK(sc);
return (0);
}

View file

@ -81,6 +81,8 @@ static int
ar9340_hw_global_setup(struct arswitch_softc *sc)
{
ARSWITCH_LOCK(sc);
/* Enable CPU port; disable mirror port */
arswitch_writereg(sc->sc_dev, AR8X16_REG_CPU_PORT,
AR8X16_CPU_PORT_EN | AR8X16_CPU_MIRROR_DIS);
@ -142,6 +144,7 @@ ar9340_hw_global_setup(struct arswitch_softc *sc)
} else {
device_printf(sc->sc_dev, "%s: need is_gmii or is_mii set\n",
__func__);
ARSWITCH_UNLOCK(sc);
return (ENXIO);
}
@ -163,6 +166,7 @@ ar9340_hw_global_setup(struct arswitch_softc *sc)
/* Settle time */
DELAY(1000);
ARSWITCH_UNLOCK(sc);
return (0);
}

View file

@ -125,7 +125,7 @@ simplebus_probe(device_t dev)
/*
* FDT data puts a "simple-bus" compatible string on many things that
* have children but aren't really busses in our world. Without a
* have children but aren't really buses in our world. Without a
* ranges property we will fail to attach, so just fail to probe too.
*/
if (!(ofw_bus_is_compatible(dev, "simple-bus") &&

405
sys/dev/gpio/gpioths.c Normal file
View file

@ -0,0 +1,405 @@
/*-
* Copyright (c) 2016 Michael Zhilin <mizhka@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/gpio.h>
#include <machine/resource.h>
#include "gpiobus_if.h"
/*
* GPIOTHS - Temp/Humidity sensor over GPIO, e.g. DHT11/DHT22
* This is driver for Temperature & Humidity sensor which provides digital
* output over single-wire protocol from embedded 8-bit microcontroller.
*
* Temp/Humidity sensor can't be discovered automatically, please specify hints
* as part of loader or kernel configuration:
* hint.gpioths.0.at="gpiobus0"
* hint.gpioths.0.pins=<PIN>
*/
#define GPIOTHS_POLLTIME 5 /* in seconds */
#define GPIOTHS_DHT_STARTCYCLE 20000 /* 20ms = 20000us */
#define GPIOTHS_DHT_TIMEOUT 1000 /* 1ms = 1000us */
#define GPIOTHS_DHT_CYCLES 41
#define GPIOTHS_DHT_ONEBYTEMASK 0xFF
#define GPIOTHS_DHT_TEMP_SHIFT 8
#define GPIOTHS_DHT_HUM_SHIFT 24
/*
 * Per-device state for one GPIO-attached DHT temperature/humidity sensor.
 */
struct gpioths_softc {
	device_t		 dev;		/* our own device_t (back-pointer) */
	int			 temp;		/* last decoded temperature byte */
	int			 hum;		/* last decoded humidity byte */
	int			 fails;		/* consecutive failed reads since last success */
	struct sysctl_oid	*temp_oid;	/* "temperature" sysctl node */
	struct sysctl_oid	*hum_oid;	/* "humidity" sysctl node */
	struct sysctl_oid	*fails_oid;	/* "fails" sysctl node */
	struct callout		 callout;	/* periodic poll timer (GPIOTHS_POLLTIME) */
};
static devclass_t gpioths_devclass;
/* Prototypes */
static int gpioths_probe(device_t dev);
static int gpioths_attach(device_t dev);
static int gpioths_detach(device_t dev);
static void gpioths_poll(void *arg);
static int gpioths_temp_sysctl(SYSCTL_HANDLER_ARGS);
static int gpioths_hum_sysctl(SYSCTL_HANDLER_ARGS);
static int gpioths_fails_sysctl(SYSCTL_HANDLER_ARGS);
/* DHT-specific methods */
static int gpioths_dht_initread(device_t bus, device_t dev);
static int gpioths_dht_readbytes(device_t bus, device_t dev);
static int gpioths_dht_timeuntil(device_t bus, device_t dev,
uint32_t lev, uint32_t *time);
/* Implementation */
/*
 * Probe method.  This driver is attached purely via hints
 * (hint.gpioths.N.at / .pins) and has no way to verify that the
 * sensor hardware is actually present.
 */
static int
gpioths_probe(device_t dev)
{
	device_set_desc(dev, "Temperature and Humidity Sensor over GPIO");

	/*
	 * Returning 0 here is BUS_PROBE_SPECIFIC — the strongest possible
	 * claim, which a hint-only driver should not make.  Use
	 * BUS_PROBE_NOWILDCARD so we only match devices explicitly hinted
	 * at this driver and never outbid a driver that can identify its
	 * hardware.
	 */
	return (BUS_PROBE_NOWILDCARD);
}
/*
 * Busy-wait (polling once per microsecond) until the sensor's data pin
 * reads the requested level 'lev'.  On success, optionally store the
 * number of microseconds waited in '*time' and return 0; if the pin does
 * not reach the level within GPIOTHS_DHT_TIMEOUT microseconds, return
 * ETIMEDOUT.
 */
static int
gpioths_dht_timeuntil(device_t bus, device_t dev, uint32_t lev, uint32_t *time)
{
	uint32_t	sampled;
	int		elapsed;

	elapsed = 0;
	while (elapsed < GPIOTHS_DHT_TIMEOUT) {
		GPIOBUS_PIN_GET(bus, dev, 0, &sampled);
		if (sampled == lev)
			break;
		DELAY(1);
		elapsed++;
	}

	if (elapsed == GPIOTHS_DHT_TIMEOUT)
		return (ETIMEDOUT);	/* pin never reached 'lev' */

	if (time != NULL)
		*time = elapsed;
	return (0);
}
/*
 * Issue the DHT start sequence on the single-wire data pin: drive the
 * line low for ~20ms, release it high, then switch the pin to input so
 * the sensor can answer.  Returns 0 on success or the error from the
 * failing GPIOBUS call (which is also logged).
 *
 * The statement order and delays below follow the DHT start-of-transfer
 * protocol and must not be rearranged.
 */
static int
gpioths_dht_initread(device_t bus, device_t dev)
{
	int	err;

	/* Take control of the line: pin as output. */
	err = GPIOBUS_PIN_SETFLAGS(bus, dev, 0, GPIO_PIN_OUTPUT);
	if (err != 0) {
		device_printf(dev, "err(GPIOBUS_PIN_SETFLAGS, OUT) = %d\n", err);
		return (err);
	}
	DELAY(1);

	/* Pull the line low to signal "start". */
	err = GPIOBUS_PIN_SET(bus, dev, 0, GPIO_PIN_LOW);
	if (err != 0) {
		device_printf(dev, "err(GPIOBUS_PIN_SET, LOW) = %d\n", err);
		return (err);
	}

	/*
	 * According to specifications we need to wait no more than 18ms
	 * to start data transfer
	 */
	DELAY(GPIOTHS_DHT_STARTCYCLE);

	/* Release the line high; the sensor replies after this. */
	err = GPIOBUS_PIN_SET(bus, dev, 0, GPIO_PIN_HIGH);
	if (err != 0) {
		device_printf(dev, "err(GPIOBUS_PIN_SET, HIGH) = %d\n", err);
		return (err);
	}
	DELAY(1);

	/* Hand the line to the sensor: pin as input for the response. */
	err = GPIOBUS_PIN_SETFLAGS(bus, dev, 0, GPIO_PIN_INPUT) ;
	if (err != 0) {
		device_printf(dev, "err(GPIOBUS_PIN_SETFLAGS, IN) = %d\n", err);
		return (err);
	}
	DELAY(1);

	return (0);
}
/*
 * Perform one complete DHT read transaction: issue the start sequence,
 * time the 41 response pulses, decode them into 32 data bits plus an
 * 8-bit checksum, verify the checksum and update the cached
 * temperature/humidity in the softc.
 *
 * A bit is decoded by comparing each data pulse length against the
 * average of the per-bit calibration (low) pulses: longer than average
 * means '1'.
 *
 * Returns 0 on success; on any failure the softc failure counter is
 * incremented and a non-zero value is returned (a GPIOBUS/timeout errno,
 * or -1 for a checksum mismatch).
 */
static int
gpioths_dht_readbytes(device_t bus, device_t dev)
{
	struct gpioths_softc	*sc;
	uint32_t		 calibrations[GPIOTHS_DHT_CYCLES];
	uint32_t		 intervals[GPIOTHS_DHT_CYCLES];
	uint32_t		 avglen, value;
	uint8_t			 crc, calc;
	/*
	 * 'err' must be signed: the checksum-mismatch path stores -1 and
	 * the function returns int.  The original uint32_t declaration
	 * silently converted that sentinel to a large positive value.
	 */
	int			 err, i, offset, size;

	sc = device_get_softc(dev);

	err = gpioths_dht_initread(bus, dev);
	if (err) {
		device_printf(dev, "gpioths_dht_initread error = %d\n", err);
		goto error;
	}

	/* Wait for the sensor's response preamble (line pulled low). */
	err = gpioths_dht_timeuntil(bus, dev, GPIO_PIN_LOW, NULL);
	if (err) {
		device_printf(dev, "err(START) = %d\n", err);
		goto error;
	}

	/* reading - 41 cycles */
	for (i = 0; i < GPIOTHS_DHT_CYCLES; i++) {
		err = gpioths_dht_timeuntil(bus, dev, GPIO_PIN_HIGH,
		    &calibrations[i]);
		if (err) {
			device_printf(dev, "err(CAL, %d) = %d\n", i, err);
			goto error;
		}
		err = gpioths_dht_timeuntil(bus, dev, GPIO_PIN_LOW,
		    &intervals[i]);
		if (err) {
			device_printf(dev, "err(INTERVAL, %d) = %d\n", i, err);
			goto error;
		}
	}

	/* Transfer done; reclaim the line as output until the next read. */
	err = GPIOBUS_PIN_SETFLAGS(bus, dev, 0, GPIO_PIN_OUTPUT);
	if (err != 0) {
		device_printf(dev, "err(FINAL_SETFLAGS, OUT) = %d\n", err);
		goto error;
	}
	DELAY(1);

	/* Calculate average data calibration cycle length */
	avglen = 0;
	for (i = 1; i < GPIOTHS_DHT_CYCLES; i++)
		avglen += calibrations[i];

	avglen = avglen / (GPIOTHS_DHT_CYCLES - 1);

	/* Calculate data: bits 1..32 of the pulse train (bit 0 is sync). */
	value = 0;
	offset = 1;
	size = sizeof(value) * 8;
	for (i = offset; i < size + offset; i++) {
		value <<= 1;
		if (intervals[i] > avglen)
			value += 1;
	}

	/* Calculate CRC: the trailing 8 bits. */
	crc = 0;
	offset = sizeof(value) * 8 + 1;
	size = sizeof(crc) * 8;
	for (i = offset; i < size + offset; i++) {
		crc <<= 1;
		if (intervals[i] > avglen)
			crc += 1;
	}

	/* Expected checksum: byte-wise sum of the 32 data bits. */
	calc = 0;
	for (i = 0; i < sizeof(value); i++)
		calc += (value >> (8*i)) & GPIOTHS_DHT_ONEBYTEMASK;

#ifdef GPIOTHS_DEBUG
	/* Debug bits */
	for (i = 0; i < GPIOTHS_DHT_CYCLES; i++)
		device_printf(dev, "%d: %d %d\n", i, calibrations[i],
		    intervals[i]);

	device_printf(dev, "len=%d, data=%x, crc=%x/%x\n", avglen, value, crc,
	    calc);
#endif /* GPIOTHS_DEBUG */

	/* CRC check */
	if (calc != crc) {
		err = -1;
		goto error;
	}

	sc->fails = 0;
	sc->temp = (value >> GPIOTHS_DHT_TEMP_SHIFT) & GPIOTHS_DHT_ONEBYTEMASK;
	sc->hum = (value >> GPIOTHS_DHT_HUM_SHIFT) & GPIOTHS_DHT_ONEBYTEMASK;

#ifdef GPIOTHS_DEBUG
	/* Debug bits */
	device_printf(dev, "fails=%d, temp=%d, hum=%d\n", sc->fails,
	    sc->temp, sc->hum);
#endif /* GPIOTHS_DEBUG */

	return (0);
error:
	sc->fails++;
	return (err);
}
/*
 * Periodic callout handler: refresh the cached sensor reading (errors are
 * accounted for inside gpioths_dht_readbytes()) and re-arm ourselves for
 * the next poll interval.
 */
static void
gpioths_poll(void *arg)
{
	device_t		 self;
	struct gpioths_softc	*sc;

	self = arg;
	sc = device_get_softc(self);

	(void)gpioths_dht_readbytes(device_get_parent(self), self);
	callout_schedule(&sc->callout, GPIOTHS_POLLTIME * hz);
}
/* Sysctl handler: report the most recently decoded temperature (C). */
static int
gpioths_temp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct gpioths_softc	*sc = arg1;
	int			 snapshot;

	/* Copy to a local so the handler never writes back to the softc. */
	snapshot = sc->temp;
	return (sysctl_handle_int(oidp, &snapshot, 0, req));
}
/* Sysctl handler: report the most recently decoded relative humidity (%). */
static int
gpioths_hum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct gpioths_softc	*sc = arg1;
	int			 snapshot;

	/* Copy to a local so the handler never writes back to the softc. */
	snapshot = sc->hum;
	return (sysctl_handle_int(oidp, &snapshot, 0, req));
}
/* Sysctl handler: report failed reads since the last successful one. */
static int
gpioths_fails_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct gpioths_softc	*sc = arg1;
	int			 snapshot;

	/* Copy to a local so the handler never writes back to the softc. */
	snapshot = sc->fails;
	return (sysctl_handle_int(oidp, &snapshot, 0, req));
}
/*
 * Attach method: initialize the softc, start the periodic poll callout,
 * and publish the temperature/humidity/fails sysctl nodes under the
 * device's own sysctl tree.
 */
static int
gpioths_attach(device_t dev)
{
	struct gpioths_softc	*softc;
	struct sysctl_ctx_list	*sctx;
	struct sysctl_oid	*stree;

	softc = device_get_softc(dev);
	sctx = device_get_sysctl_ctx(dev);
	stree = device_get_sysctl_tree(dev);

	softc->dev = dev;

	/* Kick off the periodic sensor poll (first fire in POLLTIME secs). */
	callout_init(&softc->callout, 1);
	callout_reset(&softc->callout, GPIOTHS_POLLTIME * hz, gpioths_poll,
	    dev);

	softc->temp_oid = SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(stree),
	    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, softc, 0,
	    gpioths_temp_sysctl, "I", "temperature(C)");

	softc->hum_oid = SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(stree),
	    OID_AUTO, "humidity", CTLTYPE_INT | CTLFLAG_RD, softc, 0,
	    gpioths_hum_sysctl, "I", "humidity(%)");

	softc->fails_oid = SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(stree),
	    OID_AUTO, "fails", CTLTYPE_INT | CTLFLAG_RD, softc, 0,
	    gpioths_fails_sysctl, "I", "fails since last successful read");

	return (0);
}
/*
 * Detach method: stop the polling callout before the softc is freed.
 *
 * The original implementation returned immediately without draining the
 * callout, leaving gpioths_poll() scheduled against freed softc memory
 * after detach/module unload — a use-after-free.  The sysctl oids hang
 * off the device's own sysctl context and are reclaimed by the bus
 * framework, so no explicit teardown is needed for them.
 */
static int
gpioths_detach(device_t dev)
{
	struct gpioths_softc	*sc;

	sc = device_get_softc(dev);
	callout_drain(&sc->callout);

	return (0);
}
/* DDB bits */
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#include <sys/cons.h>
static struct command_table db_gpioths_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, gpioths, NULL, db_show_table, 0, &db_gpioths_table);
/*
 * DDB command: "show gpioths read <device>" — force one sensor read and
 * print the raw return value of gpioths_dht_readbytes().
 */
DB_FUNC(read, db_show_gpiothsread, db_gpioths_table, CS_OWN, NULL)
{
	device_t	dev;
	int		t;
	int		init;

	init = 0;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		/* Guard against a bogus device name from the debugger. */
		if (dev == NULL)
			db_printf("device %s not found\n", db_tok_string);
		else
			init = 1;
	}

	db_skip_to_eol();

	if (init)
		/*
		 * gpioths_dht_readbytes() takes (bus, dev); the original
		 * call passed them swapped (dev, parent), so the read was
		 * issued against the wrong device.  Match gpioths_poll().
		 */
		db_printf("read: 0x%x\n",
		    gpioths_dht_readbytes(device_get_parent(dev), dev));
	else
		db_printf("usage: show gpioths read <gpiothsdevice>\n");

	return;
}
#endif /* DDB */
/* Driver bits */
/* newbus glue: method table, class definition and module registration. */
static device_method_t gpioths_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gpioths_probe),
	DEVMETHOD(device_attach,	gpioths_attach),
	DEVMETHOD(device_detach,	gpioths_detach),

	DEVMETHOD_END
};

/* Declare the gpioths device class with per-instance softc storage. */
DEFINE_CLASS_0(gpioths, gpioths_driver, gpioths_methods, sizeof(struct gpioths_softc));
/* Attach the driver to gpiobus; no quiesce/event handlers. */
DRIVER_MODULE(gpioths, gpiobus, gpioths_driver, gpioths_devclass, 0, 0);

View file

@ -4396,7 +4396,7 @@ isp_start(XS_T *xs)
/*
* Now see if we need to synchronize the ISP with respect to anything.
* We do dual duty here (cough) for synchronizing for busses other
* We do dual duty here (cough) for synchronizing for buses other
* than which we got here to send a command to.
*/
reqp = (ispreq_t *) local;

View file

@ -55,7 +55,7 @@
#define DEV_MMC_MMCREG_H
/*
* This file contains the register definitions for the mmc and sd busses.
* This file contains the register definitions for the mmc and sd buses.
* They are taken from publicly available sources.
*/

View file

@ -1544,7 +1544,7 @@ mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
/*
* Send the initialization request. This is where we specify how many
* SCSI busses and how many devices per bus we wish to emulate.
* SCSI buses and how many devices per bus we wish to emulate.
* This is also the command that specifies the max size of the reply
* frames from the IOC that we will be allocating.
*/
@ -1559,7 +1559,7 @@ mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
init.WhoInit = who;
init.Function = MPI_FUNCTION_IOC_INIT;
init.MaxDevices = 0; /* at least 256 devices per bus */
init.MaxBuses = 16; /* at least 16 busses */
init.MaxBuses = 16; /* at least 16 buses */
init.MsgVersion = htole16(MPI_VERSION);
init.HeaderVersion = htole16(MPI_HEADER_VERSION);

View file

@ -809,7 +809,7 @@ mpt_read(struct mpt_softc *mpt, int offset)
/*
* Some operations (e.g. diagnostic register writes while the ARM proccessor
* is disabled), must be performed using "PCI pio" operations. On non-PCI
* busses, these operations likely map to normal register accesses.
* buses, these operations likely map to normal register accesses.
*/
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)

View file

@ -782,7 +782,7 @@ mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
}
#endif
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/* XXX Ignores that there may be multiple buses/IOCs involved. */
cam_status
mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
{
@ -799,7 +799,7 @@ mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
return (-1);
}
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/* XXX Ignores that there may be multiple buses/IOCs involved. */
int
mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
{
@ -818,7 +818,7 @@ mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
}
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/* XXX Ignores that there may be multiple buses/IOCs involved. */
int
mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
{

View file

@ -351,7 +351,7 @@ cbb_pci_attach(device_t brdev)
/*
* This is a gross hack. We should be scanning the entire pci
* tree, assigning bus numbers in a way such that we (1) can
* reserve 1 extra bus just in case and (2) all sub busses
* reserve 1 extra bus just in case and (2) all sub buses
* are in an appropriate range.
*/
DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec));

View file

@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
/*
* Provide a device to "eat" the host->pci bridge devices that show up
* on PCI busses and stop them showing up twice on the probes. This also
* on PCI buses and stop them showing up twice on the probes. This also
* stops them showing up as 'none' in pciconf -l. If the host bridge
* provides an AGP capability then we create a child agp device for the
* agp GART driver to attach to.

View file

@ -281,13 +281,14 @@ static const struct pci_quirk pci_quirks[] = {
{ 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
/*
* Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a
* bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
* Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have
* a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
* of the command register is set.
*/
{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
/*
@ -3095,7 +3096,7 @@ pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
* If base is 0, then we have problems if this architecture does
* not allow that. It is best to ignore such entries for the
* moment. These will be allocated later if the driver specifically
* requests them. However, some removable busses look better when
* requests them. However, some removable buses look better when
* all resources are allocated, so allow '0' to be overriden.
*
* Similarly treat maps whose values is the same as the test value
@ -4178,7 +4179,7 @@ pci_attach(device_t dev)
/*
* Since there can be multiple independently numbered PCI
* busses on systems with multiple PCI domains, we can't use
* buses on systems with multiple PCI domains, we can't use
* the unit number to decide which bus we are probing. We ask
* the parent pcib what our domain and bus numbers are.
*/

View file

@ -34,7 +34,7 @@
/*
* Export definitions of the pci bus so that we can more easily share
* it with "subclass" busses.
* it with "subclass" buses.
*/
DECLARE_CLASS(pci_driver);

View file

@ -73,7 +73,7 @@ host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func,
* For the 450nx chipset, there is a whole bundle of
* things pretending to be host bridges. The MIOC will
* be seen first and isn't really a pci bridge (the
* actual busses are attached to the PXB's). We need to
* actual buses are attached to the PXB's). We need to
* read the registers of the MIOC to figure out the
* bus numbers for the PXB channels.
*

View file

@ -116,9 +116,8 @@ spibus_probe_nomatch(device_t bus, device_t child)
{
struct spibus_ivar *devi = SPIBUS_IVAR(child);
device_printf(bus, "<unknown card>");
printf(" at cs %d", devi->cs);
printf(" mode %d\n", devi->mode);
device_printf(bus, "<unknown card> at cs %d mode %d\n", devi->cs,
devi->mode);
return;
}

View file

@ -2297,7 +2297,7 @@ usb_needs_explore(struct usb_bus *bus, uint8_t do_probe)
* usb_needs_explore_all
*
* This function is called whenever a new driver is loaded and will
* cause that all USB busses are re-explored.
* cause that all USB buses are re-explored.
*------------------------------------------------------------------------*/
void
usb_needs_explore_all(void)
@ -2315,7 +2315,7 @@ usb_needs_explore_all(void)
return;
}
/*
* Explore all USB busses in parallel.
* Explore all USB buses in parallel.
*/
max = devclass_get_maxunit(dc);
while (max >= 0) {

View file

@ -206,7 +206,7 @@ METHOD void driver_added {
/**
* @brief Create a new child device
*
* For busses which use use drivers supporting DEVICE_IDENTIFY() to
* For buses which use use drivers supporting DEVICE_IDENTIFY() to
* enumerate their devices, this method is used to create new
* device instances. The new device will be added after the last
* existing child with the same order. Implementations of bus_add_child
@ -475,7 +475,7 @@ METHOD int teardown_intr {
* @brief Define a resource which can be allocated with
* BUS_ALLOC_RESOURCE().
*
* This method is used by some busses (typically ISA) to allow a
* This method is used by some buses (typically ISA) to allow a
* driver to describe a resource range that it would like to
* allocate. The resource defined by @p _type and @p _rid is defined
* to start at @p _start and to include @p _count indices in its
@ -562,7 +562,7 @@ METHOD struct resource_list * get_resource_list {
* should return -1 if it is present. Any errors in determining
* should be returned as a normal errno value. Client drivers are to
* assume that the device is present, even if there is an error
* determining if it is there. Busses are to try to avoid returning
* determining if it is there. Buses are to try to avoid returning
* errors, but newcard will return an error if the device fails to
* implement this method.
*

View file

@ -469,8 +469,8 @@ static uint32_t
encode_timeval(struct timeval tv)
{
int log2_s;
int val, exponent; /* Unnormalized value and exponent */
int norm_exponent; /* Normalized exponent */
int val, exp; /* Unnormalized value and exponent */
int norm_exp; /* Normalized exponent */
int shift;
/*
@ -481,7 +481,7 @@ encode_timeval(struct timeval tv)
if (tv.tv_sec == 0) {
if (tv.tv_usec == 0)
return (0);
exponent = 0;
exp = 0;
val = tv.tv_usec;
} else {
/*
@ -490,24 +490,24 @@ encode_timeval(struct timeval tv)
*/
log2_s = fls(tv.tv_sec) - 1;
if (log2_s + LOG2_1M < CALC_BITS) {
exponent = 0;
exp = 0;
val = 1000000 * tv.tv_sec + tv.tv_usec;
} else {
exponent = log2_s + LOG2_1M - CALC_BITS;
exp = log2_s + LOG2_1M - CALC_BITS;
val = (unsigned int)(((uint64_t)1000000 * tv.tv_sec +
tv.tv_usec) >> exponent);
tv.tv_usec) >> exp);
}
}
/* Now normalize and pack the value into an IEEE-754 float. */
norm_exponent = fls(val) - 1;
shift = FLT_MANT_DIG - norm_exponent - 1;
norm_exp = fls(val) - 1;
shift = FLT_MANT_DIG - norm_exp - 1;
#ifdef ACCT_DEBUG
printf("val=%d exp=%d shift=%d log2(val)=%d\n",
val, exponent, shift, norm_exponent);
printf("exp=%x mant=%x\n", FLT_MAX_EXP - 1 + exponent + norm_exponent,
val, exp, shift, norm_exp);
printf("exp=%x mant=%x\n", FLT_MAX_EXP - 1 + exp + norm_exp,
((shift > 0 ? (val << shift) : (val >> -shift)) & MANT_MASK));
#endif
return (((FLT_MAX_EXP - 1 + exponent + norm_exponent) << (FLT_MANT_DIG - 1)) |
return (((FLT_MAX_EXP - 1 + exp + norm_exp) << (FLT_MANT_DIG - 1)) |
((shift > 0 ? val << shift : val >> -shift) & MANT_MASK));
}
@ -518,7 +518,7 @@ encode_timeval(struct timeval tv)
static uint32_t
encode_long(long val)
{
int norm_exponent; /* Normalized exponent */
int norm_exp; /* Normalized exponent */
int shift;
if (val == 0)
@ -529,15 +529,15 @@ encode_long(long val)
val);
val = LONG_MAX;
}
norm_exponent = fls(val) - 1;
shift = FLT_MANT_DIG - norm_exponent - 1;
norm_exp = fls(val) - 1;
shift = FLT_MANT_DIG - norm_exp - 1;
#ifdef ACCT_DEBUG
printf("val=%d shift=%d log2(val)=%d\n",
val, shift, norm_exponent);
printf("exp=%x mant=%x\n", FLT_MAX_EXP - 1 + exp + norm_exponent,
val, shift, norm_exp);
printf("exp=%x mant=%x\n", FLT_MAX_EXP - 1 + exp + norm_exp,
((shift > 0 ? (val << shift) : (val >> -shift)) & MANT_MASK));
#endif
return (((FLT_MAX_EXP - 1 + norm_exponent) << (FLT_MANT_DIG - 1)) |
return (((FLT_MAX_EXP - 1 + norm_exp) << (FLT_MANT_DIG - 1)) |
((shift > 0 ? val << shift : val >> -shift) & MANT_MASK));
}

View file

@ -344,6 +344,7 @@ static struct {
{ &null_filtops }, /* EVFILT_LIO */
{ &user_filtops, 1 }, /* EVFILT_USER */
{ &null_filtops }, /* EVFILT_SENDFILE */
{ &file_filtops, 1 }, /* EVFILT_EMPTY */
};
/*

View file

@ -656,7 +656,7 @@ int __noinline cpu_search_both(const struct cpu_group *cg,
* according to the match argument. This routine actually compares the
* load on all paths through the tree and finds the least loaded cpu on
* the least loaded path, which may differ from the least loaded cpu in
* the system. This balances work among caches and busses.
* the system. This balances work among caches and buses.
*
* This inline is instantiated in three forms below using constants for the
* match argument. It is reduced to the minimum set for each case. It is

View file

@ -1093,7 +1093,7 @@ devclass_driver_added(devclass_t dc, driver_t *driver)
int i;
/*
* Call BUS_DRIVER_ADDED for any existing busses in this class.
* Call BUS_DRIVER_ADDED for any existing buses in this class.
*/
for (i = 0; i < dc->maxunit; i++)
if (dc->devices[i] && device_is_attached(dc->devices[i]))
@ -3269,7 +3269,7 @@ resource_list_delete(struct resource_list *rl, int type, int rid)
/**
* @brief Allocate a reserved resource
*
* This can be used by busses to force the allocation of resources
* This can be used by buses to force the allocation of resources
* that are always active in the system even if they are not allocated
* by a driver (e.g. PCI BARs). This function is usually called when
* adding a new child to the bus. The resource is allocated from the
@ -3648,7 +3648,7 @@ bus_generic_probe(device_t dev)
* only call the identify routines of eligible drivers
* when this routine is called. Drivers for later
* passes should have their identify routines called
* on early-pass busses during BUS_NEW_PASS().
* on early-pass buses during BUS_NEW_PASS().
*/
if (dl->pass > bus_current_pass)
continue;

View file

@ -169,7 +169,7 @@ CTASSERT(sizeof(struct m_ext) == 28);
* plain pointer does.
*/
#ifdef INVARIANTS
static struct mbuf m_assertbuf;
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));

View file

@ -161,6 +161,7 @@ static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);
static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
static int filt_soempty(struct knote *kn, long hint);
fo_kqfilter_t soo_kqfilter;
static struct filterops solisten_filtops = {
@ -178,6 +179,11 @@ static struct filterops sowrite_filtops = {
.f_detach = filt_sowdetach,
.f_event = filt_sowrite,
};
/* Filter ops for EVFILT_EMPTY: triggers once the socket send buffer drains. */
static struct filterops soempty_filtops = {
	.f_isfd = 1,			/* knote attaches to a file descriptor */
	.f_detach = filt_sowdetach,	/* same detach path as the write filter */
	.f_event = filt_soempty,
};
so_gen_t so_gencnt; /* generation count for sockets */
@ -2681,6 +2687,18 @@ sosetopt(struct socket *so, struct sockopt *sopt)
#endif
break;
case SO_TS_CLOCK:
error = sooptcopyin(sopt, &optval, sizeof optval,
sizeof optval);
if (error)
goto bad;
if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
error = EINVAL;
goto bad;
}
so->so_ts_clock = optval;
break;
default:
if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
error = hhook_run_socket(so, sopt,
@ -2868,6 +2886,10 @@ sogetopt(struct socket *so, struct sockopt *sopt)
optval = so->so_incqlen;
goto integer;
case SO_TS_CLOCK:
optval = so->so_ts_clock;
goto integer;
default:
if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
error = hhook_run_socket(so, sopt,
@ -3083,6 +3105,10 @@ soo_kqfilter(struct file *fp, struct knote *kn)
kn->kn_fop = &sowrite_filtops;
sb = &so->so_snd;
break;
case EVFILT_EMPTY:
kn->kn_fop = &soempty_filtops;
sb = &so->so_snd;
break;
default:
return (EINVAL);
}
@ -3344,6 +3370,21 @@ filt_sowrite(struct knote *kn, long hint)
return (kn->kn_data >= so->so_snd.sb_lowat);
}
/*
 * kqueue event filter for EVFILT_EMPTY: returns true (event ready) when
 * no data remains queued in the socket's send buffer.  kn_data is set to
 * the number of bytes still buffered, so callers can observe the residue.
 */
static int
filt_soempty(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbused(&so->so_snd);

	return (kn->kn_data == 0);
}
/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)

View file

@ -1899,6 +1899,7 @@ unp_internalize(struct mbuf **controlp, struct thread *td)
struct filedescent *fde, **fdep, *fdev;
struct file *fp;
struct timeval *tv;
struct timespec *ts;
int i, *fdp;
void *data;
socklen_t clen = control->m_len, datalen;
@ -2019,6 +2020,30 @@ unp_internalize(struct mbuf **controlp, struct thread *td)
bintime(bt);
break;
case SCM_REALTIME:
*controlp = sbcreatecontrol(NULL, sizeof(*ts),
SCM_REALTIME, SOL_SOCKET);
if (*controlp == NULL) {
error = ENOBUFS;
goto out;
}
ts = (struct timespec *)
CMSG_DATA(mtod(*controlp, struct cmsghdr *));
nanotime(ts);
break;
case SCM_MONOTONIC:
*controlp = sbcreatecontrol(NULL, sizeof(*ts),
SCM_MONOTONIC, SOL_SOCKET);
if (*controlp == NULL) {
error = ENOBUFS;
goto out;
}
ts = (struct timespec *)
CMSG_DATA(mtod(*controlp, struct cmsghdr *));
nanouptime(ts);
break;
default:
error = EINVAL;
goto out;

View file

@ -111,6 +111,10 @@
#define PCI_WINDOW7_CONF_ADDR 0x07000000
#define AR71XX_UART_ADDR 0x18020000
#define AR71XX_UART_THR		0x0		/* transmit holding register */
#define AR71XX_UART_LSR		0x14		/* line status register */
#define AR71XX_UART_LSR_THRE	(1 << 5)	/* TX holding register empty */
#define AR71XX_UART_LSR_TEMT	(1 << 6)	/* transmitter empty */
#define AR71XX_USB_CTRL_FLADJ 0x18030000
#define USB_CTRL_FLADJ_HOST_SHIFT 12

View file

@ -86,4 +86,21 @@ uart_ar71xx_probe(device_t dev)
return (uart_bus_probe(dev, 2, freq, 0, 0));
}
#ifdef EARLY_PRINTF
/*
 * Early-boot console character output (EARLY_PRINTF hook), usable before
 * the regular uart(4) driver attaches.
 *
 * Polls the UART line status register until the transmit holding register
 * is empty, then writes the character.  The poll is bounded (1000 reads)
 * so a wedged UART cannot hang early boot; after the bound expires the
 * character is written regardless and may be lost.
 */
static void
ar71xx_early_putc(int c)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (ATH_READ_REG(AR71XX_UART_ADDR + AR71XX_UART_LSR)
		    & AR71XX_UART_LSR_THRE)
			break;
	}
	/* Only the low byte is a character; mask off the rest. */
	ATH_WRITE_REG(AR71XX_UART_ADDR + AR71XX_UART_THR, (c & 0xff));
}
early_putc_t *early_putc = ar71xx_early_putc;
#endif
DRIVER_MODULE(uart, apb, uart_ar71xx_driver, uart_devclass, 0, 0);

View file

@ -165,6 +165,12 @@ struct ieee80211_qosframe_addr4 {
#define IEEE80211_IS_MGMT(wh) \
(!! (((wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) \
== IEEE80211_FC0_TYPE_MGT))
/* Evaluates to 1 if the frame header's type field is CONTROL, else 0. */
#define IEEE80211_IS_CTL(wh) \
	(!! (((wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) \
	    == IEEE80211_FC0_TYPE_CTL))
/* Evaluates to 1 if the frame header's type field is DATA, else 0. */
#define IEEE80211_IS_DATA(wh) \
	(!! (((wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK) \
	    == IEEE80211_FC0_TYPE_DATA))
#define IEEE80211_FC0_QOSDATA \
(IEEE80211_FC0_TYPE_DATA|IEEE80211_FC0_SUBTYPE_QOS|IEEE80211_FC0_VERSION_0)

View file

@ -52,7 +52,7 @@
#define _NETINET_CC_CC_H_
#if !defined(_KERNEL)
#error "no user-servicable parts inside"
#error "no user-serviceable parts inside"
#endif
/* Global CC vars. */

View file

@ -1157,30 +1157,48 @@ ip_forward(struct mbuf *m, int srcrt)
icmp_error(mcopy, type, code, dest.s_addr, mtu);
}
/*
 * CHECK_SO_CT(sp, ct): true iff socket sp has SO_TIMESTAMP enabled and its
 * timestamp clock type (so_ts_clock) equals ct.
 *
 * Fix: parenthesize every use of the macro parameters so that argument
 * expressions with lower-precedence operators expand correctly
 * (CERT C PRE01-C).  All current call sites pass simple expressions, so
 * behavior at those sites is unchanged.
 */
#define CHECK_SO_CT(sp, ct) \
    ((((sp)->so_options & SO_TIMESTAMP) && ((sp)->so_ts_clock == (ct))) ? 1 : 0)
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
struct mbuf *m)
{
if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
if ((inp->inp_socket->so_options & SO_BINTIME) ||
CHECK_SO_CT(inp->inp_socket, SO_TS_BINTIME)) {
struct bintime bt;
bintime(&bt);
if (inp->inp_socket->so_options & SO_BINTIME) {
*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
SCM_BINTIME, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
if (inp->inp_socket->so_options & SO_TIMESTAMP) {
struct timeval tv;
*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
SCM_BINTIME, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME_MICRO)) {
struct timeval tv;
bintime2timeval(&bt, &tv);
*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
SCM_TIMESTAMP, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
microtime(&tv);
*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
SCM_TIMESTAMP, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
} else if (CHECK_SO_CT(inp->inp_socket, SO_TS_REALTIME)) {
struct timespec ts;
nanotime(&ts);
*mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts),
SCM_REALTIME, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
} else if (CHECK_SO_CT(inp->inp_socket, SO_TS_MONOTONIC)) {
struct timespec ts;
nanouptime(&ts);
*mp = sbcreatecontrol((caddr_t)&ts, sizeof(ts),
SCM_MONOTONIC, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
if (inp->inp_flags & INP_RECVDSTADDR) {
*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,

View file

@ -507,19 +507,21 @@ sysctl_ip6_tempvltime(SYSCTL_HANDLER_ARGS)
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_FORWARDING, forwarding,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_forwarding), 0,
"Enable IPv6 forwarding between interfaces");
"Enable forwarding of IPv6 packets between interfaces");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_SENDREDIRECTS, redirect,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_sendredirects), 0,
"Send a redirect message when forwarding back to a source link");
"Send ICMPv6 redirects for unforwardable IPv6 packets");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, hlim,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_defhlim), 0,
"Default hop limit");
"Default hop limit to use for outgoing IPv6 packets");
SYSCTL_VNET_PCPUSTAT(_net_inet6_ip6, IPV6CTL_STATS, stats, struct ip6stat,
ip6stat,
"IP6 statistics (struct ip6stat, netinet6/ip6_var.h)");
/*
 * net.inet6.ip6.maxfragpackets: cap on concurrently-held fragmented IPv6
 * packets.  Fix: the adjacent string literals concatenated to
 * "... while a a value of -1 ..." (doubled "a"); drop the duplicate.
 */
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragpackets), 0,
	"Default maximum number of outstanding fragmented IPv6 packets. "
	"A value of 0 means no fragmented packets will be accepted, while "
	"a value of -1 means no limit");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_ACCEPT_RTADV, accept_rtadv,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_accept_rtadv), 0,
"Default value of per-interface flag for accepting ICMPv6 RA messages");
@ -541,7 +543,8 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_LOG_INTERVAL, log_interval,
"Frequency in seconds at which to log IPv6 forwarding errors");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_HDRNESTLIMIT, hdrnestlimit,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_hdrnestlimit), 0,
"Maximum allowed number of nested protocol headers");
"Default maximum number of IPv6 extension headers permitted on "
"incoming IPv6 packets, 0 for no artificial limit");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DAD_COUNT, dad_count,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_dad_count), 0,
"Number of ICMPv6 NS messages sent during duplicate address detection");
@ -550,7 +553,8 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_AUTO_FLOWLABEL, auto_flowlabel,
"Provide an IPv6 flowlabel in outbound packets");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFMCASTHLIM, defmcasthlim,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_defmcasthlim), 0,
"Default hop limit for multicast packets");
"Default hop limit for IPv6 multicast packets originating from this "
"node");
SYSCTL_STRING(_net_inet6_ip6, IPV6CTL_KAME_VERSION, kame_version,
CTLFLAG_RD, __KAME_VERSION, 0,
"KAME version string");

Some files were not shown because too many files have changed in this diff Show more