Fix typos.

PR:	bin/148894
Submitted by:	olgeni
This commit is contained in:
Rebecca Cran 2010-11-09 10:59:09 +00:00
parent d6cb4126e3
commit b1ce21c6ef
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=215034
21 changed files with 90 additions and 90 deletions

View file

@@ -100,7 +100,7 @@ unmappedaddr(struct sockaddr_in6 *sin6)
sin4->sin_len = sizeof(struct sockaddr_in);
}
/* Get a field from a \0 seperated string */
/* Get a field from a \0 separated string */
ssize_t
get_field(int peer, char *buffer, ssize_t size)
{

View file

@@ -317,7 +317,7 @@ ixpqmgr_attach(device_t dev)
sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
ixpqmgr_rebuild(sc); /* build inital priority table */
ixpqmgr_rebuild(sc); /* build initial priority table */
aqm_reset(sc); /* reset h/w */
return (0);
}
@@ -775,7 +775,7 @@ ixpqmgr_intr(void *arg)
*
* The search will end when all the bits of the interrupt
* register are cleared. There is no need to maintain
* a seperate value and test it at each iteration.
* a separate value and test it at each iteration.
*/
if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
priorityTableIndex = 0;

View file

@@ -88,7 +88,7 @@ board_init(void)
/*
* This should be called just before starting the kernel. This is so
* that one can undo incompatable hardware settings.
* that one can undo incompatible hardware settings.
*/
void
clr_board(void)
@@ -504,7 +504,7 @@ cfaltwait(u_int8_t mask)
while (tout <= 5000000) {
status = cfaltread8(CF_ALT_STATUS);
if (status == 0xff) {
printf("cfaltwait: master: no status, reselectin\n");
printf("cfaltwait: master: no status, reselecting\n");
cfwrite8(CF_DRV_HEAD, CF_D_IBM);
DELAY(1);
status = cfread8(CF_STATUS);

View file

@@ -188,13 +188,13 @@ ad_detach(device_t dev)
free(children, M_TEMP);
}
/* detroy disk from the system so we dont get any further requests */
/* destroy disk from the system so we don't get any further requests */
disk_destroy(adp->disk);
/* fail requests on the queue and any thats "in flight" for this device */
/* fail requests on the queue and any that's "in flight" for this device */
ata_fail_requests(dev);
/* dont leave anything behind */
/* don't leave anything behind */
device_set_ivars(dev, NULL);
free(adp, M_AD);
return 0;
@@ -536,7 +536,7 @@ ad_describe(device_t dev)
struct ad_softc *adp = device_get_ivars(dev);
u_int8_t *marker, vendor[64], product[64];
/* try to seperate the ATA model string into vendor and model parts */
/* try to separate the ATA model string into vendor and model parts */
if ((marker = index(atadev->param.model, ' ')) ||
(marker = index(atadev->param.model, '-'))) {
int len = (marker - atadev->param.model);

View file

@@ -345,7 +345,7 @@ __FBSDID("$FreeBSD$");
* position takes place.
*
* Most likely this is used to ignore rest of the program in cases
* where group of verts arent visible. For some reason this "section"
* where group of verts aren't visible. For some reason this "section"
* is sometimes accepted other instruction that have no relationship with
* position calculations.
*/
@@ -590,7 +590,7 @@ __FBSDID("$FreeBSD$");
#define R300_RE_FOG_START 0x4298
/* Not sure why there are duplicate of factor and constant values.
* My best guess so far is that there are seperate zbiases for test and write.
* My best guess so far is that there are separate zbiases for test and write.
* Ordering might be wrong.
* Some of the tests indicate that fgl has a fallback implementation of zbias
* via pixel shaders.
@@ -608,7 +608,7 @@ __FBSDID("$FreeBSD$");
* My guess is that there are two bits for each zbias primitive
* (FILL, LINE, POINT).
* One to enable depth test and one for depth write.
* Yet this doesnt explain why depth writes work ...
* Yet this doesn't explain why depth writes work ...
*/
#define R300_RE_OCCLUSION_CNTL 0x42B4
# define R300_OCCLUSION_ON (1<<1)
@@ -693,7 +693,7 @@ __FBSDID("$FreeBSD$");
* the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
* color register index.
*
* Apperently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
* Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
* R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
* See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
* correct or not. - Oliver.
@@ -817,8 +817,8 @@ __FBSDID("$FreeBSD$");
# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
/* NOTE: NEAREST doesnt seem to exist.
* Im not seting MAG_FILTER_MASK and (3 << 11) on for all
/* NOTE: NEAREST doesn't seem to exist.
* I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
* anisotropy modes because that would void selected mag filter
*/
# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)

View file

@@ -1790,7 +1790,7 @@ isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
* The firmware status (except for the QLTM_SVALID bit)
* indicates why this ATIO was sent to us.
*
* If QLTM_SVALID is set, the firware has recommended Sense Data.
* If QLTM_SVALID is set, the firmware has recommended Sense Data.
*
* If the DISCONNECTS DISABLED bit is set in the flags field,
* we're still connected on the SCSI bus.
@@ -1917,7 +1917,7 @@ isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep)
* The firmware status (except for the QLTM_SVALID bit)
* indicates why this ATIO was sent to us.
*
* If QLTM_SVALID is set, the firware has recommended Sense Data.
* If QLTM_SVALID is set, the firmware has recommended Sense Data.
*/
if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status);
@@ -2581,7 +2581,7 @@ isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot)
/*
* Note that we're just getting notification that an ELS was received
* (possibly with some associcated information sent upstream). This is
* (possibly with some associated information sent upstream). This is
* *not* the same as being given the ELS frame to accept or reject.
*/
switch (inot->in_status_subcode) {
@@ -2784,7 +2784,7 @@ isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp)
}
/*
* Handle task managment functions.
* Handle task management functions.
*
* We show up here with a notify structure filled out.
*
@@ -2894,7 +2894,7 @@ isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
}
/*
* Find the associated private data and makr it as dead so
* Find the associated private data and mark it as dead so
* we don't try to work on it any further.
*/
static void
@@ -5368,7 +5368,7 @@ isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
/*
* For channel zero just return what we have. For either ACIIVE or
* For channel zero just return what we have. For either ACTIVE or
* DEFAULT cases, we depend on default override of NVRAM values for
* channel zero.
*/
@@ -5404,7 +5404,7 @@ isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
* physical port on dual-port chips (23XX/24XX)
*
* This is somewhat nutty, particularly since bit 48 is
* irrelevant as they assign seperate serial numbers to
* irrelevant as they assign separate serial numbers to
* different physical ports anyway.
*
* We'll stick our channel number plus one first into bits

View file

@@ -257,7 +257,7 @@ TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
* Header split: this causes the hardware to DMA
* the header into a seperate mbuf from the payload,
* the header into a separate mbuf from the payload,
* it can be a performance win in some workloads, but
* in others it actually hurts, its off by default.
*/
@@ -3661,7 +3661,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
rxbuf = &rxr->rx_buffers[j];
/*
** Dont allocate mbufs if not
** Don't allocate mbufs if not
** doing header split, its wasteful
*/
if (rxr->hdr_split == FALSE)
@@ -4129,7 +4129,7 @@ ixgbe_rxeof(struct ix_queue *que, int count)
** not be fragmented across sequential
** descriptors, rather the next descriptor
** is indicated in bits of the descriptor.
** This also means that we might proceses
** This also means that we might process
** more than one packet at a time, something
** that has never been true before, it
** required eliminating global chain pointers
@@ -4478,14 +4478,14 @@ ixgbe_enable_intr(struct adapter *adapter)
/* With RSS we use auto clear */
if (adapter->msix_mem) {
mask = IXGBE_EIMS_ENABLE_MASK;
/* Dont autoclear Link */
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
/*
** Now enable all queues, this is done seperately to
** Now enable all queues, this is done separately to
** allow for handling the extended (beyond 32) MSIX
** vectors that can be used by 82599
*/

View file

@@ -1157,7 +1157,7 @@ mskc_setup_rambuffer(struct msk_softc *sc)
sc->msk_pflags |= MSK_FLAG_RAMBUF;
/*
* Give receiver 2/3 of memory and round down to the multiple
* of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple
* of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
* of 1024.
*/
sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
@@ -1621,7 +1621,7 @@ msk_attach(device_t dev)
*/
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
/*
* Enable Rx checksum offloading for VLAN taggedd frames
* Enable Rx checksum offloading for VLAN tagged frames
* if controller support new descriptor format.
*/
if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
@@ -1809,7 +1809,7 @@ mskc_attach(device_t dev)
* does not rely on status word of received frame
* in msk_rxeof() which in turn disables all
* hardware assistance bits reported by the status
* word as well as validity of the recevied frame.
* word as well as validity of the received frame.
* Just pass received frames to upper stack with
* minimal test and let upper stack handle them.
*/
@@ -2143,10 +2143,10 @@ msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
* what DMA address is used and chain another descriptor for the
* 64bits DMA operation. This also means descriptor ring size is
* variable. Limiting DMA address to be in 32bit address space greatly
* simplyfies descriptor handling and possibly would increase
* simplifies descriptor handling and possibly would increase
* performance a bit due to efficient handling of descriptors.
* Apart from harassing checksum offloading mechanisms, it seems
* it's really bad idea to use a seperate descriptor for 64bit
* it's really bad idea to use a separate descriptor for 64bit
* DMA operation to save small descriptor memory. Anyway, I've
* never seen these exotic scheme on ethernet interface hardware.
*/
@@ -2643,7 +2643,7 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
* Short UDP packets appear to be handled correctly by
* Yukon II. Also I assume this bug does not happen on
* controllers that use newer descriptor format or
* automatic Tx checksum calaulcation.
* automatic Tx checksum calculation.
*/
m = m_pullup(m, offset + sizeof(struct tcphdr));
if (m == NULL) {
@@ -2780,7 +2780,7 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
/* Update producer index. */
sc_if->msk_cdata.msk_tx_prod = prod;
/* Set EOP on the last desciptor. */
/* Set EOP on the last descriptor. */
prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
tx_le->msk_control |= htole32(EOP);
@@ -3321,7 +3321,7 @@ msk_intr_gmac(struct msk_if_softc *sc_if)
* XXX
* In case of Tx underrun, we may need to flush/reset
* Tx MAC but that would also require resynchronization
* with status LEs. Reintializing status LEs would
* with status LEs. Reinitializing status LEs would
* affect other port in dual MAC configuration so it
* should be avoided as possible as we can.
* Due to lack of documentation it's all vague guess but
@@ -3833,7 +3833,7 @@ msk_init_locked(struct msk_if_softc *sc_if)
msk_setvlan(sc_if, ifp);
if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
/* Set Rx Pause threshould. */
/* Set Rx Pause threshold. */
CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
MSK_ECU_LLPP);
CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),

View file

@@ -31,7 +31,7 @@
*/
/*
* PCCARD_API_LEVEL. When set to 5, we provide a 5.x compatable API
* PCCARD_API_LEVEL. When set to 5, we provide a 5.x compatible API
* for driver writers that have to share their code between 5.x and 6.x.
* The 5.x compatibility interfaces will be unsupported in 7.0, at which
* point we'll only support 6 and newer, etc.
@@ -191,7 +191,7 @@ enum {
PCCARD_IVAR_PRODEXT,
PCCARD_IVAR_FUNCTION_NUMBER,
PCCARD_IVAR_VENDOR_STR, /* CIS string for "Manufacturer" */
PCCARD_IVAR_PRODUCT_STR,/* CIS strnig for "Product" */
PCCARD_IVAR_PRODUCT_STR,/* CIS string for "Product" */
PCCARD_IVAR_CIS3_STR,
PCCARD_IVAR_CIS4_STR,
PCCARD_IVAR_FUNCTION,
@@ -254,7 +254,7 @@ enum {
#endif
/*
* Defines to decoe the get_funce_disk return value. See the PCMCIA standard
* Defines to decode the get_funce_disk return value. See the PCMCIA standard
* for all the details of what these bits mean.
*/
#define PFD_I_V_MASK 0x3

View file

@@ -70,7 +70,7 @@ __FBSDID("$FreeBSD$");
* for playback/capture.
* Since I couldn't find any documentation for APCDMA programming
* information, I guessed the usage of APCDMA from that of OpenBSD's
* driver. The EBDMA infomation of PCIO can be obtained from
* driver. The EBDMA information of PCIO can be obtained from
* http://solutions.sun.com/embedded/databook/web/microprocessors/pcio.html
* And CS4231A datasheet can also be obtained from
* ftp://ftp.alsa-project.org/pub/manuals/cirrus/4231a.pdf
@@ -1198,7 +1198,7 @@ cs4231_chan_fs(struct cs4231_softc *sc, int dir, u_int8_t fs)
/*
* capture channel
* cs4231 doesn't allow seperate fs setup for playback/capture.
* cs4231 doesn't allow separate fs setup for playback/capture.
* I believe this will break full-duplex operation.
*/
if (dir == PCMDIR_REC) {

View file

@@ -52,7 +52,7 @@ __FBSDID("$FreeBSD$");
/*
* XXX Placeholder.
* This calculations should be dynamically scaled by number of seperate sc
* This calculations should be dynamically scaled by number of separate sc
* devices. A base value of 'extra_history_size' should be defined for
* each syscons unit, and added and subtracted from the dynamic
* 'extra_history_size' as units are added and removed. This way, each time
@@ -86,7 +86,7 @@ int
sc_alloc_history_buffer(scr_stat *scp, int lines, int prev_ysize, int wait)
{
/*
* syscons unconditionally allocates buffers upto
* syscons unconditionally allocates buffers up to
* SC_HISTORY_SIZE lines or scp->ysize lines, whichever
* is larger. A value greater than that is allowed,
* subject to extra_history_size.
@@ -116,7 +116,7 @@ sc_alloc_history_buffer(scr_stat *scp, int lines, int prev_ysize, int wait)
delta = cur_lines - min_lines;
}
/* lines upto min_lines are always allowed. */
/* lines up to min_lines are always allowed. */
min_lines = imax(SC_HISTORY_SIZE, scp->ysize);
if (lines > min_lines) {
if (lines - min_lines > extra_history_size + delta) {

View file

@@ -172,8 +172,8 @@ uart_parse_tag(__const char **p)
/*
* Parse a device specification. The specification is a list of attributes
* seperated by commas. Each attribute is a tag-value pair with the tag and
* value seperated by a colon. Supported tags are:
* separated by commas. Each attribute is a tag-value pair with the tag and
* value separated by a colon. Supported tags are:
*
* br = Baudrate
* ch = Channel

View file

@@ -170,7 +170,7 @@ SLIST_HEAD(lphead, lock_prof);
/*
* Array of objects and profs for each type of object for each cpu. Spinlocks
* are handled seperately because a thread may be preempted and acquire a
* are handled separately because a thread may be preempted and acquire a
* spinlock while in the lock profiling code of a non-spinlock. In this way
* we only need a critical section to protect the per-cpu lists.
*/

View file

@@ -121,8 +121,8 @@ cpu_fork(register struct thread *td1,register struct proc *p2,
/* Copy p1's pcb, note that in this case
* our pcb also includes the td_frame being copied
* too. The older mips2 code did an additional copy
* of the td_frame, for us thats not needed any
* longer (this copy does them both
* of the td_frame, for us that's not needed any
* longer (this copy does them both)
*/
bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
@@ -312,7 +312,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
* Initialize machine state (pcb and trap frame) for a new thread about to
* upcall. Put enough state in the new thread's PCB to get it to go back
* userret(), where we can intercept it again to set the return (upcall)
* Address and stack, along with those from upcals that are from other sources
* Address and stack, along with those from upcalls that are from other sources
* such as those generated in thread_userret() itself.
*/
void
@@ -334,7 +334,7 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* at this time (see the matching comment below for
* more analysis) (need a good safe default).
* In MIPS, the trapframe is the first element of the PCB
* and gets copied when we copy the PCB. No seperate copy
* and gets copied when we copy the PCB. No separate copy
* is needed.
*/
bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

View file

@@ -108,7 +108,7 @@ struct sctp_paramhdr {
#define SCTP_MAX_BURST 0x00000019 /* rw */
/* assoc level context */
#define SCTP_CONTEXT 0x0000001a /* rw */
/* explict EOR signalling */
/* explicit EOR signalling */
#define SCTP_EXPLICIT_EOR 0x0000001b
#define SCTP_REUSE_PORT 0x0000001c /* rw */
#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
@@ -131,9 +131,9 @@ struct sctp_paramhdr {
* Blocking I/O is enabled on any TCP type socket by default. For the UDP
* model if this is turned on then the socket buffer is shared for send
* resources amongst all associations. The default for the UDP model is that
* is SS_NBIO is set. Which means all associations have a seperate send
* is SS_NBIO is set. Which means all associations have a separate send
* limit BUT they will NOT ever BLOCK instead you will get an error back
* EAGAIN if you try to send to much. If you want the blocking symantics you
* EAGAIN if you try to send too much. If you want the blocking semantics you
* set this option at the cost of sharing one socket send buffer size amongst
* all associations. Peeled off sockets turn this option off and block. But
* since both TCP and peeled off sockets have only one assoc per socket this
@@ -141,7 +141,7 @@ struct sctp_paramhdr {
* model OR peeled off UDP model, but we do allow you to do so. You just use
* the normal syscall to toggle SS_NBIO the way you want.
*
* Blocking I/O is controled by the SS_NBIO flag on the socket state so_state
* Blocking I/O is controlled by the SS_NBIO flag on the socket state so_state
* field.
*/
@@ -166,7 +166,7 @@ struct sctp_paramhdr {
/* Special hook for dynamically setting primary for all assoc's,
* this is a write only option that requires root privledge.
* this is a write only option that requires root privilege.
*/
#define SCTP_SET_DYNAMIC_PRIMARY 0x00002001
@@ -181,7 +181,7 @@ struct sctp_paramhdr {
* to. The endpoint, before binding, may select
* the "default" VRF it is in by using a set socket
* option with SCTP_VRF_ID. This will also
* get propegated to the default VRF. Once the
* get propagated to the default VRF. Once the
* endpoint binds an address then it CANNOT add
* additional VRF's to become a Multi-VRF endpoint.
*
@@ -308,7 +308,7 @@ struct sctp_paramhdr {
#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
/*
* error cause parameters (user visisble)
* error cause parameters (user visible)
*/
struct sctp_error_cause {
uint16_t code;

View file

@@ -316,7 +316,7 @@ typedef struct callout sctp_os_timer_t;
}
/* We make it so if you have up to 4 threads
* writting based on the default size of
* writing based on the default size of
* the packet log 65 k, that would be
* 4 16k packets before we would hit
* a problem.
@@ -353,7 +353,7 @@ typedef struct callout sctp_os_timer_t;
/* For BSD this just accesses the M_PKTHDR length
* so it operates on an mbuf with hdr flag. Other
* O/S's may have seperate packet header and mbuf
* O/S's may have separate packet header and mbuf
* chain pointers.. thus the macro.
*/
#define SCTP_HEADER_TO_CHAIN(m) (m)

View file

@@ -107,7 +107,7 @@ sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
* ... +--ifa-> ifa -> ifa
* vrf
*
* We keep these seperate lists since the SCTP subsystem will
* We keep these separate lists since the SCTP subsystem will
* point to these from its source address selection nets structure.
* When an address is deleted it does not happen right away on
* the SCTP side, it gets scheduled. What we do when a
@@ -191,7 +191,7 @@ sctp_find_ifn(void *ifn, uint32_t ifn_index)
struct sctp_ifnlist *hash_ifn_head;
/*
* We assume the lock is held for the addresses if thats wrong
* We assume the lock is held for the addresses if that's wrong
* problems could occur :-)
*/
hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
@@ -327,7 +327,7 @@ sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr,
len1 = strlen(if_name);
len2 = strlen(sctp_ifap->ifn_p->ifn_name);
if (len1 != len2) {
SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different lenght %d vs %d - ignored\n",
SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different length %d vs %d - ignored\n",
len1, len2);
goto out;
}
@@ -380,7 +380,7 @@ sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr,
len1 = strlen(if_name);
len2 = strlen(sctp_ifap->ifn_p->ifn_name);
if (len1 != len2) {
SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different lenght %d vs %d - ignored\n",
SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different length %d vs %d - ignored\n",
len1, len2);
goto out;
}
@@ -567,7 +567,7 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
} else {
if (sctp_ifap->ifn_p) {
/*
* The last IFN gets the address, removee
* The last IFN gets the address, remove
* the old one
*/
SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n",
@@ -1515,7 +1515,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
int fnd;
/*
* Endpoing probe expects that the INP_INFO is locked.
* Endpoint probe expects that the INP_INFO is locked.
*/
sin = NULL;
#ifdef INET6
@@ -1800,7 +1800,7 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
* endpoint is gone but there exists a connected socket for this guy
* yet. If so we can return the first one that we find. This may NOT
* be the correct one so the caller should be wary on the return
* INP. Currently the onlyc caller that sets this flag is in bindx
* INP. Currently the only caller that sets this flag is in bindx
* where we are verifying that a user CAN bind the address. He
* either has bound it already, or someone else has, or its open to
* bind, so this is good enough.
@@ -2005,7 +2005,7 @@ sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag
}
if (remote_tag) {
/*
* If we have both vtags thats all we match
* If we have both vtags that's all we match
* on
*/
if (stcb->asoc.peer_vtag == remote_tag) {
@@ -2183,7 +2183,7 @@ sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
* association that is linked to an existing
* association that is under the TCP pool (i.e. no
* listener exists). The endpoint finding routine
* will always find a listner before examining the
* will always find a listener before examining the
* TCP pool.
*/
if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
@@ -3554,7 +3554,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
SCTP_INP_INFO_WUNLOCK();
/*
* Now we release all locks. Since this INP cannot be found anymore
* except possbily by the kill timer that might be running. We call
* except possibly by the kill timer that might be running. We call
* the drain function here. It should hit the case were it sees the
* ACTIVE flag cleared and exit out freeing us to proceed and
* destroy everything.
@@ -3716,7 +3716,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
{
/*
* The following is redundant to the same lines in the
* sctp_aloc_assoc() but is needed since other's call the add
* sctp_aloc_assoc() but is needed since others call the add
* address function
*/
struct sctp_nets *net, *netfirst;
@@ -4034,7 +4034,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
(stcb->asoc.primary_destination)) {
/*
* first one on the list is NOT the primary sctp_cmpaddr()
* is much more efficent if the primary is the first on the
* is much more efficient if the primary is the first on the
* list, make it so.
*/
TAILQ_REMOVE(&stcb->asoc.nets,
@@ -4176,7 +4176,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
/*
* If you have not performed a bind, then we need to do the
* ephemerial bind for you.
* ephemeral bind for you.
*/
if ((err = sctp_inpcb_bind(inp->sctp_socket,
(struct sockaddr *)NULL,
@@ -5159,7 +5159,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
SCTP_INP_RUNLOCK(inp);
/*
* This will start the kill timer (if we are the
* lastone) since we hold an increment yet. But this
* last one) since we hold an increment yet. But this
* is the only safe way to do this since otherwise
* if the socket closes at the same time we are here
* we might collide in the cleanup.
@@ -6461,7 +6461,7 @@ sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
if (net != stcb->asoc.primary_destination) {
/*
* first one on the list is NOT the primary
* sctp_cmpaddr() is much more efficent if the
* sctp_cmpaddr() is much more efficient if the
* primary is the first on the list, make it so.
*/
TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
@@ -6676,7 +6676,7 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
}
/*
* Another issue, in un-setting the TSN's in the mapping array we
* DID NOT adjust the higest_tsn marker. This will cause one of two
* DID NOT adjust the highest_tsn marker. This will cause one of two
* things to occur. It may cause us to do extra work in checking for
* our mapping array movement. More importantly it may cause us to
* SACK every datagram. This may not be a bad thing though since we

View file

@@ -80,7 +80,7 @@ struct sctp_initmsg {
/* We add 96 bytes to the size of sctp_sndrcvinfo.
* This makes the current structure 128 bytes long
* which is nicely 64 bit aligned but also has room
* for us to add more and keep ABI compatability.
* for us to add more and keep ABI compatibility.
* For example, already we have the sctp_extrcvinfo
* when enabled which is 48 bytes.
*/
@@ -304,7 +304,7 @@ struct sctp_setadaptation {
uint32_t ssb_adaptation_ind;
};
/* compatable old spelling */
/* compatible old spelling */
struct sctp_adaption_event {
uint16_t sai_type;
uint16_t sai_flags;
@@ -396,7 +396,7 @@ union sctp_notification {
struct sctp_send_failed sn_send_failed;
struct sctp_shutdown_event sn_shutdown_event;
struct sctp_adaptation_event sn_adaptation_event;
/* compatability same as above */
/* compatibility same as above */
struct sctp_adaption_event sn_adaption_event;
struct sctp_pdapi_event sn_pdapi_event;
struct sctp_authkey_event sn_auth_event;
@@ -417,7 +417,7 @@ union sctp_notification {
#define SCTP_AUTHENTICATION_EVENT 0x0008
#define SCTP_STREAM_RESET_EVENT 0x0009
#define SCTP_SENDER_DRY_EVENT 0x000a
#define SCTP__NOTIFICATIONS_STOPPED_EVENT 0x000b /* we dont send this */
#define SCTP__NOTIFICATIONS_STOPPED_EVENT 0x000b /* we don't send this */
/*
* socket option structs
*/
@@ -893,7 +893,7 @@ struct sctpstat {
uint32_t sctps_earlyfrstrid;
uint32_t sctps_earlyfrstrout;
uint32_t sctps_earlyfrstrtmr;
/* otheres */
/* others */
uint32_t sctps_hdrops; /* packet shorter than header */
uint32_t sctps_badsum; /* checksum error */
uint32_t sctps_noport; /* no endpoint for port */
@@ -904,8 +904,8 @@ struct sctpstat {
* RTT window */
uint32_t sctps_markedretrans;
uint32_t sctps_naglesent; /* nagle allowed sending */
uint32_t sctps_naglequeued; /* nagle does't allow sending */
uint32_t sctps_maxburstqueued; /* max burst dosn't allow sending */
uint32_t sctps_naglequeued; /* nagle doesn't allow sending */
uint32_t sctps_maxburstqueued; /* max burst doesn't allow sending */
uint32_t sctps_ifnomemqueued; /* look ahead tells us no memory in
* interface ring buffer OR we had a
* send error and are queuing one
@@ -931,7 +931,7 @@ struct sctpstat {
uint32_t sctps_wu_sacks_sent; /* Window Update only sacks sent */
uint32_t sctps_sends_with_flags; /* number of sends with
* sinfo_flags !=0 */
uint32_t sctps_sends_with_unord /* number of undordered sends */ ;
uint32_t sctps_sends_with_unord; /* number of unordered sends */
uint32_t sctps_sends_with_eof; /* number of sends with EOF flag set */
uint32_t sctps_sends_with_abort; /* number of sends with ABORT
* flag set */
@@ -943,7 +943,7 @@ struct sctpstat {
* with peek */
uint32_t sctps_cached_chk; /* Number of cached chunks used */
uint32_t sctps_cached_strmoq; /* Number of cached stream oq's used */
uint32_t sctps_left_abandon; /* Number of unread message abandonded
uint32_t sctps_left_abandon; /* Number of unread messages abandoned
* by close */
uint32_t sctps_send_burst_avoid; /* Unused */
uint32_t sctps_send_cwnd_avoid; /* Send cwnd full avoidance, already

View file

@@ -133,7 +133,7 @@ struct rule {
/*
* Text format for the rule string is that a rule consists of a
* comma-seperated list of elements. Each element is in the form
* comma-separated list of elements. Each element is in the form
* idtype:id:protocol:portnumber, and constitutes granting of permission
* for the specified binding.
*/

View file

@@ -251,7 +251,7 @@ valid_format(const char *fmt)
fmt++;
break;
}
/* flags, width and precsision */
/* flags, width and precision */
if (isdigit((unsigned char)*fmt) ||
strchr("+- 0#.", *fmt))
continue;
@@ -329,7 +329,7 @@ unescape(char *orig)
*orig = c;
--cp;
continue;
case 'x': /* hexidecimal number */
case 'x': /* hexadecimal number */
cp++; /* skip 'x' */
for (i = 0, c = 0;
isxdigit((unsigned char)*cp) && i < 2;
@@ -402,7 +402,7 @@ decimal_places(const char *number)
/*
* generate_format - create a format string
*
* XXX to be bug for bug compatable with Plan9 and GNU return "%g"
* XXX to be bug for bug compatible with Plan9 and GNU return "%g"
* when "%g" prints as "%e" (this way no width adjustments are made)
*/
char *

View file

@@ -26,7 +26,7 @@
* was a huge problem for 'make buildkernel' which was run with the installed
* /usr/sbin/config, not a cross built one. We started bumping the version
* number as a way to trap cases where the previous installworld was not
* compatable with the new buildkernel. The buildtools phase and much more
* compatible with the new buildkernel. The buildtools phase and much more
* comprehensive error code returns solved this original problem.
*
* Most end-users will use buildkernel and the build tools from buildworld.