powerpc: Fix typos

Fix typos, most reported by "codespell arch/powerpc".  Only touches
comments, no code changes.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240103231605.1801364-8-helgaas@kernel.org
Author:    Bjorn Helgaas, 2024-01-03 17:16:04 -06:00
Committer: Michael Ellerman
Parent:    39434af10f
Commit:    0ddbbb8960
29 changed files with 40 additions and 40 deletions

@@ -108,8 +108,8 @@ DTC_FLAGS ?= -p 1024
 # these files into the build dir, fix up any includes and ensure that dependent
 # files are copied in the right order.
-# these need to be seperate variables because they are copied out of different
-# directories in the kernel tree. Sure you COULd merge them, but it's a
+# these need to be separate variables because they are copied out of different
+# directories in the kernel tree. Sure you COULD merge them, but it's a
 # cure-is-worse-than-disease situation.
 zlib-decomp-$(CONFIG_KERNEL_GZIP) := decompress_inflate.c
 zlib-$(CONFIG_KERNEL_GZIP) := inffast.c inflate.c inftrees.c

@@ -172,7 +172,7 @@ ieee1588@ef602800 {
 reg = <0xef602800 0x60>;
 interrupt-parent = <&UIC0>;
 interrupts = <0x4 0x4>;
-/* This thing is a bit weird. It has it's own UIC
+/* This thing is a bit weird. It has its own UIC
 * that it uses to generate snapshot triggers. We
 * don't really support this device yet, and it needs
 * work to figure this out.

@@ -188,7 +188,7 @@ static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { }
 /* A buffer that may be edited by tools operating on a zImage binary so as to
 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
-* The buffer is put in it's own section so that tools may locate it easier.
+* The buffer is put in its own section so that tools may locate it easier.
 */
 static char cmdline[BOOT_COMMAND_LINE_SIZE]
 __attribute__((__section__("__builtin_cmdline")));

@@ -25,7 +25,7 @@ BSS_STACK(4096);
 /* A buffer that may be edited by tools operating on a zImage binary so as to
 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
-* The buffer is put in it's own section so that tools may locate it easier.
+* The buffer is put in its own section so that tools may locate it easier.
 */
 static char cmdline[BOOT_COMMAND_LINE_SIZE]

@@ -982,7 +982,7 @@ static inline phys_addr_t page_to_phys(struct page *page)
 }
 /*
-* 32 bits still uses virt_to_bus() for it's implementation of DMA
+* 32 bits still uses virt_to_bus() for its implementation of DMA
 * mappings se we have to keep it defined here. We also have some old
 * drivers (shame shame shame) that use bus_to_virt() and haven't been
 * fixed yet so I need to define it here.

@@ -1027,10 +1027,10 @@ struct opal_i2c_request {
 * The host will pass on OPAL, a buffer of length OPAL_SYSEPOW_MAX
 * with individual elements being 16 bits wide to fetch the system
 * wide EPOW status. Each element in the buffer will contain the
-* EPOW status in it's bit representation for a particular EPOW sub
+* EPOW status in its bit representation for a particular EPOW sub
 * class as defined here. So multiple detailed EPOW status bits
 * specific for any sub class can be represented in a single buffer
-* element as it's bit representation.
+* element as its bit representation.
 */
 /* System EPOW type */

@@ -192,7 +192,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
 /* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
 * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
-* it's reset line
+* its reset line
 */
 #define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)

@@ -144,7 +144,7 @@
 #define UNI_N_HWINIT_STATE_SLEEPING 0x01
 #define UNI_N_HWINIT_STATE_RUNNING 0x02
 /* This last bit appear to be used by the bootROM to know the second
-* CPU has started and will enter it's sleep loop with IP=0
+* CPU has started and will enter its sleep loop with IP=0
 */
 #define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000

@@ -108,7 +108,7 @@ typedef struct boot_infos
 /* ALL BELOW NEW (vers. 4) */
 /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
-(non-PCI) only. On PCI, memory is contiguous and it's size is in the
+(non-PCI) only. On PCI, memory is contiguous and its size is in the
 device-tree. */
 boot_info_map_entry_t
 physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */

@@ -527,7 +527,7 @@ EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
 * eeh_pe_mark_isolated
 * @pe: EEH PE
 *
-* Record that a PE has been isolated by marking the PE and it's children as
+* Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */

@@ -681,7 +681,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
 * old_cpu == -1 means this is the first CPU which has come here,
 * go ahead and trigger fadump.
 *
-* old_cpu != -1 means some other CPU has already on it's way
+* old_cpu != -1 means some other CPU has already on its way
 * to trigger fadump, just keep looping here.
 */
 this_cpu = smp_processor_id();

@@ -192,7 +192,7 @@ _GLOBAL(scom970_read)
 xori r0,r0,MSR_EE
 mtmsrd r0,1
-/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
 * (including parity). On current CPUs they must be 0'd,
 * and finally or in RW bit
 */
@@ -226,7 +226,7 @@ _GLOBAL(scom970_write)
 xori r0,r0,MSR_EE
 mtmsrd r0,1
-/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
 * (including parity). On current CPUs they must be 0'd.
 */

@@ -1661,7 +1661,7 @@ void arch_setup_new_exec(void)
 * cases will happen:
 *
 * 1. The correct thread is running, the wrong thread is not
-* In this situation, the correct thread is woken and proceeds to pass it's
+* In this situation, the correct thread is woken and proceeds to pass its
 * condition check.
 *
 * 2. Neither threads are running
@@ -1671,15 +1671,15 @@
 * for the wrong thread, or they will execute the condition check immediately.
 *
 * 3. The wrong thread is running, the correct thread is not
-* The wrong thread will be woken, but will fail it's condition check and
+* The wrong thread will be woken, but will fail its condition check and
 * re-execute wait. The correct thread, when scheduled, will execute either
-* it's condition check (which will pass), or wait, which returns immediately
-* when called the first time after the thread is scheduled, followed by it's
+* its condition check (which will pass), or wait, which returns immediately
+* when called the first time after the thread is scheduled, followed by its
 * condition check (which will pass).
 *
 * 4. Both threads are running
-* Both threads will be woken. The wrong thread will fail it's condition check
-* and execute another wait, while the correct thread will pass it's condition
+* Both threads will be woken. The wrong thread will fail its condition check
+* and execute another wait, while the correct thread will pass its condition
 * check.
 *
 * @t: the task to set the thread ID for

@@ -12,7 +12,7 @@ void flush_tmregs_to_thread(struct task_struct *tsk)
 {
 /*
 * If task is not current, it will have been flushed already to
-* it's thread_struct during __switch_to().
+* its thread_struct during __switch_to().
 *
 * A reclaim flushes ALL the state or if not in TM save TM SPRs
 * in the appropriate thread structures from live.

@@ -1567,7 +1567,7 @@ static void add_cpu_to_masks(int cpu)
 /*
 * This CPU will not be in the online mask yet so we need to manually
-* add it to it's own thread sibling mask.
+* add it to its own thread sibling mask.
 */
 map_cpu_to_node(cpu, cpu_to_node(cpu));
 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

@@ -139,7 +139,7 @@ static unsigned long dscr_default;
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
-* for any cpu which is contained in it's PACA structure.
+* for any cpu which is contained in its PACA structure.
 */
 static void read_dscr(void *val)
 {
@@ -152,7 +152,7 @@ static void read_dscr(void *val)
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
-* for any cpu which is contained in it's PACA structure.
+* for any cpu which is contained in its PACA structure.
 */
 static void write_dscr(void *val)
 {

@@ -531,7 +531,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 xc->cppr = xive_prio_from_guest(new_cppr);
 /*
-* IPIs are synthetized from MFRR and thus don't need
+* IPIs are synthesized from MFRR and thus don't need
 * any special EOI handling. The underlying interrupt
 * used to signal MFRR changes is EOId when fetched from
 * the queue.

@@ -78,7 +78,7 @@ EXPORT_SYMBOL(flush_icache_range);
 #ifdef CONFIG_HIGHMEM
 /**
-* flush_dcache_icache_phys() - Flush a page by it's physical address
+* flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
 static void flush_dcache_icache_phys(unsigned long physaddr)

@@ -376,7 +376,7 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
 create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
 }
-/* Copy the kernel to it's new location and run */
+/* Copy the kernel to its new location and run */
 memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
 flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);

@@ -279,7 +279,7 @@ static void __init mpc512x_setup_diu(void)
 * and so negatively affect boot time. Instead we reserve the
 * already configured frame buffer area so that it won't be
 * destroyed. The starting address of the area to reserve and
-* also it's length is passed to memblock_reserve(). It will be
+* also its length is passed to memblock_reserve(). It will be
 * freed later on first open of fbdev, when splash image is not
 * needed any more.
 */

@@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 }
 /**
-* spu_deactivate - unbind a context from it's physical spu
+* spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule

@@ -595,7 +595,7 @@ void __init maple_pci_init(void)
 /* Probe root PCI hosts, that is on U3 the AGP host and the
 * HyperTransport host. That one is actually "kept" around
-* and actually added last as it's resource management relies
+* and actually added last as its resource management relies
 * on the AGP resources to have been setup first
 */
 root = of_find_node_by_path("/");

@@ -2,7 +2,7 @@
 /*
 * Support for the interrupt controllers found on Power Macintosh,
 * currently Apple's "Grand Central" interrupt controller in all
-* it's incarnations. OpenPIC support used on newer machines is
+* its incarnations. OpenPIC support used on newer machines is
 * in a separate file
 *
 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)

@@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 * memory location containing the PC to resume from
 * at address 0.
 * - On Core99, we must store the wakeup vector at
-* address 0x80 and eventually it's parameters
+* address 0x80 and eventually its parameters
 * at address 0x84. I've have some trouble with those
 * parameters however and I no longer use them.
 */

@@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
 } else if (pdev->is_physfn) {
 /*
 * For PFs adjust their allocated IOV resources to match what
-* the PHB can support using it's M64 BAR table.
+* the PHB can support using its M64 BAR table.
 */
 pnv_pci_ioda_fixup_iov_resources(pdev);
 }
@@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 list_add_tail(&pe->list, &phb->ioda.pe_list);
 mutex_unlock(&phb->ioda.pe_list_mutex);
-/* associate this pe to it's pdn */
+/* associate this pe to its pdn */
 list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
 if (vf_pdn->busno == vf_bus &&
 vf_pdn->devfn == vf_devfn) {

@@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
 }
 } else {
 /*
-* Interrupt hanlder or fault window setup failed. Means
+* Interrupt handler or fault window setup failed. Means
 * NX can not generate fault for page fault. So not
 * opening for user space tx window.
 */

@@ -228,7 +228,7 @@ static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
 struct pseries_vas_window *txwin = data;
 /*
-* The thread hanlder will process this interrupt if it is
+* The thread handler will process this interrupt if it is
 * already running.
 */
 atomic_inc(&txwin->pending_faults);

@@ -383,7 +383,7 @@ static unsigned int xive_get_irq(void)
 * CPU.
 *
 * If we find that there is indeed more in there, we call
-* force_external_irq_replay() to make Linux synthetize an
+* force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
 static void xive_do_queue_eoi(struct xive_cpu *xc)
@@ -874,7 +874,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 *
 * This also tells us that it's in flight to a host queue
 * or has already been fetched but hasn't been EOIed yet
-* by the host. This it's potentially using up a host
+* by the host. Thus it's potentially using up a host
 * queue slot. This is important to know because as long
 * as this is the case, we must not hard-unmask it when
 * "returning" that interrupt to the host.

@@ -415,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
 return;
 }
-/* Grab it's CAM value */
+/* Grab its CAM value */
 rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
 if (rc) {
 pr_err("Failed to get pool VP info CPU %d\n", cpu);