arm64: Switch the address argument to cpu_*cache* to a pointer

No functional change, but this reduces diffs with CheriBSD downstream.

Reviewed by:	andrew
Sponsored by:	University of Cambridge, Google, Inc.
Differential Revision:	https://reviews.freebsd.org/D44342
John Baldwin 2024-03-15 10:09:49 -07:00
parent eab7ae7811
commit 1e3f42b6ba
12 changed files with 50 additions and 50 deletions
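In practice the change is a prototype change plus casts at call sites. A minimal compilable sketch of the before/after shape of the API (the _old name and example_caller() are hypothetical, added only for illustration; vm_offset_t and vm_size_t are typedef'd here as stand-ins for the kernel types):

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t vm_offset_t;	/* stand-in for the kernel's integer address type */
typedef size_t vm_size_t;

/* Old shape: the address argument was an integer virtual address. */
void arm64_dcache_wb_range_old(vm_offset_t va, vm_size_t len);

/* New shape: the address argument is a pointer. */
void arm64_dcache_wb_range(void *va, vm_size_t len);

static void
example_caller(vm_offset_t vaddr, vm_size_t len)
{
	/* Call sites that only hold an integer address now cast it. */
	arm64_dcache_wb_range((void *)vaddr, len);
}

The pointer form is presumably what shrinks the CheriBSD diff: a pointer can carry capability metadata that a plain integer address cannot.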


@@ -985,15 +985,15 @@ bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 }
 
 static void
-dma_preread_safe(vm_offset_t va, vm_size_t size)
+dma_preread_safe(char *va, vm_size_t size)
 {
 	/*
 	 * Write back any partial cachelines immediately before and
 	 * after the DMA region.
 	 */
-	if (va & (dcache_line_size - 1))
+	if (!__is_aligned(va, dcache_line_size))
 		cpu_dcache_wb_range(va, 1);
-	if ((va + size) & (dcache_line_size - 1))
+	if (!__is_aligned(va + size, dcache_line_size))
 		cpu_dcache_wb_range(va + size, 1);
 
 	cpu_dcache_inv_range(va, size);
@@ -1030,7 +1030,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 	switch (op) {
 	case BUS_DMASYNC_PREWRITE:
 	case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
-		cpu_dcache_wb_range(va, len);
+		cpu_dcache_wb_range((void *)va, len);
 		break;
 	case BUS_DMASYNC_PREREAD:
 		/*
@@ -1043,11 +1043,11 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
 		 * misalignment. Buffers which are not mbufs bounce if
 		 * they are not aligned to a cacheline.
 		 */
-		dma_preread_safe(va, len);
+		dma_preread_safe((void *)va, len);
 		break;
 	case BUS_DMASYNC_POSTREAD:
 	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
-		cpu_dcache_inv_range(va, len);
+		cpu_dcache_inv_range((void *)va, len);
 		break;
 	default:
 		panic("unsupported combination of sync operations: "
@@ -1097,7 +1097,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 			if (tempvaddr != 0)
 				pmap_quick_remove_page(tempvaddr);
 			if ((map->flags & DMAMAP_COHERENT) == 0)
-				cpu_dcache_wb_range(bpage->vaddr,
+				cpu_dcache_wb_range((void *)bpage->vaddr,
 				    bpage->datacount);
 			bpage = STAILQ_NEXT(bpage, links);
 		}
@@ -1105,7 +1105,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	} else if ((op & BUS_DMASYNC_PREREAD) != 0) {
 		while (bpage != NULL) {
 			if ((map->flags & DMAMAP_COHERENT) == 0)
-				cpu_dcache_wbinv_range(bpage->vaddr,
+				cpu_dcache_wbinv_range((void *)bpage->vaddr,
 				    bpage->datacount);
 			bpage = STAILQ_NEXT(bpage, links);
 		}
@@ -1114,7 +1114,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
 	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
 		while (bpage != NULL) {
 			if ((map->flags & DMAMAP_COHERENT) == 0)
-				cpu_dcache_inv_range(bpage->vaddr,
+				cpu_dcache_inv_range((void *)bpage->vaddr,
 				    bpage->datacount);
 			tempvaddr = 0;
 			datavaddr = bpage->datavaddr;
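An aside on the dma_preread_safe() hunk above: the open-coded mask test was replaced by __is_aligned(), which works equally well on the new pointer argument. A rough userspace sketch of the same partial-cacheline check (is_aligned() here is a hypothetical stand-in for the kernel macro, and the line size is hard-coded):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's __is_aligned() macro. */
#define	is_aligned(x, align)	((((uintptr_t)(x)) & ((align) - 1)) == 0)

int
main(void)
{
	static char buf[256];
	char *va = buf + 3;		/* deliberately misaligned start */
	size_t size = 100;
	size_t dcache_line_size = 64;	/* typical arm64 line size */

	/*
	 * Mirrors dma_preread_safe(): partial cachelines at either end of
	 * the DMA region must be written back before the invalidate.
	 */
	if (!is_aligned(va, dcache_line_size))
		printf("write back partial line at start\n");
	if (!is_aligned(va + size, dcache_line_size))
		printf("write back partial line at end\n");
	return (0);
}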


@@ -104,7 +104,7 @@ ENTRY(arm64_tlb_flushID)
 END(arm64_tlb_flushID)
 
 /*
- * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_wb_range(void *, vm_size_t)
  */
 ENTRY(arm64_dcache_wb_range)
 	cache_handle_range	dcop = cvac
@@ -112,7 +112,7 @@ ENTRY(arm64_dcache_wb_range)
 END(arm64_dcache_wb_range)
 
 /*
- * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_wbinv_range(void *, vm_size_t)
  */
 ENTRY(arm64_dcache_wbinv_range)
 	cache_handle_range	dcop = civac
@@ -120,7 +120,7 @@ ENTRY(arm64_dcache_wbinv_range)
 END(arm64_dcache_wbinv_range)
 
 /*
- * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ * void arm64_dcache_inv_range(void *, vm_size_t)
  *
  * Note, we must not invalidate everything. If the range is too big we
  * must use wb-inv of the entire cache.
@@ -131,7 +131,7 @@ ENTRY(arm64_dcache_inv_range)
 END(arm64_dcache_inv_range)
 
 /*
- * void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_dic_idc_icache_sync_range(void *, vm_size_t)
  * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
  * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb.
  */
@@ -142,7 +142,7 @@ ENTRY(arm64_dic_idc_icache_sync_range)
 END(arm64_dic_idc_icache_sync_range)
 
 /*
- * void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t)
  * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
  */
 ENTRY(arm64_idc_aliasing_icache_sync_range)
@@ -154,7 +154,7 @@ ENTRY(arm64_idc_aliasing_icache_sync_range)
 END(arm64_idc_aliasing_icache_sync_range)
 
 /*
- * void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t)
+ * void arm64_aliasing_icache_sync_range(void *, vm_size_t)
  */
 ENTRY(arm64_aliasing_icache_sync_range)
 	/*
@@ -170,7 +170,7 @@ ENTRY(arm64_aliasing_icache_sync_range)
 END(arm64_aliasing_icache_sync_range)
 
 /*
- * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
+ * int arm64_icache_sync_range_checked(void *, vm_size_t)
 */
 ENTRY(arm64_icache_sync_range_checked)
 	adr	x5, cache_maint_fault


@@ -175,7 +175,7 @@ db_write_bytes(vm_offset_t addr, size_t size, char *data)
 			 * Ensure the I & D cache are in sync if we wrote
 			 * to executable memory.
 			 */
-			cpu_icache_sync_range(addr, (vm_size_t)size);
+			cpu_icache_sync_range((void *)addr, (vm_size_t)size);
 		}
 	}
 	(void)kdb_jmpbuf(prev_jb);


@@ -299,7 +299,7 @@ elf_cpu_load_file(linker_file_t lf)
 {
 
 	if (lf->id != 1)
-		cpu_icache_sync_range((vm_offset_t)lf->address, lf->size);
+		cpu_icache_sync_range(lf->address, lf->size);
 
 	return (0);
 }


@@ -94,7 +94,8 @@ freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
 			return (error);
 		if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff)
 			return (EINVAL);
-		cpu_icache_sync_range_checked(args.addr, args.size);
+		cpu_icache_sync_range_checked(
+		    (void *)(uintptr_t)args.addr, args.size);
 		return 0;
 	}
 	case ARM_GET_VFPSTATE:


@@ -744,7 +744,7 @@ gicv3_its_conftable_init(struct gicv3_its_softc *sc)
 	    LPI_CONFTAB_SIZE);
 
 	/* Flush the table to memory */
-	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
+	cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
 }
 
 static void
@@ -761,7 +761,7 @@ gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
 		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
 
 		/* Flush so the ITS can see the memory */
-		cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
+		cpu_dcache_wb_range(sc->sc_pend_base[i],
 		    LPI_PENDTAB_SIZE);
 	}
 }
@@ -1158,7 +1158,7 @@ gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);
@@ -1182,7 +1182,7 @@ gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
+		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);
@@ -1396,12 +1396,11 @@ its_device_alloc(struct gicv3_its_softc *sc, int devid)
 	    ptable->ptab_page_size, 0);
 
 	if (!shareable)
-		cpu_dcache_wb_range((vm_offset_t)l2_table, ptable->ptab_l2_size);
+		cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size);
 
 	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
 	if (!shareable)
-		cpu_dcache_wb_range((vm_offset_t)&table[index],
-		    sizeof(table[index]));
+		cpu_dcache_wb_range(&table[index], sizeof(table[index]));
 	dsb(sy);
 
 	return (true);
@@ -1463,7 +1462,7 @@ its_device_get(device_t dev, device_t child, u_int nvecs)
 
 	/* Make sure device sees zeroed ITT. */
 	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
-		cpu_dcache_wb_range((vm_offset_t)its_dev->itt, its_dev->itt_size);
+		cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);
 
 	mtx_lock_spin(&sc->sc_its_dev_lock);
 	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
@@ -1861,7 +1860,7 @@ its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
 
 	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
 		/* Clean D-cache under command. */
-		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
+		cpu_dcache_wb_range(cmd, sizeof(*cmd));
 	} else {
 		/* DSB inner shareable, store */
 		dsb(ishst);


@@ -83,7 +83,7 @@ static void check_cpu_regs(u_int cpu, struct cpu_desc *desc,
  * The default implementation of I-cache sync assumes we have an
  * aliasing cache until we know otherwise.
  */
-void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
+void (*arm64_icache_sync_range)(void *, vm_size_t) =
     &arm64_aliasing_icache_sync_range;
 
 static int
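The identcpu.c hunk above only changes the function-pointer type; the dispatch pattern itself is unchanged: boot starts with the conservative aliasing implementation and CPU-feature detection can later swap in a cheaper one. A small standalone sketch of that pattern (all names here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the conservative and fast sync routines. */
static void
aliasing_icache_sync_range(void *va, size_t len)
{
	printf("conservative sync of %zu bytes at %p\n", len, va);
}

static void
dic_idc_icache_sync_range(void *va, size_t len)
{
	(void)va;
	(void)len;
	printf("DIC/IDC present: barriers are enough\n");
}

/* Default to the safe implementation until the CPU says otherwise. */
static void (*icache_sync_range)(void *, size_t) = aliasing_icache_sync_range;

int
main(void)
{
	char insn[64];

	icache_sync_range(insn, sizeof(insn));	/* before feature detection */
	icache_sync_range = dic_idc_icache_sync_range;
	icache_sync_range(insn, sizeof(insn));	/* after feature detection */
	return (0);
}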


@@ -4738,10 +4738,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
 		    (opa != pa || (orig_l3 & ATTR_S1_XN))) {
 			PMAP_ASSERT_STAGE1(pmap);
-			cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+			cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
+			    PAGE_SIZE);
 		}
 	} else {
-		cpu_dcache_wb_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+		cpu_dcache_wb_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
 	}
 
 	/*
@@ -5006,7 +5007,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 	if ((new_l2 & ATTR_S1_XN) == 0 && (PTE_TO_PHYS(new_l2) !=
 	    PTE_TO_PHYS(old_l2) || (old_l2 & ATTR_S1_XN) != 0) &&
 	    pmap != kernel_pmap && m->md.pv_memattr == VM_MEMATTR_WRITE_BACK) {
-		cpu_icache_sync_range(PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
+		cpu_icache_sync_range((void *)PHYS_TO_DMAP(PTE_TO_PHYS(new_l2)),
 		    L2_SIZE);
 	}
 
@@ -5219,7 +5220,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/* Sync icache before the mapping is stored to PTE */
 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
-		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
+		cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa), PAGE_SIZE);
 
 	pmap_store(l3, l3_val);
 	dsb(ishst);
@@ -6990,7 +6991,7 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
 			 * the cache.
 			 */
 			if (mode == VM_MEMATTR_UNCACHEABLE)
-				cpu_dcache_wbinv_range(tmpva, pte_size);
+				cpu_dcache_wbinv_range((void *)tmpva, pte_size);
 			tmpva += pte_size;
 		}
 	}
@@ -7673,7 +7674,7 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	if (ADDR_IS_KERNEL(va)) {
-		cpu_icache_sync_range(va, sz);
+		cpu_icache_sync_range((void *)va, sz);
 	} else {
 		u_int len, offset;
 		vm_paddr_t pa;
@@ -7686,7 +7687,8 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 			/* Extract the physical address & find it in the DMAP */
 			pa = pmap_extract(pmap, va);
 			if (pa != 0)
-				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
+				cpu_icache_sync_range((void *)PHYS_TO_DMAP(pa),
+				    len);
 
 			/* Move to the next page */
 			sz -= len;


@@ -177,20 +177,20 @@ extern int64_t dczva_line_size;
 #define cpu_dcache_inv_range(a, s) arm64_dcache_inv_range((a), (s))
 #define cpu_dcache_wb_range(a, s) arm64_dcache_wb_range((a), (s))
 
-extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);
+extern void (*arm64_icache_sync_range)(void *, vm_size_t);
 
 #define cpu_icache_sync_range(a, s) arm64_icache_sync_range((a), (s))
 #define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
 
 void arm64_nullop(void);
 void arm64_tlb_flushID(void);
-void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
-void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
-void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
-int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
-void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
-void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
-void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+void arm64_dic_idc_icache_sync_range(void *, vm_size_t);
+void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t);
+void arm64_aliasing_icache_sync_range(void *, vm_size_t);
+int arm64_icache_sync_range_checked(void *, vm_size_t);
+void arm64_dcache_wbinv_range(void *, vm_size_t);
+void arm64_dcache_inv_range(void *, vm_size_t);
+void arm64_dcache_wb_range(void *, vm_size_t);
 bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);
 
 #endif /* _KERNEL */


@@ -44,7 +44,7 @@ static __inline void
 kdb_cpu_sync_icache(unsigned char *addr, size_t size)
 {
 
-	cpu_icache_sync_range((vm_offset_t)addr, size);
+	cpu_icache_sync_range(addr, size);
 }
 
 static __inline void


@@ -77,7 +77,7 @@ fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val)
 		panic("%s: Unable to write new instruction", __func__);
 
 	*(fbt_patchval_t *)addr = val;
-	cpu_icache_sync_range((vm_offset_t)fbt->fbtp_patchpoint, 4);
+	cpu_icache_sync_range(fbt->fbtp_patchpoint, 4);
 }
 
 int


@@ -153,8 +153,7 @@ kinst_trampoline_populate(struct kinst_probe *kp)
 	kinst_memcpy(kp->kp_tramp, &kp->kp_savedval, INSN_SIZE);
 	kinst_memcpy(&kp->kp_tramp[INSN_SIZE], &bpt, INSN_SIZE);
 
-	cpu_icache_sync_range((vm_offset_t)kp->kp_tramp,
-	    (vm_size_t)KINST_TRAMP_SIZE);
+	cpu_icache_sync_range(kp->kp_tramp, KINST_TRAMP_SIZE);
 }
 
 /*
@@ -241,8 +240,7 @@ kinst_patch_tracepoint(struct kinst_probe *kp, kinst_patchval_t val)
 	if (!arm64_get_writable_addr((vm_offset_t)kp->kp_patchpoint, &addr))
 		panic("%s: Unable to write new instruction", __func__);
 	*(kinst_patchval_t *)addr = val;
-	cpu_icache_sync_range((vm_offset_t)kp->kp_patchpoint,
-	    (vm_size_t)INSN_SIZE);
+	cpu_icache_sync_range(kp->kp_patchpoint, INSN_SIZE);
 }
 
 static void