Drop more useless casts from void * to pointer

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <20221123133811.1398562-1-armbru@redhat.com>
Author: Markus Armbruster <armbru@redhat.com>
Date:   2022-11-23 14:38:11 +01:00
parent ea3a008d2d
commit 3d558330ad
15 changed files with 24 additions and 28 deletions
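
Background note (not part of the patch): in C, a `void *` value converts implicitly to and from any object pointer type, so casting a `g_malloc()` result, a callback's `userdata` argument, or an `opaque` field before assignment is redundant. A minimal standalone sketch of the rule, using invented names rather than QEMU code:

    #include <stdio.h>
    #include <stdlib.h>

    struct insn_data {
        unsigned long misses;
    };

    /* Callbacks often receive their state as void *, as in the hunks below. */
    static void on_event(void *userdata)
    {
        struct insn_data *insn = userdata;  /* void * -> struct insn_data *: implicit */
        insn->misses++;
    }

    int main(void)
    {
        struct insn_data *d = malloc(sizeof(*d));  /* void * from malloc: no cast */
        if (!d) {
            return 1;
        }
        d->misses = 0;
        on_event(d);  /* struct insn_data * -> void *: also implicit */
        printf("misses = %lu\n", d->misses);
        free(d);
        return 0;
    }

This is a C-only property; C++ would require an explicit cast when converting from `void *`.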

@@ -156,7 +156,7 @@ static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
     --p; --tmp; --len;
     if (--offset < 0) {
         offset = p % TARGET_PAGE_SIZE;
-        pag = (char *)page[p / TARGET_PAGE_SIZE];
+        pag = page[p / TARGET_PAGE_SIZE];
         if (!pag) {
             pag = g_try_malloc0(TARGET_PAGE_SIZE);
             page[p / TARGET_PAGE_SIZE] = pag;

@@ -405,7 +405,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
     g_mutex_lock(&l1_dcache_locks[cache_idx]);
     hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
     if (!hit_in_l1) {
-        insn = (InsnData *) userdata;
+        insn = userdata;
         __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
         l1_dcaches[cache_idx]->misses++;
     }
@@ -419,7 +419,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
     g_mutex_lock(&l2_ucache_locks[cache_idx]);
     if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
-        insn = (InsnData *) userdata;
+        insn = userdata;
         __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
         l2_ucaches[cache_idx]->misses++;
     }
@@ -440,7 +440,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
     g_mutex_lock(&l1_icache_locks[cache_idx]);
     hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
     if (!hit_in_l1) {
-        insn = (InsnData *) userdata;
+        insn = userdata;
         __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
         l1_icaches[cache_idx]->misses++;
     }
@@ -454,7 +454,7 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
     g_mutex_lock(&l2_ucache_locks[cache_idx]);
     if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
-        insn = (InsnData *) userdata;
+        insn = userdata;
         __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
         l2_ucaches[cache_idx]->misses++;
     }

@@ -193,7 +193,7 @@ vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,
 #if defined(__linux__) && defined(BLKDISCARD) && defined(BLKZEROOUT)
     VubDev *vdev_blk = req->vdev_blk;
-    desc = (struct virtio_blk_discard_write_zeroes *)buf;
+    desc = buf;
     uint64_t range[2] = { le64toh(desc->sector) << 9,
                           le32toh(desc->num_sectors) << 9 };
     if (type == VIRTIO_BLK_T_DISCARD) {

@@ -134,7 +134,7 @@ void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks)
         Clock **clkp;
         /* offset cannot be inside the DeviceState part */
         assert(elem->offset > sizeof(DeviceState));
-        clkp = (Clock **)(((void *) dev) + elem->offset);
+        clkp = ((void *)dev) + elem->offset;
         if (elem->is_output) {
            *clkp = qdev_init_clock_out(dev, elem->name);
         } else {
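
The replacement `clkp = ((void *)dev) + elem->offset;` also leans on arithmetic over `void *`, a GNU C extension (accepted by GCC and Clang, which QEMU builds with) that treats `void` as having size 1, so the addition counts bytes. A standalone sketch of the same byte-offset idiom, with made-up types; the `(char *)` variant is the strictly portable spelling:

    #include <stddef.h>
    #include <stdio.h>

    struct device {
        int id;
        int clk;  /* field we locate by byte offset */
    };

    int main(void)
    {
        struct device dev = { .id = 1, .clk = 42 };
        size_t off = offsetof(struct device, clk);

        int *p = (void *)&dev + off;          /* GNU C: void * arithmetic in bytes */
        int *q = (int *)((char *)&dev + off); /* ISO C equivalent */

        printf("%d %d\n", *p, *q);  /* prints: 42 42 */
        return 0;
    }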

@@ -2104,7 +2104,7 @@ static void process_message(VMBus *vmbus)
         goto out;
     }
     msgdata = hv_msg->payload;
-    msg = (struct vmbus_message_header *)msgdata;
+    msg = msgdata;
     trace_vmbus_process_incoming_message(msg->message_type);

@@ -1429,7 +1429,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
 {
     CadenceGEMState *s;
     uint32_t retval;
-    s = (CadenceGEMState *)opaque;
+    s = opaque;
     offset >>= 2;
     retval = s->regs[offset];

@@ -2472,7 +2472,7 @@ static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
     VirtioNetRscChain *chain;
     VirtioNetRscUnit unit;
-    chain = (VirtioNetRscChain *)opq;
+    chain = opq;
     hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
     if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)

@@ -4028,14 +4028,14 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
             nr_zones++;
         }
     }
-    header = (NvmeZoneReportHeader *)buf;
+    header = buf;
     header->nr_zones = cpu_to_le64(nr_zones);
     buf_p = buf + sizeof(NvmeZoneReportHeader);
     for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
         zone = &ns->zone_array[zone_idx];
         if (nvme_zone_matches_filter(zrasf, zone)) {
-            z = (NvmeZoneDescr *)buf_p;
+            z = buf_p;
             buf_p += sizeof(NvmeZoneDescr);
             z->zt = zone->d.zt;
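
This hunk shows the other recurring shape in the patch: a report is built by overlaying struct views onto one `void *` allocation, header first, fixed-size descriptors packed behind it, with both the view assignments and the cursor arithmetic now cast-free. A reduced sketch of that layout technique, using invented types (the real NVMe structures are larger and little-endian encoded):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct report_header {
        uint64_t nr_entries;
    };

    struct entry {
        uint32_t value;
    };

    int main(void)
    {
        size_t n = 3;
        void *buf = calloc(1, sizeof(struct report_header) + n * sizeof(struct entry));
        if (!buf) {
            return 1;
        }

        struct report_header *header = buf;  /* header view: no cast from void * */
        header->nr_entries = n;

        void *buf_p = buf + sizeof(struct report_header);  /* GNU C void * arithmetic */
        for (size_t i = 0; i < n; i++) {
            struct entry *e = buf_p;         /* descriptor view: implicit again */
            e->value = (uint32_t)i;
            buf_p += sizeof(struct entry);
        }

        printf("entries: %llu\n", (unsigned long long)header->nr_entries);
        free(buf);
        return 0;
    }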

@@ -269,8 +269,7 @@ static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
     r = g_malloc(sizeof(*r));
     *ring = r;
-    r->ring_state = (PvrdmaRingState *)
-        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+    r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
     if (!r->ring_state) {
         rdma_error_report("Failed to map to CQ ring state");
@@ -405,8 +404,7 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
     *rings = sr;
     /* Create send ring */
-    sr->ring_state = (PvrdmaRingState *)
-        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+    sr->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
     if (!sr->ring_state) {
         rdma_error_report("Failed to map to QP ring state");
         goto out_free_sr_mem;
@@ -646,8 +644,7 @@ static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
     r = g_malloc(sizeof(*r));
     *ring = r;
-    r->ring_state = (PvrdmaRingState *)
-        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
+    r->ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
     if (!r->ring_state) {
         rdma_error_report("Failed to map tp SRQ ring state");
         goto out_free_ring_mem;

@@ -149,7 +149,7 @@ void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle)
     ring = (PvrdmaRing *)qp->opaque;
-    wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring);
+    wqe = pvrdma_ring_next_elem_read(ring);
     while (wqe) {
         CompHandlerCtx *comp_ctx;
@@ -212,7 +212,7 @@ void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle)
     ring = &((PvrdmaRing *)qp->opaque)[1];
-    wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+    wqe = pvrdma_ring_next_elem_read(ring);
     while (wqe) {
         CompHandlerCtx *comp_ctx;
@@ -254,7 +254,7 @@ void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle)
     ring = (PvrdmaRing *)srq->opaque;
-    wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring);
+    wqe = pvrdma_ring_next_elem_read(ring);
     while (wqe) {
         CompHandlerCtx *comp_ctx;

@@ -775,8 +775,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
             output_size = s->config.probe_size + sizeof(tail);
             buf = g_malloc0(output_size);
-            ptail = (struct virtio_iommu_req_tail *)
-                (buf + s->config.probe_size);
+            ptail = buf + s->config.probe_size;
             ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
             break;
         }

@@ -5471,7 +5471,7 @@ static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
     for (i = 0; i < se->nb_fields; i++) {
         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
             assert(*field_types == TYPE_PTRVOID);
-            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
+            target_rt_dev_ptr = argptr + src_offsets[i];
             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
             if (*target_rt_dev_ptr != 0) {
                 *host_rt_dev_ptr = (unsigned long)lock_user_string(

@@ -388,7 +388,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
     MemTxAttrs attrs = { 0 };
     if (!df) {
-        ptr = (uint8_t *) buffer;
+        ptr = buffer;
     } else {
         ptr = buffer + size * count - size;
     }

@@ -73,11 +73,11 @@ int main(int argc, char *argv[argc])
     ml_printf("stack: %p <- %p\n", info.stack_limit, info.stack_base);
     /* finally can we read/write the heap */
-    ptr_to_heap = (uint32_t *) info.heap_base;
+    ptr_to_heap = info.heap_base;
     for (i = 0; i < 512; i++) {
         *ptr_to_heap++ = i;
     }
-    ptr_to_heap = (uint32_t *) info.heap_base;
+    ptr_to_heap = info.heap_base;
     for (i = 0; i < 512; i++) {
         uint32_t tmp = *ptr_to_heap;
         if (tmp != i) {

@@ -271,7 +271,7 @@ static void collect_usable_iova_ranges(QEMUVFIOState *s, void *buf)
         if (!cap->next) {
             return;
         }
-        cap = (struct vfio_info_cap_header *)(buf + cap->next);
+        cap = buf + cap->next;
     }
     cap_iova_range = (struct vfio_iommu_type1_info_cap_iova_range *)cap;
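
The cast kept on the last line marks the boundary of this cleanup: `cap` is a `struct vfio_info_cap_header *`, and conversion between two distinct object pointer types is never implicit in C; only conversions through `void *` are. A tiny sketch of the distinction, with hypothetical types:

    #include <stdint.h>

    struct cap_header { uint16_t id; uint16_t next; };
    struct cap_iova_range { struct cap_header header; uint64_t start, end; };

    static void walk(void *buf)
    {
        struct cap_header *cap = buf;                /* void * -> T *: implicit */
        struct cap_iova_range *range =
            (struct cap_iova_range *)cap;            /* T * -> U *: cast required */
        (void)range;
    }

    int main(void)
    {
        struct cap_iova_range r = { { 1, 0 }, 0, 4096 };
        walk(&r);
        return 0;
    }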