sparc64: Use xcall_deliver() consistently.

A few call sites were still dispatching manually to the appropriate
*_xcall_deliver() function; convert them to use xcall_deliver().

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2008-08-03 23:07:18 -07:00
parent 5e0797e5b8
commit 622824dbb5

View file

@ -890,40 +890,35 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
__local_flush_dcache_page(page); __local_flush_dcache_page(page);
} else if (cpu_online(cpu)) { } else if (cpu_online(cpu)) {
void *pg_addr = page_address(page); void *pg_addr = page_address(page);
u64 data0; u64 data0 = 0;
if (tlb_type == spitfire) { if (tlb_type == spitfire) {
data0 = data0 = ((u64)&xcall_flush_dcache_page_spitfire);
((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL) if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32); data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(pg_addr),
(u64) pg_addr,
mask);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) { } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE #ifdef DCACHE_ALIASING_POSSIBLE
data0 = data0 = ((u64)&xcall_flush_dcache_page_cheetah);
((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
__pa(pg_addr),
0, mask);
#endif #endif
} }
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, mask);
#ifdef CONFIG_DEBUG_DCFLUSH #ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall); atomic_inc(&dcpage_flushes_xcall);
#endif #endif
} }
}
put_cpu(); put_cpu();
} }
void flush_dcache_page_all(struct mm_struct *mm, struct page *page) void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{ {
void *pg_addr = page_address(page);
cpumask_t mask = cpu_online_map; cpumask_t mask = cpu_online_map;
u64 data0; void *pg_addr;
int this_cpu; int this_cpu;
u64 data0;
if (tlb_type == hypervisor) if (tlb_type == hypervisor)
return; return;
@ -937,25 +932,24 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
#endif #endif
if (cpus_empty(mask)) if (cpus_empty(mask))
goto flush_self; goto flush_self;
data0 = 0;
pg_addr = page_address(page);
if (tlb_type == spitfire) { if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire); data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL) if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32); data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(pg_addr),
(u64) pg_addr,
mask);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) { } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE #ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah); data0 = ((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
__pa(pg_addr),
0, mask);
#endif #endif
} }
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, mask);
#ifdef CONFIG_DEBUG_DCFLUSH #ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall); atomic_inc(&dcpage_flushes_xcall);
#endif #endif
}
flush_self: flush_self:
__local_flush_dcache_page(page); __local_flush_dcache_page(page);