Rework how drm maps are handled.

* On 32-bit platforms we steal the upper 4 bits of the map handle
  to store a unique map id.
* On 64-bit platforms we steal the upper 24 bits.

Resolves issues where the offsets handed to mmap could overlap the VRAM on some cards.
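
As an illustration (not part of the commit): a minimal sketch of the handle
encoding this implies, built from the DRM_MAP_HANDLE_BITS and
DRM_MAP_HANDLE_SHIFT macros introduced in this diff. The helper names here
are hypothetical:

    #include <stdint.h>

    #define DRM_MAP_HANDLE_BITS  (sizeof(void *) == 4 ? 4 : 24)
    #define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)

    /* Build the opaque mmap handle from a small per-map id.
     * (drm_addmap() gets the id from alloc_unr(), which starts at 1,
     * so a zero handle still means "no map".) */
    static inline uintptr_t
    map_id_to_handle(unsigned int id)
    {
        return ((uintptr_t)id << DRM_MAP_HANDLE_SHIFT);
    }

    /* drm_mmap() finds the map by comparing these id bits... */
    static inline unsigned int
    handle_to_map_id(uintptr_t handle)
    {
        return ((unsigned int)(handle >> DRM_MAP_HANDLE_SHIFT));
    }

    /* ...and the low bits are the offset within that map. */
    static inline uintptr_t
    handle_to_map_offset(uintptr_t handle)
    {
        return (handle & (((uintptr_t)1 << DRM_MAP_HANDLE_SHIFT) - 1));
    }

Because every handle now carries a nonzero map id in its top bits, the values
handed to mmap can no longer collide with physical VRAM offsets.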

Tested on: radeon, intel, mga, and via.

This will break nouveau.  I will spin new patches shortly.
Robert Noland, 2010-04-22 18:21:25 +00:00
commit 9c03c0d88c (parent 096924c9b7)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=207066
23 files changed, 201 insertions(+), 168 deletions(-)

diff --git a/... b/...

@@ -239,22 +239,22 @@ typedef u_int8_t u8;
 #define DRM_MEMORYBARRIER() mb()
 #define DRM_READ8(map, offset) \
-    *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset))
 #define DRM_READ16(map, offset) \
-    *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset))
 #define DRM_READ32(map, offset) \
-    *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset))
 #define DRM_WRITE8(map, offset, val) \
-    *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset)) = val
 #define DRM_WRITE16(map, offset, val) \
-    *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset)) = val
 #define DRM_WRITE32(map, offset, val) \
-    *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
+    *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \
         (vm_offset_t)(offset)) = val
 #define DRM_VERIFYAREA_READ( uaddr, size ) \
@@ -481,18 +481,21 @@ typedef struct drm_sg_mem {
     struct drm_dma_handle *dmah; /* Handle to PCI memory */
 } drm_sg_mem_t;
+#define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 4 : 24)
+#define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)
 typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
 typedef struct drm_local_map {
-    unsigned long offset;     /* Physical address (0 for SAREA)*/
-    unsigned long size;       /* Physical size (bytes) */
-    enum drm_map_type type;   /* Type of memory mapped */
-    enum drm_map_flags flags; /* Flags */
-    void *handle;             /* User-space: "Handle" to pass to mmap */
-                              /* Kernel-space: kernel-virtual address */
-    int mtrr;                 /* Boolean: MTRR used */
-    /* Private data */
-    int rid;                  /* PCI resource ID for bus_space */
+    unsigned long offset;     /* Physical address (0 for SAREA) */
+    unsigned long size;       /* Physical size (bytes) */
+    enum drm_map_type type;   /* Type of memory mapped */
+    enum drm_map_flags flags; /* Flags */
+    void *handle;             /* User-space: "Handle" to pass to mmap */
+                              /* Kernel-space: kernel-virtual address */
+    int mtrr;                 /* Boolean: MTRR used */
+    /* Private data */
+    int rid;                  /* PCI resource ID for bus_space */
+    void *virtual;            /* Kernel-space: kernel-virtual address */
     struct resource *bsr;
     bus_space_tag_t bst;
     bus_space_handle_t bsh;
@@ -643,6 +646,7 @@ struct drm_device {
     /* Linked list of mappable regions. Protected by dev_lock */
     drm_map_list_t maplist;
+    struct unrhdr *map_unrhdr;
     drm_local_map_t **context_sareas;
     int max_context;
@@ -973,17 +977,17 @@ drm_free(void *pt, size_t size, struct malloc_type *area)
 static __inline__ void
 drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
 {
-    map->handle = drm_ioremap_wc(dev, map);
+    map->virtual = drm_ioremap_wc(dev, map);
 }
 static __inline__ void
 drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
-    map->handle = drm_ioremap(dev, map);
+    map->virtual = drm_ioremap(dev, map);
 }
 static __inline__ void
 drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 {
-    if ( map->handle && map->size )
+    if ( map->virtual && map->size )
         drm_ioremapfree(map);
 }
@@ -994,7 +998,7 @@ drm_core_findmap(struct drm_device *dev, unsigned long offset)
     DRM_SPINLOCK_ASSERT(&dev->dev_lock);
     TAILQ_FOREACH(map, &dev->maplist, link) {
-        if (map->offset == offset)
+        if (offset == (unsigned long)map->handle)
             return map;
     }
     return NULL;

diff --git a/... b/...

@@ -156,10 +156,12 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
     map->size = size;
     map->type = type;
     map->flags = flags;
+    map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
+        DRM_MAP_HANDLE_SHIFT);
     switch (map->type) {
     case _DRM_REGISTERS:
-        map->handle = drm_ioremap(dev, map);
+        map->virtual = drm_ioremap(dev, map);
         if (!(map->flags & _DRM_WRITE_COMBINING))
             break;
         /* FALLTHROUGH */
@@ -168,25 +170,25 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
             map->mtrr = 1;
         break;
     case _DRM_SHM:
-        map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
+        map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
         DRM_DEBUG("%lu %d %p\n",
-            map->size, drm_order(map->size), map->handle);
-        if (!map->handle) {
+            map->size, drm_order(map->size), map->virtual);
+        if (!map->virtual) {
             free(map, DRM_MEM_MAPS);
             DRM_LOCK();
             return ENOMEM;
         }
-        map->offset = (unsigned long)map->handle;
+        map->offset = (unsigned long)map->virtual;
         if (map->flags & _DRM_CONTAINS_LOCK) {
             /* Prevent a 2nd X Server from creating a 2nd lock */
             DRM_LOCK();
             if (dev->lock.hw_lock != NULL) {
                 DRM_UNLOCK();
-                free(map->handle, DRM_MEM_MAPS);
+                free(map->virtual, DRM_MEM_MAPS);
                 free(map, DRM_MEM_MAPS);
                 return EBUSY;
             }
-            dev->lock.hw_lock = map->handle; /* Pointer to lock */
+            dev->lock.hw_lock = map->virtual; /* Pointer to lock */
             DRM_UNLOCK();
         }
         break;
@@ -224,7 +226,8 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
             DRM_LOCK();
             return EINVAL;
         }
-        map->offset += dev->sg->handle;
+        map->virtual = (void *)(dev->sg->handle + offset);
+        map->offset = dev->sg->handle + offset;
         break;
     case _DRM_CONSISTENT:
         /* Unfortunately, we don't get any alignment specification from
@@ -242,7 +245,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
             DRM_LOCK();
             return ENOMEM;
         }
-        map->handle = map->dmah->vaddr;
+        map->virtual = map->dmah->vaddr;
         map->offset = map->dmah->busaddr;
         break;
     default:
@@ -291,11 +294,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
     request->type = map->type;
     request->flags = map->flags;
     request->mtrr = map->mtrr;
-    request->handle = map->handle;
-    if (request->type != _DRM_SHM) {
-        request->handle = (void *)request->offset;
-    }
+    request->handle = (void *)map->handle;
     return 0;
 }
@@ -324,7 +323,7 @@ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
         }
         break;
     case _DRM_SHM:
-        free(map->handle, DRM_MEM_MAPS);
+        free(map->virtual, DRM_MEM_MAPS);
         break;
     case _DRM_AGP:
     case _DRM_SCATTER_GATHER:
@@ -342,6 +341,12 @@ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
             map->bsr);
     }
     DRM_UNLOCK();
+    if (map->handle)
+        free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
+            DRM_MAP_HANDLE_SHIFT);
+    DRM_LOCK();
     free(map, DRM_MEM_MAPS);
 }
@@ -1054,7 +1059,7 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
             goto done;
         }
         size = round_page(map->size);
-        foff = map->offset;
+        foff = (unsigned long)map->handle;
     } else {
         size = round_page(dma->byte_count),
         foff = 0;

diff --git a/... b/...

@@ -147,7 +147,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
     map = dev->context_sareas[request->ctx_id];
     DRM_UNLOCK();
-    request->handle = map->handle;
+    request->handle = (void *)map->handle;
     return 0;
 }

diff --git a/... b/...

@@ -434,6 +434,12 @@ static int drm_load(struct drm_device *dev)
     DRM_DEBUG("\n");
     TAILQ_INIT(&dev->maplist);
+    dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
+    if (dev->map_unrhdr == NULL) {
+        DRM_ERROR("Couldn't allocate map number allocator\n");
+        return EINVAL;
+    }
     drm_mem_init();
     drm_sysctl_init(dev);
@@ -565,6 +571,7 @@ static void drm_unload(struct drm_device *dev)
     }
     delete_unrhdr(dev->drw_unrhdr);
+    delete_unrhdr(dev->map_unrhdr);
     drm_mem_uninit();

diff --git a/... b/...

@@ -83,7 +83,7 @@ void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
 void drm_ioremapfree(drm_local_map_t *map)
 {
-    pmap_unmapdev((vm_offset_t) map->handle, map->size);
+    pmap_unmapdev((vm_offset_t) map->virtual, map->size);
 }
 int

diff --git a/... b/...

@@ -100,11 +100,14 @@ drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
     dev->sg = entry;
     DRM_UNLOCK();
-    DRM_DEBUG("handle=%08lx, kva=%p, contents=%08lx\n", entry->handle,
-        entry->virtual, *(unsigned long *)entry->virtual);
+    pmap_change_attr((vm_offset_t)dmah->vaddr, request->size,
+        PAT_WRITE_COMBINING);
     request->handle = entry->handle;
+    DRM_DEBUG("handle=%08lx, kva=%p, contents=%08lx\n", entry->handle,
+        entry->virtual, *(unsigned long *)entry->virtual);
     return 0;
 }

diff --git a/... b/...

@@ -188,7 +188,7 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
     DRM_UNLOCK();
     DRM_SYSCTL_PRINT("\nslot offset size "
-        "type flags address mtrr\n");
+        "type flags address handle mtrr\n");
     for (i = 0; i < mapcount; i++) {
         map = &tempmaps[i];
@@ -204,9 +204,11 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
             yesno = "yes";
         DRM_SYSCTL_PRINT(
-            "%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %s\n", i,
-            map->offset, map->size, type, map->flags,
-            (unsigned long)map->handle, yesno);
+            "%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %6d %s\n",
+            i, map->offset, map->size, type, map->flags,
+            (unsigned long)map->virtual,
+            (unsigned int)((unsigned long)map->handle >>
+            DRM_MAP_HANDLE_SHIFT), yesno);
     }
     SYSCTL_OUT(req, "", 1);

diff --git a/... b/...

@@ -54,6 +54,7 @@ int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
     if (file_priv && !file_priv->authenticated)
         return EACCES;
+    DRM_DEBUG("called with offset %016jx\n", offset);
     if (dev->dma && offset < ptoa(dev->dma->page_count)) {
         drm_device_dma_t *dma = dev->dma;
@@ -72,31 +73,31 @@ int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
         }
     }
-    /* A sequential search of a linked list is
-       fine here because: 1) there will only be
-       about 5-10 entries in the list and, 2) a
-       DRI client only has to do this mapping
-       once, so it doesn't have to be optimized
-       for performance, even if the list was a
-       bit longer. */
+    /* A sequential search of a linked list is
+       fine here because: 1) there will only be
+       about 5-10 entries in the list and, 2) a
+       DRI client only has to do this mapping
+       once, so it doesn't have to be optimized
+       for performance, even if the list was a
+       bit longer.
+    */
     DRM_LOCK();
     TAILQ_FOREACH(map, &dev->maplist, link) {
-        if (offset >= map->offset && offset < map->offset + map->size)
+        if (offset >> DRM_MAP_HANDLE_SHIFT ==
+            (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
             break;
     }
     if (map == NULL) {
-        DRM_DEBUG("Can't find map, requested offset = %016lx\n",
-            (unsigned long)offset);
+        DRM_DEBUG("Can't find map, request offset = %016jx\n", offset);
         TAILQ_FOREACH(map, &dev->maplist, link) {
             DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
-                (unsigned long)map->offset,
-                (unsigned long)map->handle);
+                map->offset, (unsigned long)map->handle);
         }
         DRM_UNLOCK();
         return -1;
     }
-    if (((map->flags&_DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
+    if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
         DRM_UNLOCK();
         DRM_DEBUG("restricted map\n");
         return -1;
@@ -104,18 +105,22 @@ int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
     type = map->type;
     DRM_UNLOCK();
+    offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
     switch (type) {
     case _DRM_FRAME_BUFFER:
-    case _DRM_REGISTERS:
     case _DRM_AGP:
-        phys = offset;
-        break;
-    case _DRM_CONSISTENT:
-        phys = vtophys((char *)map->handle + (offset - map->offset));
+        *memattr = VM_MEMATTR_WRITE_COMBINING;
+        /* FALLTHROUGH */
+    case _DRM_REGISTERS:
+        phys = map->offset + offset;
+        break;
     case _DRM_SCATTER_GATHER:
+        *memattr = VM_MEMATTR_WRITE_COMBINING;
+        /* FALLTHROUGH */
+    case _DRM_CONSISTENT:
     case _DRM_SHM:
-        phys = vtophys(offset);
+        phys = vtophys((char *)map->virtual + offset);
         break;
     default:
         DRM_ERROR("bad map type %d\n", type);

diff --git a/... b/...

@@ -151,7 +151,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
     if (dev_priv->ring.virtual_start) {
         drm_core_ioremapfree(&dev_priv->ring.map, dev);
         dev_priv->ring.virtual_start = NULL;
-        dev_priv->ring.map.handle = NULL;
+        dev_priv->ring.map.virtual = NULL;
         dev_priv->ring.map.size = 0;
     }
@@ -174,7 +174,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
     }
     dev_priv->sarea_priv = (drm_i915_sarea_t *)
-        ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+        ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
     if (init->ring_size != 0) {
         if (dev_priv->ring.ring_obj != NULL) {
@@ -195,7 +195,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
         drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-        if (dev_priv->ring.map.handle == NULL) {
+        if (dev_priv->ring.map.virtual == NULL) {
             i915_dma_cleanup(dev);
             DRM_ERROR("can not ioremap virtual address for"
                 " ring buffer\n");
@@ -203,7 +203,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
         }
     }
-    dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+    dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;
     dev_priv->cpp = init->cpp;
     dev_priv->back_offset = init->back_offset;
@@ -229,7 +229,7 @@ static int i915_dma_resume(struct drm_device * dev)
         return -EINVAL;
     }
-    if (dev_priv->ring.map.handle == NULL) {
+    if (dev_priv->ring.map.virtual == NULL) {
         DRM_ERROR("can not ioremap virtual address for"
             " ring buffer\n");
         return -ENOMEM;
@@ -823,14 +823,14 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
     dev_priv->hws_map.mtrr = 0;
     drm_core_ioremap_wc(&dev_priv->hws_map, dev);
-    if (dev_priv->hws_map.handle == NULL) {
+    if (dev_priv->hws_map.virtual == NULL) {
         i915_dma_cleanup(dev);
         dev_priv->status_gfx_addr = 0;
         DRM_ERROR("can not ioremap virtual address for"
             " G33 hw status page\n");
         return -ENOMEM;
     }
-    dev_priv->hw_status_page = dev_priv->hws_map.handle;
+    dev_priv->hw_status_page = dev_priv->hws_map.virtual;
     memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
     I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);

diff --git a/... b/...

@@ -1078,11 +1078,11 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
     }
     dev_priv->sarea_priv = (drm_mach64_sarea_t *)
-        ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+        ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
     if (!dev_priv->is_pci) {
         drm_core_ioremap(dev_priv->ring_map, dev);
-        if (!dev_priv->ring_map->handle) {
+        if (!dev_priv->ring_map->virtual) {
             DRM_ERROR("can not ioremap virtual address for"
                 " descriptor ring\n");
             dev->dev_private = (void *)dev_priv;
@@ -1103,7 +1103,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
         dev_priv->dev_buffers = dev->agp_buffer_map;
         drm_core_ioremap(dev->agp_buffer_map, dev);
-        if (!dev->agp_buffer_map->handle) {
+        if (!dev->agp_buffer_map->virtual) {
             DRM_ERROR("can not ioremap virtual address for"
                 " dma buffer\n");
             dev->dev_private = (void *)dev_priv;
@@ -1147,7 +1147,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
     }
     dev_priv->ring.size = 0x4000; /* 16KB */
-    dev_priv->ring.start = dev_priv->ring_map->handle;
+    dev_priv->ring.start = dev_priv->ring_map->virtual;
     dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
     memset(dev_priv->ring.start, 0, dev_priv->ring.size);

diff --git a/... b/...

@@ -585,11 +585,11 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
     drm_core_ioremap(dev_priv->primary, dev);
     drm_core_ioremap(dev->agp_buffer_map, dev);
-    if (!dev_priv->warp->handle ||
-        !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
+    if (!dev_priv->warp->virtual ||
+        !dev_priv->primary->virtual || !dev->agp_buffer_map->virtual) {
         DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
-            dev_priv->warp->handle, dev_priv->primary->handle,
-            dev->agp_buffer_map->handle);
+            dev_priv->warp->virtual, dev_priv->primary->virtual,
+            dev->agp_buffer_map->virtual);
         return -ENOMEM;
     }
@@ -878,14 +878,14 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
     }
     dev_priv->sarea_priv =
-        (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+        (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
         init->sarea_priv_offset);
-    if (!dev_priv->warp->handle ||
-        !dev_priv->primary->handle ||
+    if (!dev_priv->warp->virtual ||
+        !dev_priv->primary->virtual ||
         ((dev_priv->dma_access != 0) &&
         ((dev->agp_buffer_map == NULL) ||
-        (dev->agp_buffer_map->handle == NULL)))) {
+        (dev->agp_buffer_map->virtual == NULL)))) {
         DRM_ERROR("failed to ioremap agp regions!\n");
         return -ENOMEM;
     }
@@ -902,7 +902,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
         return ret;
     }
-    dev_priv->prim.status = (u32 *) dev_priv->status->handle;
+    dev_priv->prim.status = (u32 *) dev_priv->status->virtual;
     mga_do_wait_for_idle(dev_priv);
@@ -910,8 +910,8 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
      */
     MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
-    dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
-    dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+    dev_priv->prim.start = (u8 *) dev_priv->primary->virtual;
+    dev_priv->prim.end = ((u8 *) dev_priv->primary->virtual
         + dev_priv->primary->size);
     dev_priv->prim.size = dev_priv->primary->size;

diff --git a/... b/...

@@ -96,7 +96,7 @@ unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
 static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
 {
-    unsigned char *vcbase = dev_priv->warp->handle;
+    unsigned char *vcbase = dev_priv->warp->virtual;
     unsigned long pcbase = dev_priv->warp->offset;
     memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
@@ -124,7 +124,7 @@ static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
 static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
 {
-    unsigned char *vcbase = dev_priv->warp->handle;
+    unsigned char *vcbase = dev_priv->warp->virtual;
     unsigned long pcbase = dev_priv->warp->offset;
     memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));

diff --git a/... b/...

@@ -509,7 +509,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
     }
     dev_priv->sarea_priv =
-        (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+        (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
         init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -517,9 +517,9 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
         drm_core_ioremap(dev_priv->cce_ring, dev);
         drm_core_ioremap(dev_priv->ring_rptr, dev);
         drm_core_ioremap(dev->agp_buffer_map, dev);
-        if (!dev_priv->cce_ring->handle ||
-            !dev_priv->ring_rptr->handle ||
-            !dev->agp_buffer_map->handle) {
+        if (!dev_priv->cce_ring->virtual ||
+            !dev_priv->ring_rptr->virtual ||
+            !dev->agp_buffer_map->virtual) {
             DRM_ERROR("Could not ioremap agp regions!\n");
             dev->dev_private = (void *)dev_priv;
             r128_do_cleanup_cce(dev);
@@ -528,10 +528,11 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
     } else
 #endif
     {
-        dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
-        dev_priv->ring_rptr->handle =
+        dev_priv->cce_ring->virtual =
+            (void *)dev_priv->cce_ring->offset;
+        dev_priv->ring_rptr->virtual =
             (void *)dev_priv->ring_rptr->offset;
-        dev->agp_buffer_map->handle =
+        dev->agp_buffer_map->virtual =
             (void *)dev->agp_buffer_map->offset;
     }
@@ -542,8 +543,8 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
 #endif
     dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
-    dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
-    dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+    dev_priv->ring.start = (u32 *) dev_priv->cce_ring->virtual;
+    dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->virtual
         + init->ring_size / sizeof(u32));
     dev_priv->ring.size = init->ring_size;
     dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

diff --git a/... b/...

@@ -657,7 +657,7 @@ static void r128_cce_dispatch_indirect(struct drm_device * dev,
      */
     if (dwords & 1) {
         u32 *data = (u32 *)
-            ((char *)dev->agp_buffer_map->handle
+            ((char *)dev->agp_buffer_map->virtual
             + buf->offset + start);
         data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
     }
@@ -722,7 +722,7 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
     dwords = (end - start + 3) / sizeof(u32);
-    data = (u32 *) ((char *)dev->agp_buffer_map->handle
+    data = (u32 *) ((char *)dev->agp_buffer_map->virtual
         + buf->offset + start);
     data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,

diff --git a/... b/...

@@ -1290,8 +1290,8 @@ set_shaders(struct drm_device *dev)
     DRM_DEBUG("\n");
     /* load shaders */
-    vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
-    ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+    vs = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset);
+    ps = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + 256);
     shader_size = sizeof(r6xx_vs) / 4;
     for (i= 0; i < shader_size; i++)
@@ -1718,11 +1718,10 @@ r600_blit_copy(struct drm_device *dev,
     u64 vb_addr;
     u32 *vb;
-    vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+    vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
         dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
-    DRM_DEBUG("src=0x%016llx, dst=0x%016llx, size=%d\n",
-        (unsigned long long)src_gpu_addr,
-        (unsigned long long)dst_gpu_addr, size_bytes);
+    DRM_DEBUG("src=0x%016jx, dst=0x%016jx, size=%d\n",
+        src_gpu_addr, dst_gpu_addr, size_bytes);
     if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
         max_bytes = 8192;
@@ -1759,7 +1758,7 @@ r600_blit_copy(struct drm_device *dev,
             if (!dev_priv->blit_vb)
                 return;
             set_shaders(dev);
-            vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+            vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
                 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
         }
@@ -1849,7 +1848,7 @@ r600_blit_copy(struct drm_device *dev,
             if (!dev_priv->blit_vb)
                 return;
             set_shaders(dev);
-            vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+            vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
                 dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
         }
@@ -1928,7 +1927,7 @@ r600_blit_swap(struct drm_device *dev,
             return;
         set_shaders(dev);
     }
-    vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
+    vb = (u32 *) ((char *)dev->agp_buffer_map->virtual +
         dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
     sx2 = sx + w;

diff --git a/... b/...

@@ -1914,7 +1914,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     }
     dev_priv->sarea_priv =
-        (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+        (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
         init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -1923,9 +1923,9 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
         drm_core_ioremap_wc(dev_priv->cp_ring, dev);
         drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
         drm_core_ioremap_wc(dev->agp_buffer_map, dev);
-        if (!dev_priv->cp_ring->handle ||
-            !dev_priv->ring_rptr->handle ||
-            !dev->agp_buffer_map->handle) {
+        if (!dev_priv->cp_ring->virtual ||
+            !dev_priv->ring_rptr->virtual ||
+            !dev->agp_buffer_map->virtual) {
             DRM_ERROR("could not find ioremap agp regions!\n");
             r600_do_cleanup_cp(dev);
             return -EINVAL;
@@ -1933,18 +1933,19 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     } else
 #endif
     {
-        dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
-        dev_priv->ring_rptr->handle =
+        dev_priv->cp_ring->virtual =
+            (void *)dev_priv->cp_ring->offset;
+        dev_priv->ring_rptr->virtual =
             (void *)dev_priv->ring_rptr->offset;
-        dev->agp_buffer_map->handle =
+        dev->agp_buffer_map->virtual =
             (void *)dev->agp_buffer_map->offset;
-        DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
-            dev_priv->cp_ring->handle);
-        DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
-            dev_priv->ring_rptr->handle);
-        DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
-            dev->agp_buffer_map->handle);
+        DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+            dev_priv->cp_ring->virtual);
+        DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+            dev_priv->ring_rptr->virtual);
+        DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+            dev->agp_buffer_map->virtual);
     }
     dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
@@ -2024,8 +2025,8 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
         dev_priv->gart_buffers_offset);
-    dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
-    dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+    dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+    dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
         + init->ring_size / sizeof(u32));
     dev_priv->ring.size = init->ring_size;
     dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -2064,14 +2065,14 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
             dev_priv->gart_info.table_size;
         drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
-        if (!dev_priv->gart_info.mapping.handle) {
+        if (!dev_priv->gart_info.mapping.virtual) {
             DRM_ERROR("ioremap failed.\n");
             r600_do_cleanup_cp(dev);
             return -EINVAL;
         }
         dev_priv->gart_info.addr =
-            dev_priv->gart_info.mapping.handle;
+            dev_priv->gart_info.mapping.virtual;
         DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
             dev_priv->gart_info.addr,
@@ -2219,7 +2220,7 @@ int r600_cp_dispatch_indirect(struct drm_device *dev,
      */
     while (dwords & 0xf) {
         u32 *data = (u32 *)
-            ((char *)dev->agp_buffer_map->handle
+            ((char *)dev->agp_buffer_map->virtual
             + buf->offset + start);
         data[dwords++] = RADEON_CP_PACKET2;
     }
@@ -2343,7 +2344,8 @@ int r600_cp_dispatch_texture(struct drm_device * dev,
         /* Dispatch the indirect buffer.
          */
         buffer =
-            (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+            (u32 *) ((char *)dev->agp_buffer_map->virtual +
+            buf->offset);
         if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
             DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);

diff --git a/... b/...

@@ -53,7 +53,7 @@ u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
         val = DRM_READ32(dev_priv->ring_rptr, off);
     } else {
         val = *(((volatile u32 *)
-            dev_priv->ring_rptr->handle) +
+            dev_priv->ring_rptr->virtual) +
             (off / sizeof(u32)));
         val = le32_to_cpu(val);
     }
@@ -77,7 +77,7 @@ void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
     if (dev_priv->flags & RADEON_IS_AGP)
         DRM_WRITE32(dev_priv->ring_rptr, off, val);
     else
-        *(((volatile u32 *) dev_priv->ring_rptr->handle) +
+        *(((volatile u32 *) dev_priv->ring_rptr->virtual) +
             (off / sizeof(u32))) = cpu_to_le32(val);
 }
@@ -1278,7 +1278,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     }
     dev_priv->sarea_priv =
-        (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+        (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
         init->sarea_priv_offset);
 #if __OS_HAS_AGP
@@ -1286,9 +1286,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
         drm_core_ioremap_wc(dev_priv->cp_ring, dev);
         drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
         drm_core_ioremap_wc(dev->agp_buffer_map, dev);
-        if (!dev_priv->cp_ring->handle ||
-            !dev_priv->ring_rptr->handle ||
-            !dev->agp_buffer_map->handle) {
+        if (!dev_priv->cp_ring->virtual ||
+            !dev_priv->ring_rptr->virtual ||
+            !dev->agp_buffer_map->virtual) {
             DRM_ERROR("could not find ioremap agp regions!\n");
             radeon_do_cleanup_cp(dev);
             return -EINVAL;
@@ -1296,19 +1296,19 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     } else
 #endif
     {
-        dev_priv->cp_ring->handle =
+        dev_priv->cp_ring->virtual =
             (void *)(unsigned long)dev_priv->cp_ring->offset;
-        dev_priv->ring_rptr->handle =
+        dev_priv->ring_rptr->virtual =
             (void *)(unsigned long)dev_priv->ring_rptr->offset;
-        dev->agp_buffer_map->handle =
+        dev->agp_buffer_map->virtual =
             (void *)(unsigned long)dev->agp_buffer_map->offset;
-        DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
-            dev_priv->cp_ring->handle);
-        DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
-            dev_priv->ring_rptr->handle);
-        DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
-            dev->agp_buffer_map->handle);
+        DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+            dev_priv->cp_ring->virtual);
+        DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+            dev_priv->ring_rptr->virtual);
+        DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+            dev->agp_buffer_map->virtual);
     }
     dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
@@ -1386,8 +1386,8 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
     DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
         dev_priv->gart_buffers_offset);
-    dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
-    dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+    dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+    dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
         + init->ring_size / sizeof(u32));
     dev_priv->ring.size = init->ring_size;
     dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -1423,7 +1423,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
         drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
         dev_priv->gart_info.addr =
-            dev_priv->gart_info.mapping.handle;
+            dev_priv->gart_info.mapping.virtual;
         if (dev_priv->flags & RADEON_IS_PCIE)
             dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;

diff --git a/... b/...

@@ -821,7 +821,7 @@ static int r600_ib_get(struct drm_radeon_cs_parser *parser)
     }
     buf->file_priv = parser->file_priv;
     dev_priv->cs_buf = buf;
-    parser->ib = (void *)((vm_offset_t)dev->agp_buffer_map->handle +
+    parser->ib = (void *)((vm_offset_t)dev->agp_buffer_map->virtual +
         buf->offset);
     return 0;

diff --git a/... b/...

@@ -1420,7 +1420,7 @@ static void radeon_cp_dispatch_swap(struct drm_device *dev)
 static void radeon_cp_dispatch_flip(struct drm_device *dev)
 {
     drm_radeon_private_t *dev_priv = dev->dev_private;
-    struct drm_sarea *sarea = (struct drm_sarea *)dev_priv->sarea->handle;
+    struct drm_sarea *sarea = (struct drm_sarea *)dev_priv->sarea->virtual;
     int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
         ? dev_priv->front_offset : dev_priv->back_offset;
     RING_LOCALS;
@@ -1582,7 +1582,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
      */
     if (dwords & 1) {
         u32 *data = (u32 *)
-            ((char *)dev->agp_buffer_map->handle
+            ((char *)dev->agp_buffer_map->virtual
             + buf->offset + start);
         data[dwords++] = RADEON_CP_PACKET2;
     }
@@ -1629,7 +1629,7 @@ static void radeon_cp_dispatch_indices(struct drm_device *dev,
     dwords = (prim->finish - prim->start + 3) / sizeof(u32);
-    data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+    data = (u32 *) ((char *)dev->agp_buffer_map->virtual +
         elt_buf->offset + prim->start);
     data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
@@ -1781,7 +1781,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
     /* Dispatch the indirect buffer.
      */
     buffer =
-        (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+        (u32 *) ((char *)dev->agp_buffer_map->virtual + buf->offset);
     dwords = size / 4;
 #define RADEON_COPY_MT(_buf, _data, _width) \

diff --git a/... b/...

@@ -376,7 +376,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
         cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
     if (cur + nr_pages < dev_priv->nr_dma_pages) {
-        dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+        dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
             cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
         if (n < rest)
             rest = n;
@@ -392,7 +392,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
             dev_priv->dma_pages[i].used = 0;
             dev_priv->dma_pages[i].flushed = 0;
         }
-        dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
+        dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual;
         dev_priv->first_dma_page = cur = 0;
     }
     for (i = cur; nr_pages > 0; ++i, --nr_pages) {
@@ -443,7 +443,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
     /* pad with noops */
     if (pad) {
-        uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+        uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
             cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
         dev_priv->dma_pages[cur].used += pad;
         while (pad != 0) {
@@ -517,7 +517,7 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
     for (i = dev_priv->first_dma_page;
         i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
         ++i) {
-        uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+        uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual +
             i * SAVAGE_DMA_PAGE_SIZE;
 #if SAVAGE_DMA_DEBUG
         /* Sanity check: all pages except the last one must be full. */
@@ -784,7 +784,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
             return -EINVAL;
         }
         drm_core_ioremap(dev_priv->cmd_dma, dev);
-        if (!dev_priv->cmd_dma->handle) {
+        if (!dev_priv->cmd_dma->virtual) {
             DRM_ERROR("failed to ioremap command "
                 "DMA region!\n");
             savage_do_cleanup_bci(dev);
@@ -806,9 +806,9 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
         dev_priv->fake_dma.offset = 0;
         dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
         dev_priv->fake_dma.type = _DRM_SHM;
-        dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
+        dev_priv->fake_dma.virtual = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
             DRM_MEM_DRIVER);
-        if (!dev_priv->fake_dma.handle) {
+        if (!dev_priv->fake_dma.virtual) {
             DRM_ERROR("could not allocate faked DMA buffer!\n");
             savage_do_cleanup_bci(dev);
             return -ENOMEM;
@@ -818,7 +818,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
     }
     dev_priv->sarea_priv =
-        (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
+        (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->virtual +
         init->sarea_priv_offset);
     /* setup bitmap descriptors */
@@ -857,7 +857,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
     dev_priv->event_counter = 0;
     dev_priv->event_wrap = 0;
     dev_priv->bci_ptr = (volatile uint32_t *)
-        ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+        ((uint8_t *)dev_priv->mmio->virtual + SAVAGE_BCI_OFFSET);
     if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
         dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
     } else {
@@ -865,7 +865,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
     }
     if (dev_priv->status != NULL) {
         dev_priv->status_ptr =
-            (volatile uint32_t *)dev_priv->status->handle;
+            (volatile uint32_t *)dev_priv->status->virtual;
         dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
         dev_priv->wait_evnt = savage_bci_wait_event_shadow;
         dev_priv->status_ptr[1023] = dev_priv->event_counter;
@@ -905,16 +905,16 @@ static int savage_do_cleanup_bci(struct drm_device *dev)
     drm_savage_private_t *dev_priv = dev->dev_private;
     if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
-        if (dev_priv->fake_dma.handle)
-            drm_free(dev_priv->fake_dma.handle,
+        if (dev_priv->fake_dma.virtual)
+            drm_free(dev_priv->fake_dma.virtual,
                 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
-    } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
+    } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->virtual &&
         dev_priv->cmd_dma->type == _DRM_AGP &&
         dev_priv->dma_type == SAVAGE_DMA_AGP)
         drm_core_ioremapfree(dev_priv->cmd_dma, dev);
     if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
-        dev->agp_buffer_map && dev->agp_buffer_map->handle) {
+        dev->agp_buffer_map && dev->agp_buffer_map->virtual) {
         drm_core_ioremapfree(dev->agp_buffer_map, dev);
         /* make sure the next instance (which may be running
          * in PCI mode) doesn't try to use an old

diff --git a/... b/...

@@ -158,6 +158,9 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
 int via_dma_cleanup(struct drm_device * dev)
 {
+    drm_via_blitq_t *blitq;
+    int i;
     if (dev->dev_private) {
         drm_via_private_t *dev_priv =
             (drm_via_private_t *) dev->dev_private;
@@ -169,6 +172,10 @@ int via_dma_cleanup(struct drm_device * dev)
             dev_priv->ring.virtual_start = NULL;
         }
+        for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+            blitq = dev_priv->blit_queues + i;
+            mtx_destroy(&blitq->blit_lock);
+        }
     }
     return 0;
@@ -206,14 +213,14 @@ static int via_initialize(struct drm_device * dev,
     drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-    if (dev_priv->ring.map.handle == NULL) {
+    if (dev_priv->ring.map.virtual == NULL) {
         via_dma_cleanup(dev);
         DRM_ERROR("can not ioremap virtual address for"
             " ring buffer\n");
         return -ENOMEM;
     }
-    dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+    dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;
     dev_priv->dma_ptr = dev_priv->ring.virtual_start;
     dev_priv->dma_low = 0;
@@ -222,7 +229,7 @@ static int via_initialize(struct drm_device * dev,
     dev_priv->dma_offset = init->offset;
    dev_priv->last_pause_ptr = NULL;
     dev_priv->hw_addr_ptr =
-        (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+        (volatile uint32_t *)((char *)dev_priv->mmio->virtual +
         init->reg_pause_addr);
     via_cmdbuf_start(dev_priv);

diff --git a/... b/...

@@ -59,7 +59,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
     }
     dev_priv->sarea_priv =
-        (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+        (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
         init->sarea_priv_offset);
     dev_priv->agpAddr = init->agpAddr;

diff --git a/... b/...

@@ -45,7 +45,6 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
     ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
         agp->size >> VIA_MM_ALIGN_SHIFT);
     if (ret) {
         DRM_ERROR("AGP memory manager initialisation error\n");
         return ret;
@@ -66,7 +65,6 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
     ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
         fb->size >> VIA_MM_ALIGN_SHIFT);
     if (ret) {
         DRM_ERROR("VRAM memory manager initialisation error\n");
         return ret;