Sophie

Sophie

distrib > Mandriva > 2009.0 > x86_64 > by-pkgid > 99f5a5c6381d6f8778d26cdd6504d05f > files > 2

dkms-psb-0-1mdv2009.0.src.rpm

diff --git a/Makefile b/Makefile
index d11ebeb..640922e 100644
--- a/Makefile
+++ b/Makefile
@@ -295,51 +295,8 @@ CONFIG_DRM_XGI := n
 
 # Enable module builds for the modules requested/supported.
 
-ifneq (,$(findstring tdfx,$(DRM_MODULES)))
-CONFIG_DRM_TDFX := m
-endif
-ifneq (,$(findstring r128,$(DRM_MODULES)))
-CONFIG_DRM_R128 := m
-endif
-ifneq (,$(findstring radeon,$(DRM_MODULES)))
-CONFIG_DRM_RADEON := m
-endif
-ifneq (,$(findstring sis,$(DRM_MODULES)))
-CONFIG_DRM_SIS := m
-endif
-ifneq (,$(findstring via,$(DRM_MODULES)))
-CONFIG_DRM_VIA := m
-endif
-ifneq (,$(findstring mach64,$(DRM_MODULES)))
-CONFIG_DRM_MACH64 := m
-endif
-ifneq (,$(findstring ffb,$(DRM_MODULES)))
-CONFIG_DRM_FFB := m
-endif
-ifneq (,$(findstring savage,$(DRM_MODULES)))
-CONFIG_DRM_SAVAGE := m
-endif
-ifneq (,$(findstring mga,$(DRM_MODULES)))
-CONFIG_DRM_MGA := m
-endif
-ifneq (,$(findstring nv,$(DRM_MODULES)))
-CONFIG_DRM_NV := m
-endif
-ifneq (,$(findstring nouveau,$(DRM_MODULES)))
-CONFIG_DRM_NOUVEAU := m
-endif
-ifneq (,$(findstring xgi,$(DRM_MODULES)))
-CONFIG_DRM_XGI := m
-endif
-
 # These require AGP support
 
-ifneq (,$(findstring i810,$(DRM_MODULES)))
-CONFIG_DRM_I810 := m
-endif
-ifneq (,$(findstring i915,$(DRM_MODULES)))
-CONFIG_DRM_I915 := m
-endif
 ifneq (,$(findstring psb,$(DRM_MODULES)))
 CONFIG_DRM_PSB := m
 endif
diff --git a/drmP.h b/drmP.h
index abbc975..7ab419c 100644
--- a/drmP.h
+++ b/drmP.h
@@ -34,6 +34,13 @@
 #ifndef _DRM_P_H_
 #define _DRM_P_H_
 
+#ifndef FALSE
+#define FALSE                           (1 == 0)
+#endif
+#ifndef TRUE
+#define TRUE                            (1 == 1)
+#endif
+
 #ifdef __KERNEL__
 #ifdef __alpha__
 /* add include of current.h so that "current" is defined
diff --git a/drm_agpsupport.c b/drm_agpsupport.c
index 5808e21..8f67c72 100644
--- a/drm_agpsupport.c
+++ b/drm_agpsupport.c
@@ -36,6 +36,9 @@
 
 #if __OS_HAS_AGP
 
+
+
+
 /**
  * Get AGP information.
  *
diff --git a/drm_compat.c b/drm_compat.c
index ec664db..4688b33 100644
--- a/drm_compat.c
+++ b/drm_compat.c
@@ -592,7 +592,7 @@ void drm_init_pat(void)
 	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		return;
 	}
-	if (on_each_cpu(drm_pat_ipi_handler, NULL, 1, 1) != 0) {
+	if (on_each_cpu(drm_pat_ipi_handler, NULL, 1) != 0) {
 		DRM_ERROR("Timed out setting up CPU PAT.\n");
 		return;
 	}	
diff --git a/drm_compat.h b/drm_compat.h
index f7eb1e4..bad648f 100644
--- a/drm_compat.h
+++ b/drm_compat.h
@@ -338,13 +338,13 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma);
 #define _PAGE_PAT 0x080		/* Note that this is the same value as _PAGE_PROTNONE */
 
 
+#endif
+
 extern void drm_init_pat(void);
 extern int drm_use_pat(void);
 
 #endif
 
-#endif
-
 /* fixme when functions are upstreamed - upstreamed for 2.6.23 */
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
 #define DRM_IDR_COMPAT_FN
diff --git a/drm_ttm.c b/drm_ttm.c
index 5c8538d..d4cc140 100644
--- a/drm_ttm.c
+++ b/drm_ttm.c
@@ -37,7 +37,7 @@ static void drm_ttm_ipi_handler(void *null)
 
 void drm_ttm_cache_flush(void)
 {
-	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
+	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
 		DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }
 EXPORT_SYMBOL(drm_ttm_cache_flush);
@@ -117,8 +117,6 @@ static int drm_set_caching(struct drm_ttm *ttm, int noncached)
 			}
 		}
 	}
-	if (do_tlbflush)
-		flush_agp_mappings();
 
 	DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
 
diff --git a/drm_vm.c b/drm_vm.c
index 9e629d3..4d5fbb8 100644
--- a/drm_vm.c
+++ b/drm_vm.c
@@ -79,7 +79,7 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 
 
 /**
- * \c nopage method for AGP virtual memory.
+ * \c fault method for AGP virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -89,8 +89,8 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static __inline__ int drm_do_vm_fault(struct vm_area_struct *vma,
+						struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->head->dev;
@@ -102,19 +102,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 	 * Find the right map
 	 */
 	if (!drm_core_has_AGP(dev))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	if (!dev->agp || !dev->agp->cant_use_aperture)
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
 	map = r_list->map;
 
 	if (map && map->type == _DRM_AGP) {
-		unsigned long offset = address - vma->vm_start;
+		/*
+		 * Using vm_pgoff as a selector forces us to use this unusual
+		 * addressing scheme.
+		 */
+		unsigned long offset = (unsigned long)vmf->virtual_address -
+								vma->vm_start;
 		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
@@ -136,7 +141,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		}
 
 		if (!agpmem)
-			goto vm_nopage_error;
+			goto vm_fault_error;
 
 		/*
 		 * Get the page, inc the use count, and return it
@@ -144,6 +149,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
 		page = virt_to_page(__va(agpmem->memory->memory[offset]));
 		get_page(page);
+		vmf->page = page;
 
 #if 0
 		/* page_count() not defined everywhere */
@@ -153,21 +159,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		     page_count(page));
 #endif
 
-		return page;
+		return 0;
 	}
-      vm_nopage_error:
-	return NOPAGE_SIGBUS;	/* Disallow mremap */
+      vm_fault_error:
+	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 #else				/* __OS_HAS_AGP */
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static __inline__ int drm_do_vm_fault(struct vm_area_struct *vma,
+						struct vm_fault *vmf)
 {
-	return NOPAGE_SIGBUS;
+	return VM_FAULT_SIGBUS;
 }
 #endif				/* __OS_HAS_AGP */
 
 /**
- * \c nopage method for shared virtual memory.
+ * \c fault method for shared virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -176,28 +182,27 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static __inline__ int drm_do_vm_shm_fault(struct vm_area_struct *vma,
+						    struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
 	struct page *page;
 
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!map)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = vmalloc_to_page((void *)i);
 	if (!page)
-		return NOPAGE_SIGBUS;
+		return VM_FAULT_SIGBUS;
 	get_page(page);
+	vmf->page = page;
 
-	DRM_DEBUG("shm_nopage 0x%lx\n", address);
-	return page;
+	DRM_DEBUG("shm_fault 0x%lx\n", offset);
+	return 0;
 }
 
 /**
@@ -281,7 +286,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 }
 
 /**
- * \c nopage method for DMA virtual memory.
+ * \c fault method for DMA virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -289,8 +294,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static __inline__ int drm_do_vm_dma_fault(struct vm_area_struct *vma,
+						    struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->head->dev;
@@ -300,24 +305,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
 	struct page *page;
 
 	if (!dma)
-		return NOPAGE_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
+		return VM_FAULT_SIGBUS;	/* Error */
 	if (!dma->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
-	page_nr = offset >> PAGE_SHIFT;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
 	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
 
 	get_page(page);
+	vmf->page = page;
 
-	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
-	return page;
+	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+	return 0;
 }
 
 /**
- * \c nopage method for scatter-gather virtual memory.
+ * \c fault method for scatter-gather virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -325,8 +329,8 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
-						   unsigned long address)
+static __inline__ int drm_do_vm_sg_fault(struct vm_area_struct *vma,
+						   struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
@@ -339,78 +343,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 
 	DRM_DEBUG("\n");
 	if (!entry)
-		return NOPAGE_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
+		return VM_FAULT_SIGBUS;	/* Error */
 	if (!entry->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
+	vmf->page = page;
 
-	return page;
+	return 0;
 }
 
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address, int *type)
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_nopage(vma, address);
+	return drm_do_vm_fault(vma, vmf);
 }
 
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_shm_nopage(vma, address);
+	return drm_do_vm_shm_fault(vma, vmf);
 }
 
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_dma_nopage(vma, address);
+	return drm_do_vm_dma_fault(vma, vmf);
 }
 
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address, int *type)
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_sg_nopage(vma, address);
+	return drm_do_vm_sg_fault(vma, vmf);
 }
 
-
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
-	.nopage = drm_vm_nopage,
+	.fault = drm_vm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
-	.nopage = drm_vm_shm_nopage,
+	.fault = drm_vm_shm_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
-	.nopage = drm_vm_dma_nopage,
+	.fault = drm_vm_dma_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
-	.nopage = drm_vm_sg_nopage,
+	.fault = drm_vm_sg_fault,
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
@@ -621,7 +611,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 			/*
 			 * On some platforms we can't talk to bus dma address from the CPU, so for
 			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
-			 * pages and mappings in nopage()
+			 * pages and mappings in fault()
 			 */
 #if defined(__powerpc__)
 			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -651,7 +641,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		break;
 	case _DRM_CONSISTENT:
 		/* Consistent memory is really like shared memory. But
-		 * it's allocated in a different way, so avoid nopage */
+		 * it's allocated in a different way, so avoid fault */
 		if (remap_pfn_range(vma, vma->vm_start,
 		    page_to_pfn(virt_to_page(map->handle)),
 		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
@@ -720,8 +710,7 @@ EXPORT_SYMBOL(drm_mmap);
 
 #define DRM_NOPFN_EXTRA 15 /* Fault 16 pages at a time in */
 
-unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
-			      unsigned long address)
+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 	unsigned long page_offset;
@@ -734,25 +723,22 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 	unsigned long bus_offset;
 	unsigned long bus_size;
 	int i;
-	unsigned long ret = NOPFN_REFAULT;
-
-	if (address > vma->vm_end)
-		return NOPFN_SIGBUS;
+	unsigned long ret = VM_FAULT_NOPAGE;
 
 	dev = bo->dev;
 	err = drm_bo_read_lock(&dev->bm.bm_lock);
 	if (err)
-		return NOPFN_REFAULT;
+		return VM_FAULT_NOPAGE;
 
 	err = mutex_lock_interruptible(&bo->mutex);
 	if (err) {
 		drm_bo_read_unlock(&dev->bm.bm_lock);
-		return NOPFN_REFAULT;
+		return VM_FAULT_NOPAGE;
 	}
 
 	err = drm_bo_wait(bo, 0, 0, 0);
 	if (err) {
-		ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+		ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
 
@@ -767,7 +753,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 			DRM_BO_FLAG_FORCE_MAPPABLE;
 		err = drm_bo_move_buffer(bo, new_mask, 0, 0);
 		if (err) {
-			ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+			ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	}
@@ -776,11 +762,12 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 				&bus_size);
 
 	if (err) {
-		ret = NOPFN_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
-	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	/* XXX: vmf->pgoff may work here, but it adds on vma->vm_pgoff */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (bus_size) {
 		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
@@ -793,7 +780,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 		drm_ttm_fixup_caching(ttm);
 		page = drm_ttm_get_page(ttm, page_offset);
 		if (!page) {
-			ret = NOPFN_OOM;
+			ret = VM_FAULT_OOM;
 			goto out_unlock;
 		}
 		pfn = page_to_pfn(page);
@@ -802,14 +789,14 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 			drm_io_prot(_DRM_TTM, vma);
 	}
 
-	err = vm_insert_pfn(vma, address, pfn);
+	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 	if (err) {
-		ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+		ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
 
 	for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
-
+		unsigned long address;
 		if (++page_offset == bo->mem.num_pages)
 			break;
 		address = vma->vm_start + (page_offset << PAGE_SHIFT);
@@ -832,7 +819,7 @@ out_unlock:
 	drm_bo_read_unlock(&dev->bm.bm_lock);
 	return ret;
 }
-EXPORT_SYMBOL(drm_bo_vm_nopfn);
+EXPORT_SYMBOL(drm_bo_vm_fault);
 #endif
 
 static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
@@ -888,7 +875,7 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 
 static struct vm_operations_struct drm_bo_vm_ops = {
 #ifdef DRM_FULL_MM_COMPAT
-	.nopfn = drm_bo_vm_nopfn,
+	.fault = drm_bo_vm_fault,
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 	.nopfn = drm_bo_vm_nopfn,
diff --git a/psb_fb.c b/psb_fb.c
index d445de3..5cd7106 100644
--- a/psb_fb.c
+++ b/psb_fb.c
@@ -990,8 +990,7 @@ void psbfb_resume(struct drm_device *dev)
  * Also, these should be the default vm ops for buffer object type fbs.
  */
 
-extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
-				     unsigned long address);
+extern int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /*
  * This wrapper is a bit ugly and is here because we need access to a mutex
@@ -1001,8 +1000,7 @@ extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
  * recursive locking.
  */
 
-static unsigned long psbfb_nopfn(struct vm_area_struct *vma,
-				 unsigned long address)
+static int psbfb_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
 	struct vm_area_struct tmp_vma;
@@ -1011,7 +1009,7 @@ static unsigned long psbfb_nopfn(struct vm_area_struct *vma,
 	mutex_lock(&vi->vm_mutex);
 	tmp_vma = *vma;
 	tmp_vma.vm_private_data = vi->bo;
-	ret = drm_bo_vm_nopfn(&tmp_vma, address);
+	ret = drm_bo_vm_fault(&tmp_vma, vmf);
 	mutex_unlock(&vi->vm_mutex);
 	return ret;
 }
@@ -1029,8 +1027,7 @@ static void psbfb_vm_close(struct vm_area_struct *vma)
 }
 
 static struct vm_operations_struct psbfb_vm_ops = {
-	.nopfn = psbfb_nopfn,
-	.nopage = NULL,
+	.fault = psbfb_fault,
 	.open = psbfb_vm_open,
 	.close = psbfb_vm_close,
 };