diff --git a/kernel/nvidia-uvm/nvidia-uvm.Kbuild b/kernel/nvidia-uvm/nvidia-uvm.Kbuild
index 650c922..0c83fae 100644
--- a/kernel/nvidia-uvm/nvidia-uvm.Kbuild
+++ b/kernel/nvidia-uvm/nvidia-uvm.Kbuild
@@ -108,10 +108,11 @@
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += bitmap_clear
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += usleep_range
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_empty
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_replace_slot
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += do_gettimeofday
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += smp_read_barrier_depends
 NV_CONFTEST_TYPE_COMPILE_TESTS += proc_dir_entry
 NV_CONFTEST_TYPE_COMPILE_TESTS += irq_handler_t
 NV_CONFTEST_TYPE_COMPILE_TESTS += outer_flush_all
 NV_CONFTEST_TYPE_COMPILE_TESTS += vm_operations_struct
diff --git a/kernel/nvidia-uvm/uvm8_tools.c b/kernel/nvidia-uvm/uvm8_tools.c
index 86dbb77..be865bc 100644
--- a/kernel/nvidia-uvm/uvm8_tools.c
+++ b/kernel/nvidia-uvm/uvm8_tools.c
@@ -188,11 +188,15 @@
 static NV_STATUS tools_update_status(uvm_va_space_t *va_space);
 
 static uvm_tools_event_tracker_t *tools_event_tracker(struct file *filp)
 {
     long event_tracker = atomic_long_read((atomic_long_t *)&filp->private_data);
+#if defined(NV_SMP_READ_BARRIER_DEPENDS_PRESENT)
     smp_read_barrier_depends();
+#else
+    smp_rmb();
+#endif
     return (uvm_tools_event_tracker_t *)event_tracker;
 }
 
 static bool tracker_is_queue(uvm_tools_event_tracker_t *event_tracker)
 {
diff --git a/kernel/nvidia-uvm/uvm8_va_range.h b/kernel/nvidia-uvm/uvm8_va_range.h
index 8cae357..a74ad24 100644
--- a/kernel/nvidia-uvm/uvm8_va_range.h
+++ b/kernel/nvidia-uvm/uvm8_va_range.h
@@ -715,11 +715,15 @@ static uvm_va_block_t *uvm_va_range_block(uvm_va_range_t *va_range, size_t index
     // Later accesses in this thread will read state out of block, potentially
     // as soon as the block pointer is updated by another thread. We have to
     // make sure that any initialization of this block by the creating thread is
     // visible to later accesses in this thread, which requires a data
     // dependency barrier.
+#if defined(NV_SMP_READ_BARRIER_DEPENDS_PRESENT)
     smp_read_barrier_depends();
+#else
+    smp_mb();
+#endif
     return block;
 }
 
 // Same as uvm_va_range_block except that the block is created if not already
 // present in the array. If NV_OK is returned, the block has been allocated