1 # - Tools for building CUDA C files: libraries and build dependencies.
2 # This script locates the NVIDIA CUDA C tools. It should work on linux, windows,
3 # and mac and should be reasonably up to date with CUDA C releases.
5 # This script makes use of the standard find_package arguments of <VERSION>,
6 # REQUIRED and QUIET. CUDA_FOUND will report if an acceptable version of CUDA
7 # was found.
9 # The script will prompt the user to specify CUDA_TOOLKIT_ROOT_DIR if the prefix
10 # cannot be determined by the location of nvcc in the system path and REQUIRED
11 # is specified to find_package(). To use a different installed version of the
12 # toolkit set the environment variable CUDA_BIN_PATH before running cmake
13 # (e.g. CUDA_BIN_PATH=/usr/local/cuda1.0 instead of the default /usr/local/cuda)
14 # or set CUDA_TOOLKIT_ROOT_DIR after configuring. If you change the value of
15 # CUDA_TOOLKIT_ROOT_DIR, various components that depend on the path will be
16 # relocated.
18 # It might be necessary to set CUDA_TOOLKIT_ROOT_DIR manually on certain
19 # platforms, or to use a cuda runtime not installed in the default location. In
20 # newer versions of the toolkit the cuda library is included with the graphics
21 # driver; be sure that the driver version matches what is needed by the cuda
22 # runtime version.
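# For example, either of the following selects a toolkit installed under a
# non-default prefix (the path below is only illustrative):
#
#   CUDA_BIN_PATH=/usr/local/cuda-2.3 cmake <path_to_source>
#   cmake -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-2.3 <path_to_source>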
24 # The following variables affect the behavior of the macros in the script (in
25 # alphabetical order). Note that any of these flags can be changed multiple
26 # times in the same directory before calling CUDA_ADD_EXECUTABLE,
27 # CUDA_ADD_LIBRARY, CUDA_COMPILE, CUDA_COMPILE_PTX or CUDA_WRAP_SRCS.
29 # CUDA_64_BIT_DEVICE_CODE (Default matches host bit size)
30 # -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code.
31 # Note that making this different from the host code when generating object
32 # or C files from CUDA code just won't work, because size_t gets defined by
33 # nvcc in the generated source. If you compile to PTX and then load the
34 # file yourself, you can mix bit sizes between device and host.
36 # CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON)
37 # -- Set to ON if you want the custom build rule to be attached to the source
38 # file in Visual Studio. Turn OFF if you add the same cuda file to multiple
39 # targets.
41 # This allows the user to build the target from the CUDA file; however, bad
42 # things can happen if the CUDA source file is added to multiple targets.
43 # When performing parallel builds it is possible for the custom build
44 # command to be run more than once and in parallel causing cryptic build
45 # errors. VS runs the rules for every source file in the target, and a
46 # source can have only one rule no matter how many projects it is added to.
47 # When the rule is run from multiple targets race conditions can occur on
48 # the generated file. Eventually everything will get built, but if the user
49 # is unaware of this behavior, there may be confusion. It would be nice if
50 # this script could detect the reuse of source files across multiple targets
51 # and turn the option off for the user, but no good solution could be found.
53 # CUDA_BUILD_CUBIN (Default OFF)
54 # -- Set to ON to enable an extra compilation pass with the -cubin option in
55 # Device mode. The output is parsed, and register and shared memory usage is
56 # printed during the build.
58 # CUDA_BUILD_EMULATION (Default OFF for device mode)
59 # -- Set to ON for Emulation mode. -D_DEVICEEMU is defined for CUDA C files
60 # when CUDA_BUILD_EMULATION is TRUE.
62 # CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR)
63 # -- Set to the path you wish to have the generated files placed. If it is
64 # blank output files will be placed in CMAKE_CURRENT_BINARY_DIR.
65 # Intermediate files will always be placed in
66 # CMAKE_CURRENT_BINARY_DIR/CMakeFiles.
68 # CUDA_HOST_COMPILATION_CPP (Default ON)
69 # -- Set to OFF for C compilation of host code.
71 # CUDA_NVCC_FLAGS
72 # CUDA_NVCC_FLAGS_<CONFIG>
73 # -- Additional NVCC command line arguments. NOTE: multiple arguments must be
74 # semi-colon delimited (e.g. --compiler-options;-Wall)
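#      For example, a sketch of setting global and per-configuration flags (the
#      flag values below are only illustrative):
#        set(CUDA_NVCC_FLAGS "--compiler-options;-Wall")
#        set(CUDA_NVCC_FLAGS_RELEASE "--use_fast_math")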
76 # CUDA_PROPAGATE_HOST_FLAGS (Default ON)
77 # -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration
78 # dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the
79 # host compiler through nvcc's -Xcompiler flag. This helps make the
80 # generated host code match the rest of the system better. Sometimes
81 # certain flags give nvcc problems, and this will help you turn the flag
82 # propagation off. This does not affect the flags supplied directly to nvcc
83 # via CUDA_NVCC_FLAGS or through the OPTION flags specified through
84 # CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS. Flags used for
85 # shared library compilation are not affected by this flag.
87 # CUDA_VERBOSE_BUILD (Default OFF)
88 # -- Set to ON to see all the commands used when building the CUDA file. When
89 # using a Makefile generator the value defaults to VERBOSE (run make
90 # VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will
91 # always print the output.
93 # The script creates the following macros (in alphabetical order):
95 # CUDA_ADD_CUFFT_TO_TARGET( cuda_target )
96 # -- Adds the cufft library to the target (can be any target). Handles whether
97 # you are in emulation mode or not.
99 # CUDA_ADD_CUBLAS_TO_TARGET( cuda_target )
100 # -- Adds the cublas library to the target (can be any target). Handles
101 # whether you are in emulation mode or not.
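#      A minimal sketch (the target and file names are hypothetical):
#        cuda_add_executable(blas_example blas_example.cu)
#        cuda_add_cublas_to_target(blas_example)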
103 # CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ...
104 # [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
105 # -- Creates an executable "cuda_target" which is made up of the files
106 # specified. All of the non CUDA C files are compiled using the standard
107 # build rules specified by CMAKE and the cuda files are compiled to object
108 # files using nvcc and the host compiler. In addition CUDA_INCLUDE_DIRS is
109 # added automatically to include_directories(). Some standard CMake target
110 # calls can be used on the target after calling this macro
111 # (e.g. set_target_properties and target_link_libraries), but setting
112 # properties that adjust compilation flags will not affect code compiled by
113 # nvcc. Such flags should be modified before calling CUDA_ADD_EXECUTABLE,
114 # CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS.
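#      For example (target, file, and macro names are hypothetical):
#        find_package(CUDA REQUIRED)
#        cuda_add_executable(my_app main.cpp kernels.cu OPTIONS -DMY_DEFINE)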
116 # CUDA_ADD_LIBRARY( cuda_target file0 file1 ...
117 # [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
118 # -- Same as CUDA_ADD_EXECUTABLE except that a library is created.
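#      For example, building a shared library from mixed sources (the names
#      below are hypothetical):
#        cuda_add_library(my_cuda_lib SHARED host_api.cpp kernels.cu)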
120 # CUDA_BUILD_CLEAN_TARGET()
121 # -- Creates a convenience target that deletes all the dependency files
122 # generated. You should run make clean after running this target to ensure the
123 # dependency files get regenerated.
125 # CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE]
126 # [OPTIONS ...] )
127 # -- Returns a list of generated files from the input source files to be used
128 # with ADD_LIBRARY or ADD_EXECUTABLE.
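#      For example (names are hypothetical; note that CUDA_LIBRARIES is not
#      added for you in this form, so link it explicitly if the program uses
#      the CUDA runtime):
#        cuda_compile(CUDA_OBJS kernels.cu)
#        add_executable(my_app main.cpp ${CUDA_OBJS})
#        target_link_libraries(my_app ${CUDA_LIBRARIES})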
130 # CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] )
131 # -- Returns a list of PTX files generated from the input source files.
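#      For example (names are hypothetical; the custom target is only used here
#      to drive generation of the PTX files):
#        cuda_compile_ptx(PTX_FILES kernels.cu)
#        add_custom_target(ptx_files ALL DEPENDS ${PTX_FILES})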
133 # CUDA_INCLUDE_DIRECTORIES( path0 path1 ... )
134 # -- Sets the directories that should be passed to nvcc
135 # (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu
136 # files.
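#      For example (the directory name is hypothetical):
#        cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/cuda_headers)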
138 # CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ...
139 # [STATIC | SHARED | MODULE] [OPTIONS ...] )
140 # -- This is where all the magic happens. CUDA_ADD_EXECUTABLE,
141 # CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this
142 # function under the hood.
144 # Given the list of files (file0 file1 ... fileN) this macro generates
145 # custom commands that generate either PTX or linkable objects (use "PTX" or
146 # "OBJ" for the format argument to switch). Files that don't end with .cu
147 # or have the HEADER_FILE_ONLY property are ignored.
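#      A sketch of calling CUDA_WRAP_SRCS directly (the names and options below
#      are hypothetical; the convenience macros above wrap this same call):
#        cuda_wrap_srcs(my_target OBJ generated_files kernels.cu OPTIONS -O2)
#        add_executable(my_target main.cpp ${generated_files})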
149 # The arguments passed in after OPTIONS are extra command line options to
150 # give to nvcc. You can also specify per configuration options by
151 # specifying the name of the configuration followed by the options. General
152 # options must precede configuration specific options. Not all
153 # configurations need to be specified, only the ones provided will be used.
154 # For example:
155 # OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag"
156 # DEBUG -g
157 # RELEASE --use_fast_math
158 # RELWITHDEBINFO --use_fast_math;-g
159 # MINSIZEREL --use_fast_math
161 # For certain configurations (namely VS generating object files with
162 # CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will
163 # be produced for the given cuda file. This is because when you add the
164 # cuda file to Visual Studio it knows that this file produces an object file
165 # and will link in the resulting object file automatically.
167 # This script will also generate a separate cmake script that is used at
168 # build time to invoke nvcc. This is for several reasons.
170 # 1. nvcc can return negative numbers as return values which confuses
171 # Visual Studio into thinking that the command succeeded. The script now
172 # checks the error codes and produces errors when there was a problem.
174 # 2. nvcc has been known to not delete incomplete results when it
175 # encounters problems. This confuses build systems into thinking the
176 # target was generated when in fact an unusable file exists. The script
177 # now deletes the output files if there was an error.
179 # 3. By putting all the options that affect the build into a file and then
180 # making the build rule dependent on that file, the output files will be
181 # regenerated when the options change.
183 # This script also looks at optional arguments STATIC, SHARED, or MODULE to
184 # determine when to target the object compilation for a shared library.
185 # BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in
186 # CUDA_ADD_LIBRARY. On some systems special flags are added for building
187 # objects intended for shared libraries. A preprocessor macro,
188 # <target_name>_EXPORTS is defined when a shared library compilation is
189 # detected.
191 # Flags passed into add_definitions with -D or /D are passed along to nvcc.
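# For example (the macro name is arbitrary):
#   add_definitions(-DUSE_TEXTURE_MEMORY)
# will result in -DUSE_TEXTURE_MEMORY being passed to nvcc as well as to the
# host compiler.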
193 # The script defines the following variables:
195 # CUDA_VERSION_MAJOR -- The major version of cuda as reported by nvcc.
196 # CUDA_VERSION_MINOR -- The minor version.
197 # CUDA_VERSION -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR (cached).
198 # CUDA_VERSION_STRING -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
200 # CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set).
201 # CUDA_SDK_ROOT_DIR -- Path to the CUDA SDK. Use this to find files in the
202 # SDK. This script will not directly support finding
203 # specific libraries or headers, as that isn't
204 # supported by NVIDIA. If you want to change
205 # libraries when the path changes see the
206 # FindCUDA.cmake script for an example of how to clear
207 # these variables. There are also examples of how to
208 # use the CUDA_SDK_ROOT_DIR to locate headers or
209 # libraries, if you so choose (at your own risk).
210 # CUDA_INCLUDE_DIRS -- Include directory for cuda headers. Added automatically
211 # for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY.
212 # CUDA_LIBRARIES -- Cuda RT library.
213 # CUDA_CUFFT_LIBRARIES -- Device or emulation library for the Cuda FFT
214 # implementation (alternative to:
215 # CUDA_ADD_CUFFT_TO_TARGET macro)
216 # CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
217 # implementation (alternative to:
218 # CUDA_ADD_CUBLAS_TO_TARGET macro).
221 # James Bigler, NVIDIA Corp (nvidia.com - jbigler)
222 # Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
224 # Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
226 # Copyright (c) 2007-2009
227 # Scientific Computing and Imaging Institute, University of Utah
229 # This code is licensed under the MIT License. See the FindCUDA.cmake script
230 # for the text of the license.
234 # License for the specific language governing rights and limitations under
235 # Permission is hereby granted, free of charge, to any person obtaining a
236 # copy of this software and associated documentation files (the "Software"),
237 # to deal in the Software without restriction, including without limitation
238 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
239 # and/or sell copies of the Software, and to permit persons to whom the
240 # Software is furnished to do so, subject to the following conditions:
242 # The above copyright notice and this permission notice shall be included
243 # in all copies or substantial portions of the Software.
245 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
246 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
247 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
248 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
249 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
250 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
251 # DEALINGS IN THE SOFTWARE.
253 ###############################################################################
257 # We need to have at least this version to support the VERSION_LESS argument to 'if' (2.6.2) and unset (2.6.3)
259 cmake_minimum_required(VERSION 2.6.3)
262 # This macro helps us find the location of helper files we will need the full path to
263 macro(CUDA_FIND_HELPER_FILE _name _extension)
264 set(_full_name "${_name}.${_extension}")
265 # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being
266 # processed. Using this variable, we can pull out the current path, and
267 # provide a way to get access to the other files we need local to here.
268 get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
269 find_file(CUDA_${_name} ${_full_name} PATHS ${CMAKE_CURRENT_LIST_DIR}/FindCUDA NO_DEFAULT_PATH)
270 if(NOT CUDA_${_name})
271 set(error_message "${_full_name} not found in CMAKE_MODULE_PATH")
272 if(CUDA_FIND_REQUIRED)
273 message(FATAL_ERROR "${error_message}")
274 else(CUDA_FIND_REQUIRED)
275 if(NOT CUDA_FIND_QUIETLY)
276 message(STATUS "${error_message}")
277 endif(NOT CUDA_FIND_QUIETLY)
278 endif(CUDA_FIND_REQUIRED)
279 endif(NOT CUDA_${_name})
280 # Set this variable as internal, so the user isn't bugged with it.
281 set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE)
282 endmacro(CUDA_FIND_HELPER_FILE)
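# For example, the cuda_find_helper_file(run_nvcc cmake) call further below looks
# for FindCUDA/run_nvcc.cmake next to this script and caches its location in
# CUDA_run_nvcc, which is later handed to configure_file() when building the
# per-file compile script.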
284 #####################################################################
285 ## CUDA_INCLUDE_NVCC_DEPENDENCIES
288 # So we want to try and include the dependency file if it exists. If
289 # it doesn't exist then we need to create an empty one, so we can
290 # include it.
292 # If it does exist, then we need to check to see if all the files it
293 # depends on exist. If they don't then we should clear the dependency
294 # file and regenerate it later. This covers the case where a header
295 # file has disappeared or moved.
297 macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file)
298 set(CUDA_NVCC_DEPEND)
299 set(CUDA_NVCC_DEPEND_REGENERATE FALSE)
302 # Include the dependency file. Create it first if it doesn't exist. The
303 # INCLUDE puts a dependency that will force CMake to rerun and bring in the
304 # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few
305 # hours figuring out why it didn't work).
306 if(NOT EXISTS ${dependency_file})
307 file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
308 endif()
309 # Always include this file to force CMake to run again next
310 # invocation and rebuild the dependencies.
311 #message("including dependency_file = ${dependency_file}")
312 include(${dependency_file})
314 # Now we need to verify the existence of all the included files
315 # here. If they aren't there we need to just blank this variable and
316 # make the file regenerate again.
317 # if(DEFINED CUDA_NVCC_DEPEND)
318 # message("CUDA_NVCC_DEPEND set")
320 # message("CUDA_NVCC_DEPEND NOT set")
322 if(CUDA_NVCC_DEPEND)
323 #message("CUDA_NVCC_DEPEND true")
324 foreach(f ${CUDA_NVCC_DEPEND})
325 #message("searching for ${f}")
327 #message("file ${f} not found")
328 set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
331 else(CUDA_NVCC_DEPEND)
332 #message("CUDA_NVCC_DEPEND false")
333 # No dependencies, so regenerate the file.
334 set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
335 endif(CUDA_NVCC_DEPEND)
337 #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}")
338 # No incoming dependencies, so we need to generate them. Make the
339 # output depend on the dependency file itself, which should cause the
340 # rule to re-run.
341 if(CUDA_NVCC_DEPEND_REGENERATE)
342 file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
343 endif(CUDA_NVCC_DEPEND_REGENERATE)
345 endmacro(CUDA_INCLUDE_NVCC_DEPENDENCIES)
347 ###############################################################################
348 ###############################################################################
349 # Setup variables' defaults
350 ###############################################################################
351 ###############################################################################
353 # Allow the user to specify if the device code is supposed to be 32 or 64 bit.
354 if(CMAKE_SIZEOF_VOID_P EQUAL 8)
355 set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON)
356 else()
357 set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF)
358 endif()
359 option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT})
361 # Attach the build rule to the source file in VS. This option should be ON unless the same CUDA file is added to multiple targets.
362 option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON)
364 # Prints out extra information about the cuda file during compilation
365 option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF)
367 # Set whether we are using emulation or device mode.
368 option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF)
370 # Where to put the generated output.
371 set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR")
373 # Parse HOST_COMPILATION mode.
374 option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON)
376 # Extra user settable flags
377 set(CUDA_NVCC_FLAGS "" CACHE STRING "Semi-colon delimit multiple arguments.")
379 # Propagate the host flags to the host compiler via -Xcompiler
380 option(CUDA_PROPAGATE_HOST_FLAGS "Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompiler" ON)
382 # Specifies whether the commands used when compiling the .cu file will be printed out.
383 option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF)
385 mark_as_advanced(
386 CUDA_64_BIT_DEVICE_CODE
387 CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE
388 CUDA_GENERATED_OUTPUT_DIR
389 CUDA_HOST_COMPILATION_CPP
390 CUDA_NVCC_FLAGS
391 CUDA_PROPAGATE_HOST_FLAGS
392 CUDA_VERBOSE_BUILD
393 )
394 # Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we
395 # need to add another entry for the CMAKE_BUILD_TYPE. We also need to add the
396 # standard set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo)
397 # for completeness. We need to run this loop in order to accommodate the addition
398 # of extra configuration types. Duplicate entries will be removed by
399 # list(REMOVE_DUPLICATES).
400 set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
401 list(REMOVE_DUPLICATES CUDA_configuration_types)
402 foreach(config ${CUDA_configuration_types})
403 string(TOUPPER ${config} config_upper)
404 set(CUDA_NVCC_FLAGS_${config_upper} "" CACHE STRING "Semi-colon delimit multiple arguments.")
405 mark_as_advanced(CUDA_NVCC_FLAGS_${config_upper})
406 endforeach()
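# For example, with the standard build types this loop creates cache entries such
# as CUDA_NVCC_FLAGS_DEBUG and CUDA_NVCC_FLAGS_RELEASE that the user can edit.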
408 ###############################################################################
409 ###############################################################################
410 # Locate CUDA, Set Build Type, etc.
411 ###############################################################################
412 ###############################################################################
414 # Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed;
415 # if they have, clear the cache variables so that the new values will be detected again.
416 if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
417 unset(CUDA_NVCC_EXECUTABLE CACHE)
418 unset(CUDA_VERSION CACHE)
419 unset(CUDA_TOOLKIT_INCLUDE CACHE)
420 unset(CUDA_CUDART_LIBRARY CACHE)
421 unset(CUDA_CUDA_LIBRARY CACHE)
422 unset(CUDA_cublas_LIBRARY CACHE)
423 unset(CUDA_cublasemu_LIBRARY CACHE)
424 unset(CUDA_cufft_LIBRARY CACHE)
425 unset(CUDA_cufftemu_LIBRARY CACHE)
426 endif()
428 if(NOT "${CUDA_SDK_ROOT_DIR}" STREQUAL "${CUDA_SDK_ROOT_DIR_INTERNAL}")
429 # No specific variables to catch. Use this kind of code before calling
430 # find_package(CUDA) to clean up any variables that may depend on this path.
432 # unset(MY_SPECIAL_CUDA_SDK_INCLUDE_DIR CACHE)
433 # unset(MY_SPECIAL_CUDA_SDK_LIBRARY CACHE)
434 endif()
436 # Search for the cuda distribution.
437 if(NOT CUDA_TOOLKIT_ROOT_DIR)
439 # Search in the CUDA_BIN_PATH first.
440 find_path(CUDA_TOOLKIT_ROOT_DIR
441 NAMES nvcc nvcc.exe
442 PATHS ENV CUDA_BIN_PATH
443 DOC "Toolkit location."
444 NO_DEFAULT_PATH
445 )
446 # Now search default paths
447 find_path(CUDA_TOOLKIT_ROOT_DIR
448 NAMES nvcc nvcc.exe
449 PATHS /usr/local/bin
450 /usr/local/cuda/bin
451 DOC "Toolkit location."
452 )
454 if (CUDA_TOOLKIT_ROOT_DIR)
455 string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR})
456 # We need to force this back into the cache.
457 set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE)
458 endif(CUDA_TOOLKIT_ROOT_DIR)
459 if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
460 if(CUDA_FIND_REQUIRED)
461 message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR")
462 elseif(NOT CUDA_FIND_QUIETLY)
463 message("CUDA_TOOLKIT_ROOT_DIR not found or specified")
464 endif()
465 endif (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
466 endif (NOT CUDA_TOOLKIT_ROOT_DIR)
468 # CUDA_NVCC_EXECUTABLE
469 find_program(CUDA_NVCC_EXECUTABLE
470 NAMES nvcc
471 PATHS "${CUDA_TOOLKIT_ROOT_DIR}/bin"
472 "${CUDA_TOOLKIT_ROOT_DIR}/bin64"
473 ENV CUDA_BIN_PATH
474 NO_DEFAULT_PATH
475 )
476 # Search default search paths, after we search our own set of paths.
477 find_program(CUDA_NVCC_EXECUTABLE nvcc)
478 mark_as_advanced(CUDA_NVCC_EXECUTABLE)
480 if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION)
481 # Compute the version.
482 execute_process (COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT)
483 string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT})
484 string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT})
485 set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.")
486 mark_as_advanced(CUDA_VERSION)
487 endif()
489 # Always set this convenience variable
490 set(CUDA_VERSION_STRING "${CUDA_VERSION}")
492 # Here we need to determine if the version we found is acceptable. We will
493 # assume that it is unless CUDA_FIND_VERSION_EXACT or CUDA_FIND_VERSION is
494 # specified. The presence of either of these options checks the version
495 # string and signals if the version is acceptable or not.
496 set(_cuda_version_acceptable TRUE)
498 if(CUDA_FIND_VERSION_EXACT AND NOT CUDA_VERSION VERSION_EQUAL CUDA_FIND_VERSION)
499 set(_cuda_version_acceptable FALSE)
500 endif()
502 if(CUDA_FIND_VERSION AND CUDA_VERSION VERSION_LESS CUDA_FIND_VERSION)
503 set(_cuda_version_acceptable FALSE)
504 endif()
506 if(NOT _cuda_version_acceptable)
507 set(_cuda_error_message "Requested CUDA version ${CUDA_FIND_VERSION}, but found unacceptable version ${CUDA_VERSION}")
508 if(CUDA_FIND_REQUIRED)
509 message("${_cuda_error_message}")
510 elseif(NOT CUDA_FIND_QUIETLY)
511 message("${_cuda_error_message}")
512 endif()
513 endif()
515 # CUDA_TOOLKIT_INCLUDE
516 find_path(CUDA_TOOLKIT_INCLUDE
517 device_functions.h # Header included in toolkit
518 PATHS "${CUDA_TOOLKIT_ROOT_DIR}/include"
519 ENV CUDA_INC_PATH
520 NO_DEFAULT_PATH
521 )
522 # Search default search paths, after we search our own set of paths.
523 find_path(CUDA_TOOLKIT_INCLUDE device_functions.h)
524 mark_as_advanced(CUDA_TOOLKIT_INCLUDE)
526 # Set the user list of include dir to nothing to initialize it.
527 set (CUDA_NVCC_INCLUDE_ARGS_USER "")
528 set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE})
530 macro(FIND_LIBRARY_LOCAL_FIRST _var _names _doc)
531 if(CMAKE_SIZEOF_VOID_P EQUAL 8)
532 set(_cuda_64bit_lib_dir "${CUDA_TOOLKIT_ROOT_DIR}/lib64")
533 endif()
534 find_library(${_var}
535 NAMES ${_names}
536 PATHS ${_cuda_64bit_lib_dir}
537 "${CUDA_TOOLKIT_ROOT_DIR}/lib"
538 ENV CUDA_LIB_PATH
539 DOC ${_doc}
540 NO_DEFAULT_PATH
541 )
542 # Search default search paths, after we search our own set of paths.
543 find_library(${_var} NAMES ${_names} DOC ${_doc})
544 endmacro()
547 find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library")
548 set(CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY})
550 # We need to add the path to cudart to the linker using rpath, since the
551 # library name for the cuda libraries is prepended with @rpath.
552 get_filename_component(_cuda_path_to_cudart "${CUDA_CUDART_LIBRARY}" PATH)
553 if(_cuda_path_to_cudart)
554 list(APPEND CUDA_LIBRARIES -Wl,-rpath "-Wl,${_cuda_path_to_cudart}")
555 endif()
558 # 1.1 toolkit on linux doesn't appear to have a separate library on
559 # some platforms.
560 find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).")
562 # Add cuda library to the link line only if it is found.
563 if (CUDA_CUDA_LIBRARY)
564 set(CUDA_LIBRARIES ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY})
565 endif(CUDA_CUDA_LIBRARY)
572 #######################
573 # Look for some of the toolkit helper libraries
574 macro(FIND_CUDA_HELPER_LIBS _name)
575 find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library")
576 mark_as_advanced(CUDA_${_name}_LIBRARY)
577 endmacro(FIND_CUDA_HELPER_LIBS)
579 # Search for cufft and cublas libraries.
580 find_cuda_helper_libs(cufftemu)
581 find_cuda_helper_libs(cublasemu)
582 find_cuda_helper_libs(cufft)
583 find_cuda_helper_libs(cublas)
585 if (CUDA_BUILD_EMULATION)
586 set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY})
587 set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY})
588 else()
589 set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY})
590 set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY})
591 endif()
593 ########################
594 # Look for the SDK stuff
595 find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h
596 "$ENV{NVSDKCUDA_ROOT}"
597 "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]"
598 "/Developer/GPU\ Computing/C"
601 # Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the
602 # environment variables.
603 set(CUDA_SDK_SEARCH_PATH
604 "${CUDA_SDK_ROOT_DIR}"
605 "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2"
606 "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2"
607 "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK"
608 "$ENV{HOME}/NVIDIA_CUDA_SDK"
609 "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX"
613 # Example of how to find an include file from the CUDA_SDK_ROOT_DIR
615 # find_path(CUDA_CUT_INCLUDE_DIR
617 # PATHS ${CUDA_SDK_SEARCH_PATH}
618 # PATH_SUFFIXES "common/inc"
619 # DOC "Location of cutil.h"
622 # # Now search system paths
623 # find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h")
625 # mark_as_advanced(CUDA_CUT_INCLUDE_DIR)
628 # Example of how to find a library in the CUDA_SDK_ROOT_DIR
630 # # cutil library is called cutil64 for 64 bit builds on windows. We don't want
631 # # to get these confused, so we are setting the name based on the word size of
634 # if(CMAKE_SIZEOF_VOID_P EQUAL 8)
635 # set(cuda_cutil_name cutil64)
636 # else(CMAKE_SIZEOF_VOID_P EQUAL 8)
637 # set(cuda_cutil_name cutil32)
638 # endif(CMAKE_SIZEOF_VOID_P EQUAL 8)
640 # find_library(CUDA_CUT_LIBRARY
641 # NAMES cutil ${cuda_cutil_name}
642 # PATHS ${CUDA_SDK_SEARCH_PATH}
643 # # The new version of the sdk shows up in common/lib, but the old one is in lib
644 # PATH_SUFFIXES "common/lib" "lib"
645 # DOC "Location of cutil library"
648 # # Now search system paths
649 # find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library")
650 # mark_as_advanced(CUDA_CUT_LIBRARY)
651 # set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY})
655 #############################
656 # Check for required components
659 set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL
660 "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE)
661 set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL
662 "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE)
664 include(FindPackageHandleStandardArgs)
665 find_package_handle_standard_args(CUDA DEFAULT_MSG
666 CUDA_TOOLKIT_ROOT_DIR
667 CUDA_NVCC_EXECUTABLE
668 CUDA_INCLUDE_DIRS
669 CUDA_CUDART_LIBRARY
670 _cuda_version_acceptable
671 )
675 ###############################################################################
676 ###############################################################################
677 # Macros
678 ###############################################################################
679 ###############################################################################
681 ###############################################################################
682 # Add include directories to pass to the nvcc command.
683 macro(CUDA_INCLUDE_DIRECTORIES)
684 foreach(dir ${ARGN})
685 list(APPEND CUDA_NVCC_INCLUDE_ARGS_USER "-I${dir}")
686 endforeach(dir ${ARGN})
687 endmacro(CUDA_INCLUDE_DIRECTORIES)
690 ##############################################################################
691 cuda_find_helper_file(parse_cubin cmake)
692 cuda_find_helper_file(make2cmake cmake)
693 cuda_find_helper_file(run_nvcc cmake)
695 ##############################################################################
696 # Separate the OPTIONS out from the sources
698 macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options)
699 set( ${_sources} )
700 set( ${_cmake_options} )
701 set( ${_options} )
702 set( _found_options FALSE )
703 foreach(arg ${ARGN})
704 if(arg STREQUAL "OPTIONS")
705 set( _found_options TRUE )
706 elseif(
707 arg STREQUAL "WIN32" OR
708 arg STREQUAL "MACOSX_BUNDLE" OR
709 arg STREQUAL "EXCLUDE_FROM_ALL" OR
710 arg STREQUAL "STATIC" OR
711 arg STREQUAL "SHARED" OR
712 arg STREQUAL "MODULE"
713 )
714 list(APPEND ${_cmake_options} "${arg}")
715 else()
716 if ( _found_options )
717 list(APPEND ${_options} "${arg}")
718 else()
719 # Assume this is a file
720 list(APPEND ${_sources} "${arg}")
721 endif()
722 endif()
723 endforeach()
724 endmacro()
726 ##############################################################################
727 # Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix
729 macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix)
730 set( _found_config )
731 foreach(arg ${ARGN})
732 # Determine if we are dealing with a per-configuration flag
733 foreach(config ${CUDA_configuration_types})
734 string(TOUPPER ${config} config_upper)
735 if (arg STREQUAL "${config_upper}")
736 set( _found_config _${arg})
737 # Set arg to nothing to keep it from being processed further
738 set( arg )
739 endif()
740 endforeach()
742 if ( arg )
743 list(APPEND ${_option_prefix}${_found_config} "${arg}")
744 endif()
745 endforeach()
746 endmacro()
748 ##############################################################################
749 # Helper to add the include directory for CUDA only once
750 function(CUDA_ADD_CUDA_INCLUDE_ONCE)
751 get_directory_property(_include_directories INCLUDE_DIRECTORIES)
752 set(_add TRUE)
753 if(_include_directories)
754 foreach(dir ${_include_directories})
755 if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}")
756 set(_add FALSE)
757 endif()
758 endforeach()
759 endif()
760 if(_add)
761 include_directories(${CUDA_INCLUDE_DIRS})
762 endif()
763 endfunction()
765 function(CUDA_BUILD_SHARED_LIBRARY shared_flag)
766 set(cmake_args ${ARGN})
767 # If SHARED, MODULE, or STATIC aren't already in the list of arguments, then
768 # add SHARED or STATIC based on the value of BUILD_SHARED_LIBS.
769 list(FIND cmake_args SHARED _cuda_found_SHARED)
770 list(FIND cmake_args MODULE _cuda_found_MODULE)
771 list(FIND cmake_args STATIC _cuda_found_STATIC)
772 if( _cuda_found_SHARED GREATER -1 OR
773 _cuda_found_MODULE GREATER -1 OR
774 _cuda_found_STATIC GREATER -1)
775 set(_cuda_build_shared_libs)
776 else()
777 if (BUILD_SHARED_LIBS)
778 set(_cuda_build_shared_libs SHARED)
779 else()
780 set(_cuda_build_shared_libs STATIC)
781 endif()
782 endif()
783 set(${shared_flag} ${_cuda_build_shared_libs} PARENT_SCOPE)
784 endfunction()
786 ##############################################################################
787 # This helper macro populates the following variables and sets up custom
788 # commands and targets to invoke the nvcc compiler to generate C or PTX source
789 # dependent upon the format parameter. The compiler is invoked once with -M
790 # to generate a dependency file and a second time with -cuda or -ptx to generate
791 # a .cpp or .ptx file.
793 # cuda_target - Target name
794 # format - PTX or OBJ
795 # FILE1 .. FILEN - The remaining arguments are the sources to be wrapped.
796 # OPTIONS - Extra options to NVCC
798 # generated_files - List of generated files
799 ##############################################################################
800 ##############################################################################
802 macro(CUDA_WRAP_SRCS cuda_target format generated_files)
804 if( ${format} MATCHES "PTX" )
805 set( compile_to_ptx ON )
806 elseif( ${format} MATCHES "OBJ")
807 set( compile_to_ptx OFF )
808 else()
809 message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS: '${format}'. Use OBJ or PTX.")
810 endif()
812 # Set up all the command line flags here, so that they can be overridden on a per-target basis.
813 set(nvcc_flags "")
816 # Emulation if the card isn't present.
817 if (CUDA_BUILD_EMULATION)
819 set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g)
820 else(CUDA_BUILD_EMULATION)
821 # Device mode. No flags necessary.
822 endif(CUDA_BUILD_EMULATION)
824 if(CUDA_HOST_COMPILATION_CPP)
825 set(CUDA_C_OR_CXX CXX)
826 else(CUDA_HOST_COMPILATION_CPP)
827 if(CUDA_VERSION VERSION_LESS "3.0")
828 set(nvcc_flags ${nvcc_flags} --host-compilation C)
829 else()
830 message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0. Removing --host-compilation C flag" )
831 endif()
832 set(CUDA_C_OR_CXX C)
833 endif(CUDA_HOST_COMPILATION_CPP)
835 set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})
837 if(CUDA_64_BIT_DEVICE_CODE)
838 set(nvcc_flags ${nvcc_flags} -m64)
839 else()
840 set(nvcc_flags ${nvcc_flags} -m32)
841 endif()
843 # This needs to be passed in at this stage, because VS needs to fill out the
844 # value of VCInstallDir from within VS.
845 if(CMAKE_GENERATOR MATCHES "Visual Studio")
846 if( CMAKE_SIZEOF_VOID_P EQUAL 8 )
847 # Add nvcc flag for 64b Windows
848 set(ccbin_flags -D "\"CCBIN:PATH=$(VCInstallDir)bin\"" )
849 endif()
850 endif()
852 # Figure out which configuration we will use and pass that in as an argument to
853 # the script. We need to defer the decision until compilation time, because
854 # for VS projects we won't know if we are making a debug or release build
855 # until build time.
856 if(CMAKE_GENERATOR MATCHES "Visual Studio")
857 set( CUDA_build_configuration "$(ConfigurationName)" )
858 else()
859 set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}")
860 endif()
862 # Initialize our list of includes with the user ones followed by the CUDA system ones.
863 set(CUDA_NVCC_INCLUDE_ARGS ${CUDA_NVCC_INCLUDE_ARGS_USER} "-I${CUDA_INCLUDE_DIRS}")
864 # Get the include directories for this directory and use them for our nvcc command.
865 get_directory_property(CUDA_NVCC_INCLUDE_DIRECTORIES INCLUDE_DIRECTORIES)
866 if(CUDA_NVCC_INCLUDE_DIRECTORIES)
867 foreach(dir ${CUDA_NVCC_INCLUDE_DIRECTORIES})
868 list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}")
869 endforeach()
870 endif()
872 # Reset these variables
873 set(CUDA_WRAP_OPTION_NVCC_FLAGS)
874 foreach(config ${CUDA_configuration_types})
875 string(TOUPPER ${config} config_upper)
876 set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper})
877 endforeach()
879 CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${ARGN})
880 CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options})
882 # Figure out if we are building a shared library. BUILD_SHARED_LIBS is
883 # respected in CUDA_ADD_LIBRARY.
884 set(_cuda_build_shared_libs FALSE)
886 list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED)
887 list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE)
888 if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1)
889 set(_cuda_build_shared_libs TRUE)
890 endif()
892 list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC)
893 if(_cuda_found_STATIC GREATER -1)
894 set(_cuda_build_shared_libs FALSE)
895 endif()
898 if(_cuda_build_shared_libs)
899 # If we are setting up code for a shared library, then we need to add extra flags for
900 # compiling objects for shared libraries.
901 set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS})
902 else()
903 set(CUDA_HOST_SHARED_FLAGS)
904 endif()
905 # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags. We
906 # always need to set the SHARED_FLAGS, though.
907 if(CUDA_PROPAGATE_HOST_FLAGS)
908 set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CMAKE_${CUDA_C_OR_CXX}_FLAGS} ${CUDA_HOST_SHARED_FLAGS})")
909 else()
910 set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS})")
911 endif()
913 set(CUDA_NVCC_FLAGS_CONFIG "# Build specific configuration flags")
914 # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake
915 foreach(config ${CUDA_configuration_types})
916 string(TOUPPER ${config} config_upper)
917 # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS
918 # we convert the strings to lists (like we want).
920 if(CUDA_PROPAGATE_HOST_FLAGS)
921 # nvcc chokes on -g3, so replace it with -g
922 if(CMAKE_COMPILER_IS_GNUCC)
923 string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
924 else()
925 set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
926 endif()
928 set(CUDA_HOST_FLAGS "${CUDA_HOST_FLAGS}\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})")
929 endif()
931 # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be string (instead of a list
932 # like it is currently), we can remove the quotes around the
933 # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable.
934 set(CUDA_NVCC_FLAGS_CONFIG "${CUDA_NVCC_FLAGS_CONFIG}\nset(CUDA_NVCC_FLAGS_${config_upper} \"${CUDA_NVCC_FLAGS_${config_upper}};;${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}}\")")
935 endforeach()
937 if(compile_to_ptx)
938 # Don't use any of the host compilation flags for PTX targets.
939 set(CUDA_HOST_FLAGS)
940 set(CUDA_NVCC_FLAGS_CONFIG)
941 endif()
943 # Get the list of definitions from the directory property
944 get_directory_property(CUDA_NVCC_DEFINITIONS COMPILE_DEFINITIONS)
945 if(CUDA_NVCC_DEFINITIONS)
946 foreach(_definition ${CUDA_NVCC_DEFINITIONS})
947 list(APPEND nvcc_flags "-D${_definition}")
948 endforeach()
949 endif()
951 if(_cuda_build_shared_libs)
952 list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS")
953 endif()
955 # Determine output directory
956 if(CUDA_GENERATED_OUTPUT_DIR)
957 set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}")
958 else()
959 set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}")
960 endif()
962 # Reset the output variable
963 set(_cuda_wrap_generated_files "")
965 # Iterate over the macro arguments and create custom
966 # commands for all the .cu files.
967 foreach(file ${ARGN})
968 # Ignore any file marked as a HEADER_FILE_ONLY
969 get_source_file_property(_is_header ${file} HEADER_FILE_ONLY)
970 if(${file} MATCHES ".*\\.cu$" AND NOT _is_header)
972 # Add a custom target to generate a c or ptx file. ######################
974 get_filename_component( basename ${file} NAME )
975 if( compile_to_ptx )
976 set(generated_file_path "${cuda_compile_output_dir}")
977 set(generated_file_basename "${cuda_target}_generated_${basename}.ptx")
978 set(format_flag "-ptx")
979 file(MAKE_DIRECTORY "${cuda_compile_output_dir}")
980 else( compile_to_ptx )
981 set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}")
982 set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}")
983 set(format_flag "-c")
984 endif( compile_to_ptx )
986 # Set all of our file names. Make sure that whatever filenames that have
987 # generated_file_path in them get passed in through as a command line
988 # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time
989 # instead of configure time.
990 set(generated_file "${generated_file_path}/${generated_file_basename}")
991 set(cmake_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.depend")
992 set(NVCC_generated_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.NVCC-depend")
993 set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt")
994 set(custom_target_script "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.cmake")
996 # Setup properties for obj files:
997 if( NOT compile_to_ptx )
998 set_source_files_properties("${generated_file}"
999 PROPERTIES
1000 EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked.
1001 )
1002 endif()
1004 # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path.
1005 get_filename_component(file_path "${file}" PATH)
1006 if(IS_ABSOLUTE "${file_path}")
1007 set(source_file "${file}")
1008 else()
1009 set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}")
1010 endif()
1012 # Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND #######
1013 cuda_include_nvcc_dependencies(${cmake_dependency_file})
1015 # Convenience string for output ###########################################
1016 if(CUDA_BUILD_EMULATION)
1017 set(cuda_build_type "Emulation")
1018 else(CUDA_BUILD_EMULATION)
1019 set(cuda_build_type "Device")
1020 endif(CUDA_BUILD_EMULATION)
1022 # Build the NVCC made dependency file ###################################
1023 set(build_cubin OFF)
1024 if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
1025 if ( NOT compile_to_ptx )
1026 set ( build_cubin ON )
1027 endif( NOT compile_to_ptx )
1028 endif( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
1030 # Configure the build script
1031 configure_file("${CUDA_run_nvcc}" "${custom_target_script}" @ONLY)
1033 # So if a user specifies the same cuda file as input more than once, you
1034 # can have bad things happen with dependencies. Here we check an option
1035 # to see if this is the behavior they want.
1036 if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
1037 set(main_dep MAIN_DEPENDENCY ${source_file})
1038 else()
1039 set(main_dep DEPENDS ${source_file})
1040 endif()
1042 if(CUDA_VERBOSE_BUILD)
1043 set(verbose_output ON)
1044 elseif(CMAKE_GENERATOR MATCHES "Makefiles")
1045 set(verbose_output "$(VERBOSE)")
1046 else()
1047 set(verbose_output OFF)
1048 endif()
1050 # Create the comment string
1051 file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}")
1052 if(compile_to_ptx)
1053 set(cuda_build_comment_string "Building NVCC ptx file ${generated_file_relative_path}")
1054 else()
1055 set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}")
1056 endif()
1058 # Build the generated file and dependency file ##########################
1059 add_custom_command(
1060 OUTPUT ${generated_file}
1061 # These output files depend on the source_file and the contents of cmake_dependency_file
1062 ${main_dep}
1063 DEPENDS ${CUDA_NVCC_DEPEND}
1064 DEPENDS ${custom_target_script}
1065 # Make sure the output directory exists before trying to write to it.
1066 COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}"
1067 COMMAND ${CMAKE_COMMAND} ARGS
1068 -D verbose:BOOL=${verbose_output}
1069 ${ccbin_flags}
1070 -D build_configuration:STRING=${CUDA_build_configuration}
1071 -D "generated_file:STRING=${generated_file}"
1072 -D "generated_cubin_file:STRING=${generated_cubin_file}"
1073 -P "${custom_target_script}"
1074 COMMENT "${cuda_build_comment_string}"
1075 )
1077 # Make sure the build system knows the file is generated.
1078 set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE)
1080 # Don't add the object file to the list of generated files if we are using
1081 # visual studio and we are attaching the build rule to the cuda file. VS
1082 # will add our object file to the linker automatically for us.
1083 set(cuda_add_generated_file TRUE)
1085 if(NOT compile_to_ptx AND CMAKE_GENERATOR MATCHES "Visual Studio" AND CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
1086 # Visual Studio 8 crashes when you close the solution when you don't add the object file.
1087 if(NOT CMAKE_GENERATOR MATCHES "Visual Studio 8")
1088 #message("Not adding ${generated_file}")
1089 set(cuda_add_generated_file FALSE)
1090 endif()
1091 endif()
1093 if(cuda_add_generated_file)
1094 list(APPEND _cuda_wrap_generated_files ${generated_file})
1095 endif()
1097 # Add the other files that we want cmake to clean on a cleanup ##########
1098 list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}")
1099 list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES)
1100 set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")
1102 endif(${file} MATCHES ".*\\.cu$" AND NOT _is_header)
1103 endforeach()
1105 # Set the return parameter
1106 set(${generated_files} ${_cuda_wrap_generated_files})
1107 endmacro(CUDA_WRAP_SRCS)
1110 ###############################################################################
1111 ###############################################################################
1112 # ADD LIBRARY
1113 ###############################################################################
1114 ###############################################################################
1115 macro(CUDA_ADD_LIBRARY cuda_target)
1117 CUDA_ADD_CUDA_INCLUDE_ONCE()
1119 # Separate the sources from the options
1120 CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
1121 CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN})
1122 # Create custom commands and targets for each file.
1123 CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources}
1124 ${_cmake_options} ${_cuda_shared_flag}
1125 OPTIONS ${_options} )
1128 add_library(${cuda_target} ${_cmake_options}
1129 ${_sources}
1130 ${_generated_files}
1131 )
1133 target_link_libraries(${cuda_target}
1134 ${CUDA_LIBRARIES}
1135 )
1137 # We need to set the linker language based on what the expected generated file
1138 # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
1139 set_target_properties(${cuda_target}
1140 PROPERTIES
1141 LINKER_LANGUAGE ${CUDA_C_OR_CXX}
1142 )
1144 endmacro(CUDA_ADD_LIBRARY cuda_target)
1147 ###############################################################################
1148 ###############################################################################
1149 # ADD EXECUTABLE
1150 ###############################################################################
1151 ###############################################################################
1152 macro(CUDA_ADD_EXECUTABLE cuda_target)
1154 CUDA_ADD_CUDA_INCLUDE_ONCE()
1156 # Separate the sources from the options
1157 CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
1158 # Create custom commands and targets for each file.
1159 CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )
1162 add_executable(${cuda_target} ${_cmake_options}
1163 ${_sources}
1164 ${_generated_files}
1165 )
1167 target_link_libraries(${cuda_target}
1168 ${CUDA_LIBRARIES}
1169 )
1171 # We need to set the linker language based on what the expected generated file
1172 # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
1173 set_target_properties(${cuda_target}
1174 PROPERTIES
1175 LINKER_LANGUAGE ${CUDA_C_OR_CXX}
1176 )
1178 endmacro(CUDA_ADD_EXECUTABLE cuda_target)
1181 ###############################################################################
1182 ###############################################################################
1183 # CUDA COMPILE
1184 ###############################################################################
1185 ###############################################################################
1186 macro(CUDA_COMPILE generated_files)
1188 # Separate the sources from the options
1189 CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
1190 # Create custom commands and targets for each file.
1191 CUDA_WRAP_SRCS( cuda_compile OBJ _generated_files ${_sources} ${_cmake_options}
1192 OPTIONS ${_options} )
1194 set( ${generated_files} ${_generated_files})
1196 endmacro(CUDA_COMPILE)
1199 ###############################################################################
1200 ###############################################################################
1201 # CUDA COMPILE PTX
1202 ###############################################################################
1203 ###############################################################################
1204 macro(CUDA_COMPILE_PTX generated_files)
1206 # Separate the sources from the options
1207 CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
1208 # Create custom commands and targets for each file.
1209 CUDA_WRAP_SRCS( cuda_compile_ptx PTX _generated_files ${_sources} ${_cmake_options}
1210 OPTIONS ${_options} )
1212 set( ${generated_files} ${_generated_files})
1214 endmacro(CUDA_COMPILE_PTX)
1216 ###############################################################################
1217 ###############################################################################
1218 # CUDA ADD CUFFT TO TARGET
1219 ###############################################################################
1220 ###############################################################################
1221 macro(CUDA_ADD_CUFFT_TO_TARGET target)
1222 if (CUDA_BUILD_EMULATION)
1223 target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY})
1224 else()
1225 target_link_libraries(${target} ${CUDA_cufft_LIBRARY})
1226 endif()
1227 endmacro()
1229 ###############################################################################
1230 ###############################################################################
1231 # CUDA ADD CUBLAS TO TARGET
1232 ###############################################################################
1233 ###############################################################################
1234 macro(CUDA_ADD_CUBLAS_TO_TARGET target)
1235 if (CUDA_BUILD_EMULATION)
1236 target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY})
1237 else()
1238 target_link_libraries(${target} ${CUDA_cublas_LIBRARY})
1239 endif()
1240 endmacro()
1242 ###############################################################################
1243 ###############################################################################
1244 # CUDA BUILD CLEAN TARGET
1245 ###############################################################################
1246 ###############################################################################
1247 macro(CUDA_BUILD_CLEAN_TARGET)
1248 # Call this after you add all your CUDA targets, and you will get a convenience
1249 # target. You should also run make clean after running this target to get the
1250 # build system to generate all the code again.
1252 set(cuda_clean_target_name clean_cuda_depends)
1253 if (CMAKE_GENERATOR MATCHES "Visual Studio")
1254 string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name)
1255 endif()
1256 add_custom_target(${cuda_clean_target_name}
1257 COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES})
1259 # Clear out the variable, so the next time we configure it will be empty.
1260 # This is useful so that the files won't persist in the list after targets
1261 # have been removed.
1262 set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")
1263 endmacro(CUDA_BUILD_CLEAN_TARGET)