# - Tools for building CUDA C files: libraries and build dependencies.
# This script locates the NVIDIA Compute Unified Device Architecture (CUDA)
# tools. It should work on Linux, Windows, and Mac and should be reasonably
# up to date with CUDA releases.
#
# This script makes use of the standard find_package() arguments <VERSION>,
# REQUIRED and QUIET. CUDA_FOUND will report if an acceptable version of CUDA
# was found.
#
# The script will prompt the user to specify CUDA_TOOLKIT_ROOT_DIR if the prefix
# cannot be determined by the location of nvcc in the system path and REQUIRED
# is specified to find_package(). To use a different installed version of the
# toolkit set the environment variable CUDA_BIN_PATH before running cmake
# (e.g. CUDA_BIN_PATH=/usr/local/cuda1.0 instead of the default
# /usr/local/cuda) or set CUDA_TOOLKIT_ROOT_DIR after configuring.
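#
# For example, a sketch of pointing the script at a non-default toolkit
# (the paths here are only illustrative):
#   CUDA_BIN_PATH=/usr/local/cuda-2.3 cmake ..
# or, after an initial configure:
#   cmake -D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-2.3 .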
#
# It might be necessary to set CUDA_TOOLKIT_ROOT_DIR manually on certain
# platforms, or to use a CUDA runtime not installed in the default location. In
# newer versions of the toolkit the CUDA library is included with the graphics
# driver - be sure that the driver version matches what is needed by the CUDA
# runtime version.
#
# Set CUDA_BUILD_EMULATION to ON for Emulation mode. Defaults to OFF (device
# mode). -D_DEVICEEMU is defined for CUDA C files when CUDA_BUILD_EMULATION is
# ON.
#
# Set CUDA_HOST_COMPILATION_CPP to OFF for C compilation of host code. Defaults
# to ON.
#
# Set CUDA_BUILD_CUBIN to "ON" or "OFF" to enable an extra compilation pass
# with the -cubin option in Device mode. The output is parsed and register,
# shared memory usage is printed during build. Default OFF.
#
# Set CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE to ON if you want the custom build
# rule to be attached to the source file in Visual Studio. Defaults to ON.
# Turn OFF if you add the same CUDA file to multiple targets.
#
# This allows the user to build the target from the CUDA file; however, bad
# things can happen if the CUDA source file is added to multiple targets. When
# performing parallel builds it is possible for the custom build command to be
# run more than once and in parallel, causing cryptic build errors. This is
# because VS runs the rules for every source file in the target, and a source
# can have only one rule no matter how many projects it is added to. Therefore,
# the rule assigned to the source file really only applies to one target, and
# you get clashes when it is run from multiple targets. Eventually everything
# will get built, but if the user is unaware of this behavior, there may be
# confusion. It would be nice if we could detect the reuse of source files
# across multiple targets and turn the option off for the user, but no good
# solution could be found.
#
# Set CUDA_64_BIT_DEVICE_CODE to ON to compile for 64 bit devices. Defaults to
# match the host bit size. Note that making this different from the host code
# when generating C files from CUDA code just won't work, because size_t gets
# defined by nvcc in the generated source. If you compile to PTX and then load
# the file yourself, you can mix bit sizes between device and host.
#
# Set CUDA_VERBOSE_BUILD to ON to see all the commands used when building the
# CUDA file. When using a Makefile generator the value defaults to VERBOSE (run
# make VERBOSE=1 to see output). You can override this by setting
# CUDA_VERBOSE_BUILD to ON.
#
# Set CUDA_GENERATED_OUTPUT_DIR to the path you wish to have the generated files
# placed. If it is blank, output files will be placed in
# CMAKE_CURRENT_BINARY_DIR. Intermediate files will always be placed in
# CMAKE_CURRENT_BINARY_DIR.
#
# The script creates the following macros:
#
# CUDA_INCLUDE_DIRECTORIES( path0 path1 ... )
# -- Sets the directories that should be passed to nvcc
#    (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu
#    files.
#
# CUDA_ADD_LIBRARY( cuda_target file0 file1 ...
#                   [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
# -- Creates a shared library "cuda_target" which contains all of the source
#    (*.c, *.cc, etc.) specified and all of the nvcc'ed .cu files specified.
#    All of the specified source files and generated .cpp files are compiled
#    using the standard CMake compiler, so the normal INCLUDE_DIRECTORIES,
#    LINK_DIRECTORIES, and TARGET_LINK_LIBRARIES can be used to affect their
#    build and link. In addition CUDA_INCLUDE_DIRS is automatically added
#    to include_directories().
#
# CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ...
#                      [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] )
# -- Same as CUDA_ADD_LIBRARY except that an executable is created.
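#
# A minimal usage sketch (target and file names are only illustrative):
#   find_package(CUDA REQUIRED)
#   cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/kernels)
#   cuda_add_library(mykernels kernels.cu helpers.cpp)
#   cuda_add_executable(myapp main.cpp)
#   target_link_libraries(myapp mykernels)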
#
# CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE]
#               [OPTIONS ...] )
# -- Returns a list of generated files from the input source files to be used
#    with ADD_LIBRARY or ADD_EXECUTABLE.
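#
# For example, a sketch of pairing CUDA_COMPILE with a plain add_executable
# (names are only illustrative):
#   cuda_compile(cuda_objs kernel.cu)
#   add_executable(myapp main.cpp ${cuda_objs})
#   target_link_libraries(myapp ${CUDA_LIBRARIES})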
#
# CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] )
# -- Returns a list of PTX files generated from the input source files.
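#
# For example (file and target names are only illustrative):
#   cuda_compile_ptx(ptx_files raytrace.cu)
#   add_custom_target(ptx_build ALL DEPENDS ${ptx_files})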
#
# CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ...
#                  [STATIC | SHARED | MODULE] [OPTIONS ...] )
# -- This is where all the magic happens. CUDA_ADD_EXECUTABLE,
#    CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this
#    function under the hood.
#
#    Given the list of files (file0 file1 ... fileN) this macro generates
#    custom commands that generate either PTX or linkable objects (use "PTX" or
#    "OBJ" for the format argument to switch). Files that don't end with .cu or
#    have the HEADER_FILE_ONLY property are ignored.
#
#    The arguments passed in after OPTIONS are extra command line options to
#    give to NVCC. You can also specify per configuration options by
#    specifying the name of the configuration followed by the options. General
#    options must precede configuration specific options. Not all
#    configurations need to be specified; only the ones provided will be used.
#    For example:
#
#    OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag"
#    RELEASE --use_fast_math
#    RELWITHDEBINFO --use_fast_math;-g
#    MINSIZEREL --use_fast_math
#
#    For certain configurations (namely VS generating object files with
#    CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will
#    be produced for the given cuda file. This is because when you add the
#    cuda file to Visual Studio it knows that this file produces an object file
#    and will link it in automatically.
#
#    This script also looks at optional arguments STATIC, SHARED, or MODULE to
#    override the behavior specified by the value of the CMake variable
#    BUILD_SHARED_LIBS. See BUILD_SHARED_LIBS below for more details.
#
#    This script will also generate a separate cmake script that is used at
#    build time to invoke nvcc. This is for several reasons.
#
#    1. nvcc can return negative numbers as return values, which confuses
#    Visual Studio into thinking that the command succeeded. The script now
#    checks the error codes and produces errors when there was a problem.
#
#    2. nvcc has been known to not delete intermediate results when it
#    encounters problems. The build rules then don't complete, because there
#    exists a partially written output file. The script now deletes the
#    output files if there was an error.
#
#    3. By putting all the options that affect the build into a file and then
#    making the build rule dependent on that file, the output files will be
#    regenerated when the options change.
#
# CUDA_ADD_CUFFT_TO_TARGET( cuda_target )
# -- Adds the cufft library to the target. Handles whether you are in
#    emulation mode or not.
#
# CUDA_ADD_CUBLAS_TO_TARGET( cuda_target )
# -- Adds the cublas library to the target. Handles whether you are in
#    emulation mode or not.
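#
# For example (the target name is only illustrative):
#   cuda_add_executable(blas_fft_demo demo.cu)
#   cuda_add_cufft_to_target(blas_fft_demo)
#   cuda_add_cublas_to_target(blas_fft_demo)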
#
# CUDA_BUILD_CLEAN_TARGET()
# -- Creates a convenience target that deletes all the dependency files
#    generated. You should make clean after running this target to ensure the
#    dependency files get regenerated.
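#
# For example, call it once after all of your CUDA targets have been added:
#   cuda_build_clean_target()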
#
# The script defines the following variables:
#
# ( Note CUDA_ADD_* macros set up the cuda/cut library dependencies automatically.
#   These variables are only needed if a CUDA API call must be made from code in
#   an outside library or executable. )
#
# CUDA_VERSION_MAJOR    -- The major version of cuda as reported by nvcc.
# CUDA_VERSION_MINOR    -- The minor version.
# CUDA_VERSION_STRING   -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
#
# CUDA_INCLUDE_DIRS     -- Include directory for cuda headers. Added automatically
#                          for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY.
# CUDA_LIBRARIES        -- Cuda RT library.
# CUDA_CUT_INCLUDE_DIR  -- Include directory for cuda SDK headers (cutil.h).
# CUDA_CUT_LIBRARIES    -- SDK libraries.
# CUDA_NVCC_FLAGS       -- Additional NVCC command line arguments. NOTE:
#                          multiple arguments must be semi-colon delimited,
#                          e.g. --compiler-options;-Wall (see the example
#                          after this list).
# CUDA_NVCC_FLAGS_<CONFIG> -- Configuration specific flags for NVCC.
# CUDA_CUFFT_LIBRARIES  -- Device or emulation library for the Cuda FFT
#                          implementation (alternative to:
#                          CUDA_ADD_CUFFT_TO_TARGET macro)
# CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
#                          implementation (alternative to:
#                          CUDA_ADD_CUBLAS_TO_TARGET macro).
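#
# For example, a sketch of setting global and per-configuration nvcc flags
# from a CMakeLists.txt (the flag values are only illustrative):
#   set(CUDA_NVCC_FLAGS "--compiler-options;-Wall")
#   set(CUDA_NVCC_FLAGS_RELEASE "--use_fast_math")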
#
# The script generates object files by default. In order to facilitate this,
# the script makes use of the CMAKE_{C,CXX}_FLAGS along with their configuration
# dependent counterparts (i.e. CMAKE_C_FLAGS_DEBUG). These flags are passed
# through nvcc to the native compiler. In addition, on some systems special
# flags are added for building objects intended for shared libraries. FindCUDA
# makes use of the CMake variable BUILD_SHARED_LIBS and the usual STATIC, SHARED,
# and MODULE arguments to determine if these flags should be used. Please set
# BUILD_SHARED_LIBS or pass in STATIC, SHARED, or MODULE according to how the
# objects are to be used before calling CUDA_ADD_LIBRARY. A preprocessor macro,
# <target_name>_EXPORTS, is defined when BUILD_SHARED_LIBS is defined. In
# addition, flags passed into add_definitions with -D or /D are passed along to
# nvcc.
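#
# For example, a sketch of forcing a shared CUDA library regardless of
# BUILD_SHARED_LIBS (the target name is only illustrative):
#   cuda_add_library(mykernels SHARED kernels.cu)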
#
# Files with the HEADER_FILE_ONLY property set will not be compiled.
#
# James Bigler, NVIDIA Corp
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
# Copyright (c) 2008-2009
# NVIDIA Corporation
#
# Copyright (c) 2007-2009
# Scientific Computing and Imaging Institute, University of Utah
#
# This code is licensed under the MIT License. See the FindCUDA.cmake script
# for the text of the license.
#
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
# We need to have at least this version to support the VERSION_LESS argument to 'if' (2.6.2) and unset (2.6.3)
cmake_minimum_required(VERSION 2.6.3)

# This macro helps us find the location of helper files we will need the full path to.
macro(CUDA_FIND_HELPER_FILE _name _extension)
  set(_full_name "${_name}.${_extension}")
  # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being
  # processed. Using this variable, we can pull out the current path, and
  # provide a way to get access to the other files we need local to here.
  get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
  find_file(CUDA_${_name} ${_full_name} PATHS ${CMAKE_CURRENT_LIST_DIR}/FindCUDA NO_DEFAULT_PATH)
  if(NOT CUDA_${_name})
    set(error_message "${_full_name} not found in CMAKE_MODULE_PATH")
    if(CUDA_FIND_REQUIRED)
      message(FATAL_ERROR "${error_message}")
    else(CUDA_FIND_REQUIRED)
      if(NOT CUDA_FIND_QUIETLY)
        message(STATUS "${error_message}")
      endif(NOT CUDA_FIND_QUIETLY)
    endif(CUDA_FIND_REQUIRED)
  endif(NOT CUDA_${_name})
  # Set this variable as internal, so the user isn't bugged with it.
  set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE)
endmacro(CUDA_FIND_HELPER_FILE)
#####################################################################
## CUDA_INCLUDE_NVCC_DEPENDENCIES
##
# So we want to try and include the dependency file if it exists. If
# it doesn't exist then we need to create an empty one, so we can
# include it.
#
# If it does exist, then we need to check to see if all the files it
# depends on exist. If they don't then we should clear the dependency
# file and regenerate it later. This covers the case where a header
# file has disappeared or moved.

macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file)
  set(CUDA_NVCC_DEPEND)
  set(CUDA_NVCC_DEPEND_REGENERATE FALSE)

  # Include the dependency file. Create it first if it doesn't exist. The
  # INCLUDE puts a dependency that will force CMake to rerun and bring in the
  # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few
  # hours figuring out why it didn't work).
  if(NOT EXISTS ${dependency_file})
    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
  endif()
  # Always include this file to force CMake to run again next
  # invocation and rebuild the dependencies.
  #message("including dependency_file = ${dependency_file}")
  include(${dependency_file})

  # Now we need to verify the existence of all the included files
  # here. If they aren't there we need to just blank this variable and
  # make the file regenerate again.
  # if(DEFINED CUDA_NVCC_DEPEND)
  #   message("CUDA_NVCC_DEPEND set")
  # else()
  #   message("CUDA_NVCC_DEPEND NOT set")
  # endif()
  if(CUDA_NVCC_DEPEND)
    #message("CUDA_NVCC_DEPEND true")
    foreach(f ${CUDA_NVCC_DEPEND})
      #message("searching for ${f}")
      if(NOT EXISTS ${f})
        #message("file ${f} not found")
        set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
      endif()
    endforeach(f)
  else(CUDA_NVCC_DEPEND)
    #message("CUDA_NVCC_DEPEND false")
    # No dependencies, so regenerate the file.
    set(CUDA_NVCC_DEPEND_REGENERATE TRUE)
  endif(CUDA_NVCC_DEPEND)

  #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}")
  # No incoming dependencies, so we need to generate them. Make the
  # output depend on the dependency file itself, which should cause the
  # rule to re-run.
  if(CUDA_NVCC_DEPEND_REGENERATE)
    file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n")
  endif(CUDA_NVCC_DEPEND_REGENERATE)

endmacro(CUDA_INCLUDE_NVCC_DEPENDENCIES)
###############################################################################
###############################################################################
# Setup default variables
###############################################################################
###############################################################################

# Set whether we are using emulation or device mode.
option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF)
# Parse HOST_COMPILATION mode.
option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON)
# Allow the user to specify if the device code is supposed to be 32 or 64 bit.
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON)
else()
  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF)
endif()
option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT})
# Prints out extra information about the cuda file during compilation
option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF)
# Extra user settable flags
set(CUDA_NVCC_FLAGS "" CACHE STRING "Semi-colon delimit multiple arguments.")
# Attach the build rule to the source file in VS. This option should be
# turned off if the same CUDA source file is added to multiple targets.
option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON)
# Specifies whether the commands used when compiling the .cu file will be printed out.
option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF)
# Where to put the generated output.
set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR")

mark_as_advanced(
  CUDA_HOST_COMPILATION_CPP
  CUDA_64_BIT_DEVICE_CODE
  CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE
  CUDA_GENERATED_OUTPUT_DIR
  )

# Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we
# need to add another entry for the CMAKE_BUILD_TYPE. We also need to add the
# standard set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo)
# for completeness. We need to run this loop in order to accommodate the addition
# of extra configuration types. Duplicate entries will be removed by
# REMOVE_DUPLICATES.
set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
list(REMOVE_DUPLICATES CUDA_configuration_types)
foreach(config ${CUDA_configuration_types})
  string(TOUPPER ${config} config_upper)
  set(CUDA_NVCC_FLAGS_${config_upper} "" CACHE STRING "Semi-colon delimit multiple arguments.")
  mark_as_advanced(CUDA_NVCC_FLAGS_${config_upper})
endforeach()
###############################################################################
###############################################################################
# Locate CUDA, Set Build Type, etc.
###############################################################################
###############################################################################

# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed;
# if they have, then clear the cache variables so that they will be detected again.
if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}")
  unset(CUDA_NVCC_EXECUTABLE CACHE)
  unset(CUDA_VERSION CACHE)
  unset(CUDA_TOOLKIT_INCLUDE CACHE)
  unset(CUDA_CUDART_LIBRARY CACHE)
  unset(CUDA_CUDA_LIBRARY CACHE)
  unset(CUDA_cublas_LIBRARY CACHE)
  unset(CUDA_cublasemu_LIBRARY CACHE)
  unset(CUDA_cufft_LIBRARY CACHE)
  unset(CUDA_cufftemu_LIBRARY CACHE)
endif()

if(NOT "${CUDA_SDK_ROOT_DIR}" STREQUAL "${CUDA_SDK_ROOT_DIR_INTERNAL}")
  unset(CUDA_CUT_INCLUDE_DIR CACHE)
  unset(CUDA_CUT_LIBRARY CACHE)
endif()

# Search for the cuda distribution.
if(NOT CUDA_TOOLKIT_ROOT_DIR)

  # Search in the CUDA_BIN_PATH first.
  find_path(CUDA_TOOLKIT_ROOT_DIR
    NAMES nvcc nvcc.exe
    PATHS ENV CUDA_BIN_PATH
    DOC "Toolkit location."
    NO_DEFAULT_PATH
    )
  # Now search default paths
  find_path(CUDA_TOOLKIT_ROOT_DIR
    NAMES nvcc nvcc.exe
    PATHS /usr/local/bin
          /usr/local/cuda/bin
    DOC "Toolkit location."
    )

  if (CUDA_TOOLKIT_ROOT_DIR)
    string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR})
    # We need to force this back into the cache.
    set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE)
  endif(CUDA_TOOLKIT_ROOT_DIR)
  if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
    if(CUDA_FIND_REQUIRED)
      message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR")
    elseif(NOT CUDA_FIND_QUIETLY)
      message("CUDA_TOOLKIT_ROOT_DIR not found or specified")
    endif()
  endif (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
endif (NOT CUDA_TOOLKIT_ROOT_DIR)
# CUDA_NVCC_EXECUTABLE
find_program(CUDA_NVCC_EXECUTABLE
  NAMES nvcc
  PATHS "${CUDA_TOOLKIT_ROOT_DIR}/bin64"
        "${CUDA_TOOLKIT_ROOT_DIR}/bin"
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
find_program(CUDA_NVCC_EXECUTABLE nvcc)
mark_as_advanced(CUDA_NVCC_EXECUTABLE)

if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION)
  # Compute the version.
  exec_program(${CUDA_NVCC_EXECUTABLE} ARGS "--version" OUTPUT_VARIABLE NVCC_OUT)
  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT})
  string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT})
  set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.")
  mark_as_advanced(CUDA_VERSION)
endif()

# Always set this convenience variable
set(CUDA_VERSION_STRING "${CUDA_VERSION}")

# Here we need to determine if the version we found is acceptable. We will
# assume that it is unless CUDA_FIND_VERSION_EXACT or CUDA_FIND_VERSION is
# specified. The presence of either of these options checks the version
# string and signals if the version is acceptable or not.
set(_cuda_version_acceptable TRUE)

if(CUDA_FIND_VERSION_EXACT AND NOT CUDA_VERSION VERSION_EQUAL CUDA_FIND_VERSION)
  set(_cuda_version_acceptable FALSE)
endif()

if(CUDA_FIND_VERSION AND CUDA_VERSION VERSION_LESS CUDA_FIND_VERSION)
  set(_cuda_version_acceptable FALSE)
endif()

if(NOT _cuda_version_acceptable)
  set(_cuda_error_message "Requested CUDA version ${CUDA_FIND_VERSION}, but found unacceptable version ${CUDA_VERSION}")
  if(CUDA_FIND_REQUIRED)
    message("${_cuda_error_message}")
  elseif(NOT CUDA_FIND_QUIETLY)
    message("${_cuda_error_message}")
  endif()
endif()

# CUDA_TOOLKIT_INCLUDE
find_path(CUDA_TOOLKIT_INCLUDE
  device_functions.h # Header included in toolkit
  PATHS "${CUDA_TOOLKIT_ROOT_DIR}/include"
  NO_DEFAULT_PATH
  )
# Search default search paths, after we search our own set of paths.
find_path(CUDA_TOOLKIT_INCLUDE device_functions.h)
mark_as_advanced(CUDA_TOOLKIT_INCLUDE)
# Set the user list of include dirs to nothing to initialize it.
set (CUDA_NVCC_INCLUDE_ARGS_USER "")
set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE})

macro(FIND_LIBRARY_LOCAL_FIRST _var _names _doc)
  if(CMAKE_SIZEOF_VOID_P EQUAL 8)
    set(_cuda_64bit_lib_dir "${CUDA_TOOLKIT_ROOT_DIR}/lib64")
  endif()
  find_library(${_var}
    NAMES ${_names}
    PATHS ${_cuda_64bit_lib_dir}
          "${CUDA_TOOLKIT_ROOT_DIR}/lib"
    DOC ${_doc}
    NO_DEFAULT_PATH
    )
  # Search default search paths, after we search our own set of paths.
  find_library(${_var} NAMES ${_names} DOC ${_doc})
endmacro()

find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library")
set(CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY})

# The 1.1 toolkit on linux doesn't appear to have a separate "cuda" library.
find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).")

# Add cuda library to the link line only if it is found.
if (CUDA_CUDA_LIBRARY)
  set(CUDA_LIBRARIES ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY})
endif(CUDA_CUDA_LIBRARY)

mark_as_advanced(
  CUDA_CUDART_LIBRARY
  )

#######################
# Look for some of the toolkit helper libraries
macro(FIND_CUDA_HELPER_LIBS _name)
  find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library")
  mark_as_advanced(CUDA_${_name}_LIBRARY)
endmacro(FIND_CUDA_HELPER_LIBS)

# Search for cufft and cublas libraries.
find_cuda_helper_libs(cufftemu)
find_cuda_helper_libs(cublasemu)
find_cuda_helper_libs(cufft)
find_cuda_helper_libs(cublas)

if (CUDA_BUILD_EMULATION)
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY})
else()
  set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY})
  set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY})
endif()
########################
# Look for the SDK stuff
find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h
  "$ENV{NVSDKCUDA_ROOT}"
  "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]"
  )

# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the
# environment variables.
set(CUDA_SDK_SEARCH_PATH
  "${CUDA_SDK_ROOT_DIR}"
  "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2"
  "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK"
  "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX"
  )

# CUDA_CUT_INCLUDE_DIR
find_path(CUDA_CUT_INCLUDE_DIR
  cutil.h
  PATHS ${CUDA_SDK_SEARCH_PATH}
  PATH_SUFFIXES "common/inc"
  DOC "Location of cutil.h"
  NO_DEFAULT_PATH
  )
# Now search system paths
find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h")

mark_as_advanced(CUDA_CUT_INCLUDE_DIR)

# CUDA_CUT_LIBRARIES

# cutil library is called cutil64 for 64 bit builds on windows. We don't want
# to get these confused, so we are setting the name based on the word size of
# the build.
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
  set(cuda_cutil_name cutil64)
else(CMAKE_SIZEOF_VOID_P EQUAL 8)
  set(cuda_cutil_name cutil32)
endif(CMAKE_SIZEOF_VOID_P EQUAL 8)

find_library(CUDA_CUT_LIBRARY
  NAMES cutil ${cuda_cutil_name}
  PATHS ${CUDA_SDK_SEARCH_PATH}
  # The new version of the sdk shows up in common/lib, but the old one is in lib
  PATH_SUFFIXES "common/lib" "lib"
  DOC "Location of cutil library"
  NO_DEFAULT_PATH
  )
# Now search system paths
find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library")
mark_as_advanced(CUDA_CUT_LIBRARY)
set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY})
#############################
# Check for required components
set(CUDA_FOUND TRUE)

set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE)
set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL
  "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CUDA DEFAULT_MSG
  CUDA_TOOLKIT_ROOT_DIR
  CUDA_NVCC_EXECUTABLE
  CUDA_CUDART_LIBRARY
  _cuda_version_acceptable
  )
###############################################################################
###############################################################################
# Macros
###############################################################################
###############################################################################

###############################################################################
# Add include directories to pass to the nvcc command.
macro(CUDA_INCLUDE_DIRECTORIES)
  foreach(dir ${ARGN})
    list(APPEND CUDA_NVCC_INCLUDE_ARGS_USER "-I${dir}")
  endforeach(dir ${ARGN})
endmacro(CUDA_INCLUDE_DIRECTORIES)


##############################################################################
cuda_find_helper_file(parse_cubin cmake)
cuda_find_helper_file(make2cmake cmake)
cuda_find_helper_file(run_nvcc cmake)
##############################################################################
# Separate the OPTIONS out from the sources
##############################################################################
macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options)
  set( ${_sources} )
  set( ${_cmake_options} )
  set( ${_options} )
  set( _found_options FALSE )
  foreach(arg ${ARGN})
    if(arg STREQUAL "OPTIONS")
      set( _found_options TRUE )
    elseif(
        arg STREQUAL "WIN32" OR
        arg STREQUAL "MACOSX_BUNDLE" OR
        arg STREQUAL "EXCLUDE_FROM_ALL" OR
        arg STREQUAL "STATIC" OR
        arg STREQUAL "SHARED" OR
        arg STREQUAL "MODULE"
        )
      list(APPEND ${_cmake_options} "${arg}")
    else()
      if ( _found_options )
        list(APPEND ${_options} "${arg}")
      else()
        # Assume this is a file
        list(APPEND ${_sources} "${arg}")
      endif()
    endif()
  endforeach()
endmacro()
##############################################################################
# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix
##############################################################################
macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix)
  set( _found_config )
  foreach(arg ${ARGN})
    # Determine if we are dealing with a per-configuration flag
    foreach(config ${CUDA_configuration_types})
      string(TOUPPER ${config} config_upper)
      if (arg STREQUAL "${config_upper}")
        set( _found_config _${arg})
        # Set arg to nothing to keep it from being processed further
        set( arg )
      endif()
    endforeach()

    if ( arg )
      list(APPEND ${_option_prefix}${_found_config} "${arg}")
    endif()
  endforeach()
endmacro()
##############################################################################
# Helper to add the include directory for CUDA only once
function(CUDA_ADD_CUDA_INCLUDE_ONCE)
  get_directory_property(_include_directories INCLUDE_DIRECTORIES)
  set(_add TRUE)
  if(_include_directories)
    foreach(dir ${_include_directories})
      if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}")
        set(_add FALSE)
      endif()
    endforeach()
  endif()
  if(_add)
    include_directories(${CUDA_INCLUDE_DIRS})
  endif()
endfunction()
##############################################################################
# This helper macro populates the following variables and sets up custom
# commands and targets to invoke the nvcc compiler to generate C or PTX source
# dependent upon the format parameter. The compiler is invoked once with -M
# to generate a dependency file and a second time with -cuda or -ptx to generate
# a .cpp or .ptx file.
# INPUT:
#   cuda_target     - Target name
#   format          - PTX or OBJ
#   FILE1 .. FILEN  - The remaining arguments are the sources to be wrapped.
#   OPTIONS         - Extra options to NVCC
# OUTPUT:
#   generated_files - List of generated files
##############################################################################
##############################################################################
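# For example, a sketch of a direct call (file, target, and flag names are
# only illustrative):
#   cuda_wrap_srcs(myprog OBJ generated_files kernel.cu OPTIONS -DMY_DEFINE)
#   add_executable(myprog main.cpp ${generated_files})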
macro(CUDA_WRAP_SRCS cuda_target format generated_files)

  if( ${format} MATCHES "PTX" )
    set( compile_to_ptx ON )
  elseif( ${format} MATCHES "OBJ")
    set( compile_to_ptx OFF )
  else()
    message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS: '${format}'. Use OBJ or PTX.")
  endif()

  # Set up all the command line flags here, so that they can be overridden on a per target basis.

  set(nvcc_flags "")

  # Emulation if the card isn't present.
  if (CUDA_BUILD_EMULATION)
    set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g)
  else(CUDA_BUILD_EMULATION)
    # Device mode. No flags necessary.
  endif(CUDA_BUILD_EMULATION)

  if(CUDA_HOST_COMPILATION_CPP)
    set(CUDA_C_OR_CXX CXX)
  else(CUDA_HOST_COMPILATION_CPP)
    set(nvcc_flags ${nvcc_flags} --host-compilation C)
    set(CUDA_C_OR_CXX C)
  endif(CUDA_HOST_COMPILATION_CPP)

  set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})

  if(CUDA_64_BIT_DEVICE_CODE)
    set(nvcc_flags ${nvcc_flags} -m64)
  else()
    set(nvcc_flags ${nvcc_flags} -m32)
  endif()

  # This needs to be passed in at this stage, because VS needs to fill out the
  # value of VCInstallDir from within VS.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    if( CMAKE_SIZEOF_VOID_P EQUAL 8 )
      # Add nvcc flag for 64b Windows
      set(ccbin_flags -D "\"CCBIN:PATH=$(VCInstallDir)bin\"" )
    endif()
  endif()
  # Figure out which configuration we will use and pass that in as an argument to
  # the script. We need to defer the decision until compilation time, because
  # for VS projects we won't know if we are making a debug or release build
  # until build time.
  if(CMAKE_GENERATOR MATCHES "Visual Studio")
    set( CUDA_build_configuration "$(ConfigurationName)" )
  else()
    set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}")
  endif()

  # Initialize our list of includes with the user ones followed by the CUDA system ones.
  set(CUDA_NVCC_INCLUDE_ARGS ${CUDA_NVCC_INCLUDE_ARGS_USER} "-I${CUDA_INCLUDE_DIRS}")
  # Get the include directories for this directory and use them for our nvcc command.
  get_directory_property(CUDA_NVCC_INCLUDE_DIRECTORIES INCLUDE_DIRECTORIES)
  if(CUDA_NVCC_INCLUDE_DIRECTORIES)
    foreach(dir ${CUDA_NVCC_INCLUDE_DIRECTORIES})
      list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}")
    endforeach()
  endif()

  # Reset these variables
  set(CUDA_WRAP_OPTION_NVCC_FLAGS)
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper})
  endforeach()

  CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${ARGN})
  CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options})

  # Figure out if we are building a shared library. Default to the value of BUILD_SHARED_LIBS.
  set(_cuda_build_shared_libs ${BUILD_SHARED_LIBS})
  list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED)
  list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE)
  if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1)
    set(_cuda_build_shared_libs TRUE)
  endif()
  list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC)
  if(_cuda_found_STATIC GREATER -1)
    set(_cuda_build_shared_libs FALSE)
  endif()

  if(_cuda_build_shared_libs)
    # If we are setting up code for a shared library, then we need to add extra flags for
    # compiling objects for shared libraries.
    set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS})
  endif()

  set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CMAKE_${CUDA_C_OR_CXX}_FLAGS} ${CUDA_HOST_SHARED_FLAGS})")
  set(CUDA_NVCC_FLAGS_CONFIG "# Build specific configuration flags")
  # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake
  foreach(config ${CUDA_configuration_types})
    string(TOUPPER ${config} config_upper)
    # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS
    # we convert the strings to lists (like we want).

    # nvcc chokes on -g3, so replace it with -g
    if(CMAKE_COMPILER_IS_GNUCC)
      string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
    else()
      set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
    endif()
    set(CUDA_HOST_FLAGS "${CUDA_HOST_FLAGS}\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})")
    # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be a string (instead of a list
    # like it is currently), we can remove the quotes around the
    # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable.
    set(CUDA_NVCC_FLAGS_CONFIG "${CUDA_NVCC_FLAGS_CONFIG}\nset(CUDA_NVCC_FLAGS_${config_upper} \"${CUDA_NVCC_FLAGS_${config_upper}};${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}}\")")
  endforeach()
  if(compile_to_ptx)
    # Don't use any of the host compilation flags for PTX targets.
    set(CUDA_HOST_FLAGS)
  endif()

  # Get the list of definitions from the directory property
  get_directory_property(CUDA_NVCC_DEFINITIONS COMPILE_DEFINITIONS)
  if(CUDA_NVCC_DEFINITIONS)
    foreach(_definition ${CUDA_NVCC_DEFINITIONS})
      list(APPEND nvcc_flags "-D${_definition}")
    endforeach()
  endif()

  if(_cuda_build_shared_libs)
    list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS")
  endif()

  # Determine output directory
  if(CUDA_GENERATED_OUTPUT_DIR)
    set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}")
  else()
    set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}")
  endif()

  # Reset the output variable
  set(_cuda_wrap_generated_files "")
  # Iterate over the macro arguments and create custom
  # commands for all the .cu files.
  foreach(file ${ARGN})
    # Ignore any file marked as a HEADER_FILE_ONLY
    get_source_file_property(_is_header ${file} HEADER_FILE_ONLY)
    if(${file} MATCHES ".*\\.cu$" AND NOT _is_header)

      # Add a custom target to generate a c or ptx file. ######################

      get_filename_component( basename ${file} NAME )
      if( compile_to_ptx )
        set(generated_file_path "${cuda_compile_output_dir}")
        set(generated_file_basename "${cuda_target}_generated_${basename}.ptx")
        set(format_flag "-ptx")
        file(MAKE_DIRECTORY "${cuda_compile_output_dir}")
      else( compile_to_ptx )
        set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}")
        set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}")
        set(format_flag "-c")
      endif( compile_to_ptx )

      # Set all of our file names. Make sure that whatever filenames that have
      # generated_file_path in them get passed in through as a command line
      # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time
      # instead of configure time.
      set(generated_file "${generated_file_path}/${generated_file_basename}")
      set(cmake_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.depend")
      set(NVCC_generated_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.NVCC-depend")
      set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt")
      set(custom_target_script "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.cmake")

      # Setup properties for obj files:
      if( NOT compile_to_ptx )
        set_source_files_properties("${generated_file}"
          PROPERTIES
          EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked.
          )
      endif()

      # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path.
      get_filename_component(file_path "${file}" PATH)
      if(IS_ABSOLUTE "${file_path}")
        set(source_file "${file}")
      else()
        set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}")
      endif()

      # Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND #######
      cuda_include_nvcc_dependencies(${cmake_dependency_file})
      # Convenience string for output #########################################
      if(CUDA_BUILD_EMULATION)
        set(cuda_build_type "Emulation")
      else(CUDA_BUILD_EMULATION)
        set(cuda_build_type "Device")
      endif(CUDA_BUILD_EMULATION)

      # Build the NVCC made dependency file ###################################
      set(build_cubin OFF)
      if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
        if ( NOT compile_to_ptx )
          set ( build_cubin ON )
        endif( NOT compile_to_ptx )
      endif( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )

      # Configure the build script
      configure_file("${CUDA_run_nvcc}" "${custom_target_script}" @ONLY)

      # So if a user specifies the same cuda file as input more than once, you
      # can have bad things happen with dependencies. Here we check an option
      # to see if this is the behavior they want.
      if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
        set(main_dep MAIN_DEPENDENCY ${source_file})
      else()
        set(main_dep DEPENDS ${source_file})
      endif()

      if(CUDA_VERBOSE_BUILD)
        set(verbose_output ON)
      elseif(CMAKE_GENERATOR MATCHES "Makefiles")
        set(verbose_output "$(VERBOSE)")
      else()
        set(verbose_output OFF)
      endif()

      # Create the comment string
      file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}")
      if(compile_to_ptx)
        set(cuda_build_comment_string "Building NVCC ptx file ${generated_file_relative_path}")
      else()
        set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}")
      endif()
      # Build the generated file and dependency file ##########################
      add_custom_command(
        OUTPUT ${generated_file}
        # These output files depend on the source_file and the contents of cmake_dependency_file
        ${main_dep}
        DEPENDS ${CUDA_NVCC_DEPEND}
        DEPENDS ${custom_target_script}
        COMMAND ${CMAKE_COMMAND} ARGS
          -D verbose:BOOL=${verbose_output}
          -D build_configuration:STRING=${CUDA_build_configuration}
          -D "generated_file:STRING=${generated_file}"
          -D "generated_cubin_file:STRING=${generated_cubin_file}"
          -P "${custom_target_script}"
        COMMENT "${cuda_build_comment_string}"
        )

      # Make sure the build system knows the file is generated.
      set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE)

      # Don't add the object file to the list of generated files if we are using
      # visual studio and we are attaching the build rule to the cuda file. VS
      # will add our object file to the linker automatically for us.
      set(cuda_add_generated_file TRUE)

      if(NOT compile_to_ptx AND CMAKE_GENERATOR MATCHES "Visual Studio" AND CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
        # Visual Studio 8 crashes when you close the solution when you don't add the object file.
        if(NOT CMAKE_GENERATOR MATCHES "Visual Studio 8")
          #message("Not adding ${generated_file}")
          set(cuda_add_generated_file FALSE)
        endif()
      endif()

      if(cuda_add_generated_file)
        list(APPEND _cuda_wrap_generated_files ${generated_file})
      endif()

      # Add the other files that we want cmake to clean on a cleanup ##########
      list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}")
      list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES)
      set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")

    endif(${file} MATCHES ".*\\.cu$" AND NOT _is_header)
  endforeach(file)

  # Set the return parameter
  set(${generated_files} ${_cuda_wrap_generated_files})
endmacro(CUDA_WRAP_SRCS)
###############################################################################
###############################################################################
# CUDA ADD LIBRARY
###############################################################################
###############################################################################
macro(CUDA_ADD_LIBRARY cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} ${_cmake_options}
    OPTIONS ${_options} )

  # Add the library.
  add_library(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    )

  target_link_libraries(${cuda_target}
    ${CUDA_LIBRARIES}
    )

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro(CUDA_ADD_LIBRARY cuda_target)
###############################################################################
###############################################################################
# CUDA ADD EXECUTABLE
###############################################################################
###############################################################################
macro(CUDA_ADD_EXECUTABLE cuda_target)

  CUDA_ADD_CUDA_INCLUDE_ONCE()

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )

  # Add the executable.
  add_executable(${cuda_target} ${_cmake_options}
    ${_generated_files}
    ${_sources}
    )

  target_link_libraries(${cuda_target}
    ${CUDA_LIBRARIES}
    )

  # We need to set the linker language based on what the expected generated file
  # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
  set_target_properties(${cuda_target}
    PROPERTIES
    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
    )

endmacro(CUDA_ADD_EXECUTABLE cuda_target)
###############################################################################
###############################################################################
# CUDA COMPILE
###############################################################################
###############################################################################
macro(CUDA_COMPILE generated_files)

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( cuda_compile OBJ _generated_files ${_sources} ${_cmake_options}
    OPTIONS ${_options} )

  set( ${generated_files} ${_generated_files})

endmacro(CUDA_COMPILE)
###############################################################################
###############################################################################
# CUDA COMPILE PTX
###############################################################################
###############################################################################
macro(CUDA_COMPILE_PTX generated_files)

  # Separate the sources from the options
  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
  # Create custom commands and targets for each file.
  CUDA_WRAP_SRCS( cuda_compile_ptx PTX _generated_files ${_sources} ${_cmake_options}
    OPTIONS ${_options} )

  set( ${generated_files} ${_generated_files})

endmacro(CUDA_COMPILE_PTX)
###############################################################################
###############################################################################
# CUDA ADD CUFFT TO TARGET
###############################################################################
###############################################################################
macro(CUDA_ADD_CUFFT_TO_TARGET target)
  if (CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_cufft_LIBRARY})
  endif()
endmacro()

###############################################################################
###############################################################################
# CUDA ADD CUBLAS TO TARGET
###############################################################################
###############################################################################
macro(CUDA_ADD_CUBLAS_TO_TARGET target)
  if (CUDA_BUILD_EMULATION)
    target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY})
  else()
    target_link_libraries(${target} ${CUDA_cublas_LIBRARY})
  endif()
endmacro()
###############################################################################
###############################################################################
# CUDA BUILD CLEAN TARGET
###############################################################################
###############################################################################
macro(CUDA_BUILD_CLEAN_TARGET)
  # Call this after you add all your CUDA targets, and you will get a
  # convenience target. You should also make clean after running this target
  # to get the build system to generate all the code again.

  set(cuda_clean_target_name clean_cuda_depends)
  if (CMAKE_GENERATOR MATCHES "Visual Studio")
    string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name)
  endif()
  add_custom_target(${cuda_clean_target_name}
    COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES})

  # Clear out the variable, so the next time we configure it will be empty.
  # This is useful so that the files won't persist in the list after targets
  # have been removed.
  set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")

endmacro(CUDA_BUILD_CLEAN_TARGET)