#===- llvm/utils/docker/nvidia-cuda/Dockerfile ---------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
# Stage 1. Check out LLVM source code and run the build.
FROM nvidia/cuda:8.0-devel as builder
LABEL maintainer "LLVM Developers"
# Install LLVM build dependencies.
RUN apt-get update && \
    apt-get install -y --no-install-recommends ca-certificates cmake python \
        subversion ninja-build && \
    rm -rf /var/lib/apt/lists/*

ADD checksums /tmp/checksums
ADD scripts /tmp/scripts

# Check out the source code.
ARG checkout_args
RUN /tmp/scripts/checkout.sh ${checkout_args}
# Run the build. Results of the build will be available at /tmp/clang-install/.
ARG buildscript_args
RUN /tmp/scripts/build_install_llvm.sh --to /tmp/clang-install ${buildscript_args}
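
# Example (illustrative only): checkout_args and buildscript_args are ordinary
# Docker build arguments, so this stage can be driven directly with
# `docker build`, e.g.:
#
#   docker build -t clang-cuda \
#       --build-arg "checkout_args=<flags understood by scripts/checkout.sh>" \
#       --build-arg "buildscript_args=-i install-clang -i install-clang-headers -- -DCMAKE_BUILD_TYPE=Release" \
#       .
#
# The buildscript_args flags shown here are only an assumption about what
# scripts/build_install_llvm.sh accepts; consult the scripts/ directory for the
# actual options. The build_docker_image.sh wrapper in llvm/utils/docker is the
# usual way to fill these arguments in.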

# Stage 2. Produce a minimal release image with build results.
FROM nvidia/cuda:8.0-devel
LABEL maintainer "LLVM Developers"
# Copy clang installation into this container.
COPY --from=builder /tmp/clang-install/ /usr/local/
# C++ standard library and binutils are already included in the base image.
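
# Example usage (illustrative only): the release image ends up with clang under
# /usr/local, so the compiler can be invoked directly, e.g.:
#
#   docker run --rm <image-name> clang --version
#
# Running compiled CUDA binaries additionally requires GPU access on the host,
# e.g. via the NVIDIA container runtime (`docker run --gpus all ...`).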