Compare commits


26 Commits

Author SHA1 Message Date
Jeffrey Morgan
5306b0269d Update linux.md 2023-09-25 16:10:32 -07:00
Michael Yang
7de0c8345d Merge pull request #595 from jmorganca/mxyng/install.sh
ignore systemctl is-system-running exit code
2023-09-25 15:49:47 -07:00
Michael Yang
1b9dcab3ab ignore systemctl is-system-running exit code 2023-09-25 15:47:45 -07:00
Bruce MacDonald
86279f4ae3 unbound max num gpu layers (#591)
---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-25 18:36:46 -04:00
Michael Yang
b934bf23e6 exit on unknown distro (#594) 2023-09-25 15:30:58 -07:00
Michael Yang
2b8ef455ad Merge pull request #593 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 14:09:40 -07:00
Michael Yang
0c5f47177c update install.sh 2023-09-25 14:01:44 -07:00
Michael Yang
1210db2924 Merge pull request #592 from jmorganca/mxyng/install.sh
fix dkms on debian
2023-09-25 12:59:01 -07:00
Michael Yang
d0854bf1e6 fix dkms on debian 2023-09-25 12:57:25 -07:00
Michael Yang
8396463255 Merge pull request #590 from jmorganca/mxyng/install.sh
fix dkms install
2023-09-25 12:17:31 -07:00
Michael Yang
a027bbf4d7 fix dkms install 2023-09-25 12:16:41 -07:00
Michael Yang
ed94a3dd02 Merge pull request #589 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 11:08:25 -07:00
Michael Yang
f14f62ab3b update install.sh 2023-09-25 11:05:38 -07:00
Jeffrey Morgan
0fb5268496 Update linux.md 2023-09-25 10:06:23 -07:00
Bruce MacDonald
c65edb1506 fix linux installer warning logs (#588) 2023-09-25 11:22:56 -04:00
Twan L
1605af32ec Added a new community project (#574) 2023-09-25 10:40:59 -04:00
Jeffrey Morgan
ee3032ad89 improvements to docs/linux.md 2023-09-24 21:50:07 -07:00
Jeffrey Morgan
5b7a27281d improvements to docs/linux.md 2023-09-24 21:38:23 -07:00
Jeffrey Morgan
d2a784e33e add docs/linux.md 2023-09-24 21:34:44 -07:00
Jeffrey Morgan
413a2e4f91 set DEBIAN_FRONTEND=noninteractive correctly 2023-09-24 20:35:42 -07:00
Patrick Devine
b5614f3ebc fix end-of-line issue with the new prompt (#582) 2023-09-23 17:20:30 -07:00
Jeffrey Morgan
8b2ba9cab8 minor improvements to install.sh 2023-09-23 11:20:39 -04:00
Jeffrey Morgan
e29662ab5c fix minor install script issues on debian 2023-09-23 10:25:47 -04:00
Bruce MacDonald
cbc40aa996 debian installer support (#579)
* debian installer support

- normalize os name to lowercase
- check needed commands are available
- dont check sudo when root user
- share common install commands
- support debian cuda install
- skip aarm cuda install
- system user shared home dir

* refactor and add other platforms (#580)

---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-23 09:46:47 -04:00
Jeffrey Morgan
5cb82540c9 install.sh: update install url 2023-09-23 09:35:14 -04:00
Jeffrey Morgan
d7849a1dc9 add .env to .dockerignore 2023-09-23 00:53:48 -04:00
9 changed files with 323 additions and 157 deletions

View File

@@ -5,3 +5,4 @@ dist
 scripts
 llm/llama.cpp/ggml
 llm/llama.cpp/gguf
+.env

View File

@@ -214,6 +214,7 @@ curl -X POST http://localhost:11434/api/generate -d '{
 | [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) | Interact with Ollama as a chatbot on Discord. |
 | [Raycast Ollama](https://github.com/MassimilianoPasquini97/raycast_ollama) | Raycast extension to use Ollama for local llama inference on Raycast. |
 | [Simple HTML UI](https://github.com/rtcfirefly/ollama-ui) | Also, there is a Chrome extension. |
+| [Ollama-GUI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file) | 🖥️ Mac Chat Interface ⚡️ |
 | [Emacs client](https://github.com/zweifisch/ollama) | |
 
 [1]: https://python.langchain.com/docs/integrations/llms/ollama

View File

@@ -33,15 +33,22 @@ import (
 )
 
 type Painter struct {
-    HideHint bool
+    IsMultiLine bool
 }
 
 func (p Painter) Paint(line []rune, _ int) []rune {
     termType := os.Getenv("TERM")
-    if termType == "xterm-256color" && len(line) == 0 && !p.HideHint {
-        prompt := "Send a message (/? for help)"
+    if termType == "xterm-256color" && len(line) == 0 {
+        var prompt string
+        if p.IsMultiLine {
+            prompt = "Use \"\"\" to end multi-line input"
+        } else {
+            prompt = "Send a message (/? for help)"
+        }
         return []rune(fmt.Sprintf("\033[38;5;245m%s\033[%dD\033[0m", prompt, len(prompt)))
     }
+
+    // add a space and a backspace to prevent the cursor from walking up the screen
+    line = append(line, []rune(" \b")...)
     return line
 }
@@ -579,7 +586,7 @@ func generateInteractive(cmd *cobra.Command, model string) error {
         case isMultiLine:
             if strings.HasSuffix(line, `"""`) {
                 isMultiLine = false
-                painter.HideHint = false
+                painter.IsMultiLine = isMultiLine
                 multiLineBuffer += strings.TrimSuffix(line, `"""`)
                 line = multiLineBuffer
                 multiLineBuffer = ""
@@ -590,9 +597,9 @@ func generateInteractive(cmd *cobra.Command, model string) error {
             }
         case strings.HasPrefix(line, `"""`):
             isMultiLine = true
+            painter.IsMultiLine = isMultiLine
             multiLineBuffer = strings.TrimPrefix(line, `"""`) + " "
             scanner.SetPrompt("... ")
-            painter.HideHint = true
             continue
         case strings.HasPrefix(line, "/list"):
             args := strings.Fields(line)
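
The hint drawn by `Paint` relies on ANSI escape sequences: `\033[38;5;245m` selects a faint grey from the 256-color palette, `\033[%dD` moves the cursor back over the text just written, and `\033[0m` resets the styling, so the greyed-out hint sits under the cursor until real input replaces it. A minimal shell sketch of the same trick (illustrative, not from the repository):

```
# print a grey placeholder hint, then park the cursor back at its start
prompt='Send a message (/? for help)'
printf '\033[38;5;245m%s\033[%dD\033[0m' "$prompt" "${#prompt}"
```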

docs/linux.md Normal file
View File

@@ -0,0 +1,83 @@
# Installing Ollama on Linux

> Note: A one-line installer for Ollama is available by running:
>
> ```
> curl https://ollama.ai/install.sh | sh
> ```

## Download the `ollama` binary

Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:

```
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

## Start Ollama

Start Ollama by running `ollama serve`:

```
ollama serve
```

Once Ollama is running, run a model in another terminal session:

```
ollama run llama2
```
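
With the server running, the model can also be driven over HTTP. The README's API example (visible in the hunk header above) targets the `/api/generate` endpoint; a minimal request along those lines, assuming the usual `model` and `prompt` fields:

```
curl -X POST http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?"
}'
```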
## Install CUDA drivers (optional, for Nvidia GPUs)

[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.

Verify that the drivers are installed by running the following command, which should print details about your GPU:

```
nvidia-smi
```

## Adding Ollama as a startup service (optional)

Create a user for Ollama:

```
sudo useradd -r -s /bin/false -m -d /usr/share/ollama ollama
```

Create a service file in `/etc/systemd/system/ollama.service`:

```ini
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=/usr/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="HOME=/usr/share/ollama"

[Install]
WantedBy=default.target
```

Then reload systemd and enable the service so it starts on boot:

```
sudo systemctl daemon-reload
sudo systemctl enable ollama
```

### Viewing logs

To view logs of Ollama running as a startup service, run:

```
journalctl -u ollama
```
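
To follow the log live instead of paging through it, journalctl's standard follow flag works here as well:

```
journalctl -u ollama -f
```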

View File

@@ -77,6 +77,7 @@ type model interface {
     ModelFamily() string
     ModelType() string
     FileType() string
+    NumLayers() int64
 }
 
 type container interface {

View File

@@ -195,6 +195,16 @@ func (llm *ggufModel) Decode(r io.Reader) error {
     return nil
 }
 
+func (llm *ggufModel) NumLayers() int64 {
+    value, exists := llm.kv[fmt.Sprintf("%s.block_count", llm.ModelFamily())]
+    if !exists {
+        return 0
+    }
+
+    v := value.(uint32)
+    return int64(v)
+}
+
 func (ggufModel) readU8(r io.Reader) uint8 {
     var u8 uint8
     binary.Read(r, binary.LittleEndian, &u8)

View File

@@ -152,6 +152,10 @@ func (llm *llamaModel) FileType() string {
     return fileType(llm.hyperparameters.FileType)
 }
 
+func (llm *llamaModel) NumLayers() int64 {
+    return int64(llm.hyperparameters.NumLayer)
+}
+
 type llamaHyperparameters struct {
     // NumVocab is the size of the model's vocabulary.
     NumVocab uint32
@@ -207,13 +211,13 @@ func CheckVRAM() (int, error) {
     return total, nil
 }
 
-func NumGPU(opts api.Options) int {
+func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
     if opts.NumGPU != -1 {
         return opts.NumGPU
     }
-    n := 1 // default to enable metal on macOS
     if runtime.GOOS == "linux" {
-        vram, err := CheckVRAM()
+        vramMib, err := CheckVRAM()
         if err != nil {
             if err.Error() != "nvidia-smi command failed" {
                 log.Print(err.Error())
@@ -221,33 +225,25 @@ func NumGPU(opts api.Options) int {
             // nvidia driver not installed or no nvidia GPU found
             return 0
         }
-        // TODO: this is a very rough heuristic, better would be to calculate this based on number of layers and context size
-        switch {
-        case vram < 500:
-            log.Printf("WARNING: Low VRAM detected, disabling GPU")
-            n = 0
-        case vram < 1000:
-            n = 4
-        case vram < 2000:
-            n = 8
-        case vram < 4000:
-            n = 12
-        case vram < 8000:
-            n = 16
-        case vram < 12000:
-            n = 24
-        case vram < 16000:
-            n = 32
-        default:
-            n = 48
-        }
-        log.Printf("%d MB VRAM available, loading %d GPU layers", vram, n)
+
+        totalVramBytes := int64(vramMib) * 1024 * 1024 // 1 MiB = 1024^2 bytes
+
+        // Calculate bytes per layer
+        // TODO: this is a rough heuristic, better would be to calculate this based on number of layers and context size
+        bytesPerLayer := fileSizeBytes / numLayer
+
+        // set n to the max number of layers we can fit in VRAM
+        n := int(totalVramBytes / bytesPerLayer)
+        log.Printf("%d MiB VRAM available, loading up to %d GPU layers", vramMib, n)
+        return n
     }
-    return n
+
+    // default to enable metal on macOS
+    return 1
 }
 
-func newLlama(model string, adapters []string, runners []ModelRunner, opts api.Options) (*llama, error) {
-    if _, err := os.Stat(model); err != nil {
+func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
+    fileInfo, err := os.Stat(model)
+    if err != nil {
         return nil, err
     }
@@ -261,7 +257,7 @@ func newLlama(model string, adapters []string, runners []ModelRunner, opts api.O
         "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
         "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
         "--batch-size", fmt.Sprintf("%d", opts.NumBatch),
-        "--n-gpu-layers", fmt.Sprintf("%d", NumGPU(opts)),
+        "--n-gpu-layers", fmt.Sprintf("%d", NumGPU(numLayers, fileInfo.Size(), opts)),
         "--embedding",
     }
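
To make the new heuristic concrete: it estimates bytes per layer as file size divided by layer count, then fits as many whole layers as the reported VRAM allows. A worked example with invented numbers — a 7 GiB model file with 32 layers on a GPU reporting 8192 MiB free:

```
# bytes_per_layer = 7516192768 / 32        = 234881024
# gpu_layers      = 8589934592 / 234881024 = 36  (integer division)
echo $(( (8192 * 1024 * 1024) / ((7 * 1024 * 1024 * 1024) / 32) ))   # prints 36
```

Unlike the old switch statement, the result is no longer capped at 48 layers, which is what "unbound max num gpu layers" refers to.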

View File

@@ -91,9 +91,9 @@ func New(workDir, model string, adapters []string, opts api.Options) (LLM, error
     switch ggml.Name() {
     case "gguf":
         opts.NumGQA = 0 // TODO: remove this when llama.cpp runners differ enough to need separate newLlama functions
-        return newLlama(model, adapters, chooseRunners(workDir, "gguf"), opts)
+        return newLlama(model, adapters, chooseRunners(workDir, "gguf"), ggml.NumLayers(), opts)
     case "ggml", "ggmf", "ggjt", "ggla":
-        return newLlama(model, adapters, chooseRunners(workDir, "ggml"), opts)
+        return newLlama(model, adapters, chooseRunners(workDir, "ggml"), ggml.NumLayers(), opts)
     default:
         return nil, fmt.Errorf("unknown ggml type: %s", ggml.ModelFamily())
     }

View File

@@ -4,123 +4,73 @@
 set -eu
 
-check_os() {
-    if [ "$(uname -s)" != "Linux" ]; then
-        echo "This script is intended to run on Linux only."
-        exit 1
-    fi
-}
+status() { echo ">>> $*" >&2; }
+error() { echo "ERROR $*"; exit 1; }
+warning() { echo "WARNING: $*"; }
 
-determine_architecture() {
-    ARCH=$(uname -m)
-    case $ARCH in
-        x86_64)
-            ARCH_SUFFIX="amd64"
-            ;;
-        aarch64|arm64)
-            ARCH_SUFFIX="arm64"
-            ;;
-        *)
-            echo "Unsupported architecture: $ARCH"
-            exit 1
-            ;;
-    esac
-}
+TEMP_DIR=$(mktemp -d)
+cleanup() { rm -rf $TEMP_DIR; }
+trap cleanup EXIT
 
-check_sudo() {
-    if [ "$(id -u)" -ne 0 ]; then
-        if command -v sudo >/dev/null 2>&1; then
-            SUDO_CMD="sudo"
-            echo "Downloading the ollama executable to the PATH, this will require sudo permissions."
-        else
-            echo "Error: sudo is not available. Please run as root or install sudo."
-            exit 1
-        fi
-    else
-        SUDO_CMD=""
-    fi
-}
+available() { command -v $1 >/dev/null; }
+require() {
+    local MISSING=''
+    for TOOL in $*; do
+        if ! available $TOOL; then
+            MISSING="$MISSING $TOOL"
+        fi
+    done
+
+    echo $MISSING
+}
 
-install_cuda_drivers() {
-    local os_name os_version
-    if [ -f "/etc/os-release" ]; then
-        . /etc/os-release
-        os_name=$ID
-        os_version=$VERSION_ID
-    else
-        echo "Unable to detect operating system. Skipping CUDA installation."
-        return 1
-    fi
+[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
 
-    # based on https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#package-manager-installation
-    case $os_name in
-        CentOS)
-            $SUDO_CMD yum install yum-utils
-            $SUDO_CMD yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
-            $SUDO_CMD yum clean all
-            $SUDO_CMD yum -y install nvidia-driver-latest-dkms
-            $SUDO_CMD yum -y install cuda-driver
-            $SUDO_CMD yum install kernel-devel-$(uname -r) kernel-headers-$(uname -r)
-            $SUDO_CMD dkms status | awk -F: '/added/ { print $1 }' | xargs -n1 $SUDO_CMD dkms install
-            $SUDO_CMD modprobe nvidia
-            ;;
-        ubuntu)
-            case $os_version in
-                20.04)
-                    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.1-1_all.deb
-                    ;;
-                22.04)
-                    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
-                    ;;
-                *)
-                    echo "Skipping automatic CUDA installation, not supported for Ubuntu ($os_version)."
-                    return
-                    ;;
-            esac
-            $SUDO_CMD dpkg -i cuda-keyring_1.1-1_all.deb
-            $SUDO_CMD apt-get update
-            $SUDO_CMD apt-get -y install cuda-drivers
-            ;;
-        RedHatEnterprise*|Kylin|Fedora|SLES|openSUSE*|Microsoft|Debian)
-            echo "NVIDIA CUDA drivers may not be installed, you can install them from: https://developer.nvidia.com/cuda-downloads"
-            ;;
-        *)
-            echo "Unsupported or unknown distribution, skipping GPU CUDA driver install: $os_name"
-            ;;
-    esac
-}
+case "$(uname -m)" in
+    x86_64) ARCH="amd64" ;;
+    aarch64|arm64) ARCH="arm64" ;;
+    *) error "Unsupported architecture: $ARCH" ;;
+esac
 
-check_install_cuda_drivers() {
-    if lspci -d '10de:' | grep 'NVIDIA' >/dev/null; then
-        # NVIDIA Corporation [10de] device is available
-        if command -v nvidia-smi >/dev/null 2>&1; then
-            CUDA_VERSION=$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")
-            if [ -z "$CUDA_VERSION" ]; then
-                echo "Warning: NVIDIA-SMI is available, but the CUDA version cannot be detected. Installing CUDA drivers..."
-                install_cuda_drivers
-            else
-                echo "Detected CUDA version $CUDA_VERSION"
-            fi
-        else
-            echo "Warning: NVIDIA GPU detected but NVIDIA-SMI is not available. Installing CUDA drivers..."
-            install_cuda_drivers
-        fi
-    else
-        echo "No NVIDIA GPU detected. Skipping driver installation."
-    fi
-}
+SUDO=
+if [ "$(id -u)" -ne 0 ]; then
+    # Running as root, no need for sudo
+    if ! available sudo; then
+        error "This script requires superuser permissions. Please re-run as root."
+    fi
+
+    SUDO="sudo"
+fi
 
-download_ollama() {
-    $SUDO_CMD mkdir -p /usr/bin
-    $SUDO_CMD curl -fsSL -o /usr/bin/ollama "https://ollama.ai/download/latest/ollama-linux-$ARCH_SUFFIX"
-}
+NEEDS=$(require curl awk grep sed tee xargs)
+if [ -n "$NEEDS" ]; then
+    status "ERROR: The following tools are required but missing:"
+    for NEED in $NEEDS; do
+        echo "  - $NEED"
+    done
+    exit 1
+fi
+
+status "Downloading ollama..."
+$SUDO curl --fail --show-error --location --progress-bar -o $TEMP_DIR/ollama "https://ollama.ai/download/ollama-linux-$ARCH"
+
+status "Installing ollama to /usr/bin..."
+$SUDO install -o0 -g0 -m755 -d /usr/bin
+$SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama /usr/bin/ollama
+
+install_success() { status 'Install complete. Run "ollama" from the command line.'; }
+trap install_success EXIT
+
+# Everything from this point onwards is optional.
 
 configure_systemd() {
-    if command -v systemctl >/dev/null 2>&1; then
-        $SUDO_CMD useradd -r -s /bin/false -m -d /home/ollama ollama 2>/dev/null
+    if ! id ollama >/dev/null 2>&1; then
+        status "Creating ollama user..."
+        $SUDO useradd -r -s /bin/false -m -d /usr/share/ollama ollama
+    fi
 
-        echo "Creating systemd service file for ollama..."
-        cat <<EOF | $SUDO_CMD tee /etc/systemd/system/ollama.service >/dev/null
+    status "Creating ollama systemd service..."
+    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
 [Unit]
 Description=Ollama Service
 After=network-online.target
@@ -131,30 +81,147 @@ User=ollama
 Group=ollama
 Restart=always
 RestartSec=3
-Environment="HOME=/home/ollama"
+Environment="HOME=/usr/share/ollama"
 
 [Install]
 WantedBy=default.target
 EOF
-        echo "Reloading systemd and enabling ollama service..."
-        if [ "$(systemctl is-system-running || echo 'not running')" = 'running' ]; then
-            $SUDO_CMD systemctl daemon-reload
-            $SUDO_CMD systemctl enable ollama
-            $SUDO_CMD systemctl restart ollama
-        fi
-    else
-        echo "Run 'ollama serve' from the command line to start the service."
-    fi
+    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
+    case $SYSTEMCTL_RUNNING in
+        running|degraded)
+            status "Enabling and starting ollama service..."
+            $SUDO systemctl daemon-reload
+            $SUDO systemctl enable ollama
+            $SUDO systemctl restart ollama
+            ;;
+    esac
 }
 
-main() {
-    check_os
-    determine_architecture
-    check_sudo
-    download_ollama
-    configure_systemd
-    check_install_cuda_drivers
-    echo "Installation complete. You can now run 'ollama' from the command line."
-}
+if available systemctl; then
+    configure_systemd
+fi
 
-main
+if ! available lspci && ! available lshw; then
+    warning "Unable to detect NVIDIA GPU. Install lspci or lshw to automatically detect and install NVIDIA CUDA drivers."
+    exit 0
+fi
+
+check_gpu() {
+    case $1 in
+        lspci) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
+        lshw) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
+        nvidia-smi) available nvidia-smi || return 1 ;;
+    esac
+}
+
+if ! check_gpu lspci && ! check_gpu lshw; then
+    warning "No NVIDIA GPU detected. Ollama will run in CPU-only mode."
+    exit 0
+fi
+
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
+install_cuda_driver_yum() {
+    status 'Installing NVIDIA repository...'
+    case $PACKAGE_MANAGER in
+        yum)
+            $SUDO $PACKAGE_MANAGER -y install yum-utils
+            $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
+            ;;
+        dnf)
+            $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
+            ;;
+    esac
+
+    case $1 in
+        rhel)
+            status 'Installing EPEL repository...'
+            # EPEL is required for third-party dependencies such as dkms and libvdpau
+            $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
+            ;;
+    esac
+
+    status 'Installing CUDA driver...'
+
+    if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
+        $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
+    fi
+
+    $SUDO $PACKAGE_MANAGER -y install cuda-drivers
+}
+
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
+# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
+install_cuda_driver_apt() {
+    status 'Installing NVIDIA repository...'
+    curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-keyring_1.1-1_all.deb
+
+    case $1 in
+        debian)
+            status 'Enabling contrib sources...'
+            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | sudo tee /etc/apt/sources.list.d/contrib.list > /dev/null
+            ;;
+    esac
+
+    status 'Installing CUDA driver...'
+    $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
+    $SUDO apt-get update
+
+    [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
+    DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
+}
+
+if [ ! -f "/etc/os-release" ]; then
+    error "Unknown distribution. Skipping CUDA installation."
+fi
+
+. /etc/os-release
+OS_NAME=$ID
+OS_VERSION=$VERSION_ID
+
+PACKAGE_MANAGER=
+for PACKAGE_MANAGER in dnf yum apt-get; do
+    if available $PACKAGE_MANAGER; then
+        break
+    fi
+done
+
+if [ -z "$PACKAGE_MANAGER" ]; then
+    error "Unknown package manager. Skipping CUDA installation."
+fi
+
+if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
+    case $OS_NAME in
+        centos|rhel) install_cuda_driver_yum 'rhel' $OS_VERSION ;;
+        rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
+        fedora) install_cuda_driver_yum $OS_NAME $OS_VERSION ;;
+        amzn) install_cuda_driver_yum 'fedora' '35' ;;
+        debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
+        ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
+        *) exit ;;
+    esac
+fi
+
+if ! lsmod | grep -q nvidia; then
+    KERNEL_RELEASE="$(uname -r)"
+    case $OS_NAME in
+        centos|rhel|rocky|fedora|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
+        debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
+        *) exit ;;
+    esac
+
+    NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
+    if [ -n "$NVIDIA_CUDA_VERSION" ]; then
+        $SUDO dkms install $NVIDIA_CUDA_VERSION
+    fi
+
+    if lsmod | grep -q nouveau; then
+        status "Removing nouveau..."
+        $SUDO rmmod nouveau
+    fi
+
+    $SUDO modprobe nvidia
+fi
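
The `dkms status | awk -F: '/added/ { print $1 }'` pipeline above picks out modules that are registered with DKMS but not yet built for the running kernel. A hypothetical illustration (module name and version invented):

```
# suppose `dkms status` prints:
#   nvidia/535.104.05: added
# then the awk filter extracts the module/version pair before the colon:
dkms status | awk -F: '/added/ { print $1 }'   # -> nvidia/535.104.05
sudo dkms install nvidia/535.104.05            # what the script then runs
```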