{ lib
, stdenv
, fetchFromGitHub
, SDL2
, makeWrapper
, wget
, Accelerate
, CoreGraphics
, CoreML
, CoreVideo
, MetalKit

, config
# CUDA acceleration is opt-in, defaulting to the global nixpkgs setting.
, cudaSupport ? config.cudaSupport
, cudaPackages ? {}
}:

let
  # It's necessary to consistently use backendStdenv when building with CUDA support,
  # otherwise we get libstdc++ errors downstream.
  # cuda imposes an upper bound on the gcc version, e.g. the latest gcc compatible with cudaPackages_11 is gcc11
  effectiveStdenv = if cudaSupport then cudaPackages.backendStdenv else stdenv;
in
effectiveStdenv.mkDerivation (finalAttrs: {
  pname = "whisper-cpp";
  version = "1.5.4";

  src = fetchFromGitHub {
    owner = "ggerganov";
    repo = "whisper.cpp";
    rev = "refs/tags/v${finalAttrs.version}";
    hash = "sha256-9H2Mlua5zx2WNXbz2C5foxIteuBgeCNALdq5bWyhQCk=";
  };

  # The upstream download script tries to download the models to the
  # directory of the script, which is not writable due to being
  # inside the nix store. This patch changes the script to download
  # the models to the current directory of where it is being run from.
  patches = [ ./download-models.patch ];

  nativeBuildInputs = [
    makeWrapper
  ] ++ lib.optionals cudaSupport (with cudaPackages; [
    cuda_nvcc

    # TODO: Replace with autoAddDriverRunpath
    # once https://github.com/NixOS/nixpkgs/pull/275241 has been merged
    autoAddOpenGLRunpathHook
  ]);

  buildInputs = [
    SDL2
  ] ++ lib.optionals stdenv.isDarwin [
    Accelerate
    CoreGraphics
    CoreML
    CoreVideo
    MetalKit
  ] ++ lib.optionals cudaSupport (with cudaPackages; [
    # A temporary hack for reducing the closure size, remove once cudaPackages
    # have stopped using lndir: https://github.com/NixOS/nixpkgs/issues/271792
    cuda_cudart.dev
    cuda_cudart.lib
    cuda_cudart.static
    libcublas.dev
    libcublas.lib
    libcublas.static
  ]);

  # Point the Makefile's `-lcuda` at the CUDA driver stubs so the build does
  # not require a real driver library to be present at compile time.
  postPatch = let
    cudaOldStr = "-lcuda ";
    cudaNewStr = "-lcuda -L${cudaPackages.cuda_cudart.lib}/lib/stubs ";
  in lib.optionalString cudaSupport ''
    substituteInPlace Makefile \
      --replace '${cudaOldStr}' '${cudaNewStr}'
  '';

  # Feature toggles consumed by the upstream Makefile.
  env = lib.optionalAttrs stdenv.isDarwin {
    WHISPER_COREML = "1";
    WHISPER_COREML_ALLOW_FALLBACK = "1";
  } // lib.optionalAttrs cudaSupport {
    WHISPER_CUBLAS = "1";
  };

  makeFlags = [ "main" "stream" "command" ];

  installPhase = ''
    runHook preInstall

    mkdir -p $out/bin
    cp ./main $out/bin/whisper-cpp
    cp ./stream $out/bin/whisper-cpp-stream
    cp ./command $out/bin/whisper-cpp-command

    cp models/download-ggml-model.sh $out/bin/whisper-cpp-download-ggml-model

    wrapProgram $out/bin/whisper-cpp-download-ggml-model \
      --prefix PATH : ${lib.makeBinPath [wget]}

    ${lib.optionalString stdenv.isDarwin ''
      install -Dt $out/share/whisper-cpp ggml-metal.metal

      for bin in whisper-cpp whisper-cpp-stream whisper-cpp-command; do
        wrapProgram $out/bin/$bin \
          --set-default GGML_METAL_PATH_RESOURCES $out/share/whisper-cpp
      done
    ''}

    runHook postInstall
  '';

  meta = with lib; {
    description = "Port of OpenAI's Whisper model in C/C++";
    longDescription = ''
      To download the models as described in the project's readme, you may
      use the `whisper-cpp-download-ggml-model` binary from this package.
    '';
    homepage = "https://github.com/ggerganov/whisper.cpp";
    license = licenses.mit;
    platforms = platforms.all;
    maintainers = with maintainers; [ dit7ya hughobrien ];
    mainProgram = "whisper-cpp";
  };
})