mirror of
https://github.com/ggerganov/llama.cpp
synced 2026-03-15 11:40:50 +01:00
* tests: add end-to-end tests per model architecture * fixup for rebase * fix use-after-free in llama-model-loader.cpp * fix CI * fix WebGPU * fix CI * disable CI for macOS-latest-cmake-arm64 * use expert_weights_scale only if != 0.0f * comments
20 lines
615 B
Bash
Executable File
20 lines
615 B
Bash
Executable File
#!/usr/bin/env bash
#
# Bisect a regression between two llama.cpp commits.
#
# Usage: ./scripts/git-bisect.sh <commit_bad> <commit_good> [additional arguments]
#   additional arguments: passed to CMake if they start with "-D",
#                         to llama-results otherwise
#
# Flow: first generate reference results (results.gguf) at the known-good
# commit, then let `git bisect run` build and check every candidate commit
# against that reference via git-bisect-run.sh.

if [ $# -lt 2 ]; then
    echo "usage: ./scripts/git-bisect.sh <commit_bad> <commit_good> [additional arguments]"
    echo "  additional arguments: passed to CMake if they start with \"-D\", to llama-results otherwise"
    exit 1
fi

set -e
set -x

commit_bad=$1
commit_good=$2
# Absolute directory of this script, resolved even when invoked via a
# relative path or through a symlinked cwd.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# Produce the reference results at the known-good commit.
# All expansions are quoted so paths/refs containing spaces don't word-split.
git checkout "${commit_good}"
"${script_dir}/git-bisect-run.sh" --output results.gguf "${@:3}"

# Bisect: git checks out candidate commits one by one; the run script's
# exit status (--check compares against results.gguf) marks each good/bad.
git bisect start "${commit_bad}" "${commit_good}"
git bisect run "${script_dir}/git-bisect-run.sh" --output results.gguf --check "${@:3}"

# Restore the working tree to its pre-bisect state.
git bisect reset