feat: ctxt append API #2339

Workflow file for this run

name: Evaluation
on:
  push:
    branches:
      - "main"
  pull_request:
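# Only one run per PR (or branch) at a time; a new push cancels the in-flight run.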
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
permissions:
  contents: write
jobs:
  build:
    name: Evaluation of Tactics & Decision Procedures
    permissions:
      pull-requests: write
    # Use expensive self-hosted runner. Reserved for performance benchmarking.
    # https://docs.github.com/en/enterprise-cloud@latest/actions/writing-workflows/choosing-where-your-workflow-runs/choosing-the-runner-for-a-job#choosing-self-hosted-runners
    runs-on: self-hosted
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v3
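      # elan is Lean's toolchain manager. `--default-toolchain none` installs no
      # toolchain up front; elan then fetches the version pinned in the repository's
      # `lean-toolchain` file on first use (here, the `lean --version` check).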
      - name: Install elan 🕑
        run: |
          set -o pipefail
          curl https://raw.githubusercontent.com/leanprover/elan/master/elan-init.sh -sSf | sh -s -- --default-toolchain none -y
          ~/.elan/bin/lean --version
          echo "$HOME/.elan/bin" >> $GITHUB_PATH
      - name: Cache `.lake` folder
        id: cache-lake
        uses: actions/cache@v4
        with:
          path: .lake
          key: ${{ runner.os }}-lake-tools-${{ hashFiles('lake-manifest.json') }}
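      # Dependencies for the evaluation scripts below (plotting and data wrangling).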
      - name: Install Python Modules
        run: |
          sudo apt-get update
          sudo apt-get install -y python3-matplotlib python3-pandas python3-num2words python3-psutil ripgrep
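      # Fetch Mathlib's prebuilt build cache so Mathlib is not rebuilt from source;
      # `-R` makes lake re-elaborate its configuration rather than reuse a cached one.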
      - name: Get Cache
        continue-on-error: true
        run: |
          lake -R exe cache get # download Mathlib's build cache.
      - name: Build
        continue-on-error: true
        run: |
          lake -R build
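      # Run the InstCombine benchmark comparison, then collect its results;
      # `-j48` presumably sets 48 parallel benchmark jobs (script-specific flag).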
      - name: Run LLVM
        continue-on-error: true
        run: |
          (cd bv-evaluation; python3 ./compare.py instcombine -j48)
      - name: Collect data LLVM
        continue-on-error: true
        run: |
          (cd bv-evaluation; python3 ./collect.py instcombine | tee llvm-stats)
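      # On pull requests, post the collected LLVM stats as a PR comment
      # (this is what the `pull-requests: write` permission above is for).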
      - uses: actions/github-script@v6
        if: github.event_name == 'pull_request'
        with:
          script: |
            const fs = require('fs')
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: fs.readFileSync('bv-evaluation/llvm-stats', 'utf8')
            })
      - name: Upload LLVM artifact
        uses: actions/upload-artifact@v4
        with:
          name: LLVM evaluation
          path: bv-evaluation/results
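      # The remaining suites (Alive, Hacker's Delight) follow the same
      # run-then-collect pattern as the LLVM steps above.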
      - name: Run Alive Symbolic
        run: |
          (cd bv-evaluation; python3 ./compare-leansat-vs-bitwuzla-alive-sym.py -j48)
      - name: Collect data Alive
        continue-on-error: true
        run: |
          (cd bv-evaluation; python3 ./collect-data-alive.py > /dev/null)
      - name: Run HDel Symbolic
        run: |
          (cd bv-evaluation; python3 ./compare-leansat-vs-bitwuzla-hdel-sym.py -j48)
      - name: Collect data HDel Symbolic
        continue-on-error: true
        run: |
          (cd bv-evaluation; python3 ./collect-data-hdel-symbolic.py > /dev/null)
      - name: Run HDel
        run: |
          (cd bv-evaluation; python3 ./compare.py hackersdelight -j48)
      - name: Collect data HDel
        continue-on-error: true
        run: |
          (cd bv-evaluation; python3 ./collect.py hackersdelight > /dev/null)
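      # The LLVM symbolic comparison is currently disabled but kept for reference.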
      # - name: Run LLVM Symbolic
      #   continue-on-error: true
      #   run: |
      #     (cd bv-evaluation; python3 ./compare-leansat-vs-bitwuzla-llvm-sym.py -j48)
      # - name: Compare LLVM Symbolic
      #   continue-on-error: true
      #   run: |
      #     (cd bv-evaluation; python3 ./collect-data-llvm-symbolic.py > /dev/null)
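      # Aggregate statistics across the suites; judging by its name, the script
      # collects stats for the `bv_decide` tactic (assumed purpose).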
      - name: Collect Stats
        continue-on-error: true
        run: |
          (cd bv-evaluation/tools; python3 ./collect-stats-bv-decide.py)