trec-covid

Model                       nDCG@10   R@100
BM25 (flat)                   0.595   0.109
BM25 (multifield)             0.656   0.114
SPLADE++ ED                   0.727   0.128
Contriever (msmarco)          0.596   0.091
BGE-base-en-v1.5              0.781   0.141
Cohere embed-english-v3.0     0.818   0.159
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-covid.flat \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir.bm25-flat.trec-covid.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
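The same run can be reproduced interactively through Pyserini's Python API. A minimal sketch, assuming the same prebuilt index as above (the query string is only an illustration):

from pyserini.search.lucene import LuceneSearcher

# Download (and cache) the prebuilt BEIR index, then issue a BM25 query.
searcher = LuceneSearcher.from_prebuilt_index('beir-v1.0.0-trec-covid.flat')
hits = searcher.search('what is the origin of COVID-19', k=1000)

# Inspect the top of the ranked list.
for hit in hits[:10]:
    print(f'{hit.docid:30} {hit.score:.4f}')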
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.bm25-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.bm25-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.bm25-flat.trec-covid.txt
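In each case, -c tells trec_eval to average over all topics in the qrels (topics missing from the run score zero), and the three invocations report nDCG@10, recall@100, and recall@1000; the score tables list the first two.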
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-covid.multifield \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir.bm25-multifield.trec-covid.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.bm25-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.bm25-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.bm25-multifield.trec-covid.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-covid.splade-pp-ed \
--topics beir-v1.0.0-trec-covid.test.splade-pp-ed \
--output run.beir.splade-pp-ed.trec-covid.txt \
--output-format trec \
--hits 1000 --impact --remove-query
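Here --impact swaps BM25 scoring for quantized impact weights, and the topics file contains pre-encoded SPLADE++ EnsembleDistil query vectors. A rough interactive equivalent, assuming the naver/splade-cocondenser-ensembledistil checkpoint works as an on-the-fly query encoder:

from pyserini.search.lucene import LuceneImpactSearcher

# Impact search over the learned sparse index; the second argument names the
# model used to expand and weight query terms at search time.
searcher = LuceneImpactSearcher.from_prebuilt_index(
    'beir-v1.0.0-trec-covid.splade-pp-ed',
    'naver/splade-cocondenser-ensembledistil')
hits = searcher.search('what is the origin of COVID-19', k=1000)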
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.splade-pp-ed.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.splade-pp-ed.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.splade-pp-ed.trec-covid.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-trec-covid.contriever-msmarco \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir.contriever-msmarco.trec-covid.txt \
--hits 1000 --remove-query
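For the dense runs, the CLI encodes queries on the fly; --encoder-class contriever selects mean pooling without L2 normalization. A sketch of the same search through the Python API, assuming AutoQueryEncoder mirrors those settings:

from pyserini.search.faiss import FaissSearcher, AutoQueryEncoder

# Encode queries with Contriever (mean pooling) and search the prebuilt Faiss index.
encoder = AutoQueryEncoder('facebook/contriever-msmarco', pooling='mean')
searcher = FaissSearcher.from_prebuilt_index(
    'beir-v1.0.0-trec-covid.contriever-msmarco', encoder)
hits = searcher.search('what is the origin of COVID-19', k=1000)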
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.contriever-msmarco.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.contriever-msmarco.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.contriever-msmarco.trec-covid.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-trec-covid.bge-base-en-v1.5 \
--topics beir-v1.0.0-trec-covid-test \
--output run.beir.bge-base-en-v1.5.faiss.trec-covid.txt \
--hits 1000 --remove-query
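BGE-base-en-v1.5 is trained to embed short queries behind an instruction, hence the --query-prefix string; --l2-norm normalizes the query embeddings so that inner-product search over the flat Faiss index amounts to cosine similarity.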
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.bge-base-en-v1.5.faiss.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.bge-base-en-v1.5.faiss.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.bge-base-en-v1.5.faiss.trec-covid.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-trec-covid.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-trec-covid-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-trec-covid-test \
--output run.beir.cohere-embed-english-v3.0.trec-covid.txt \
--hits 1000 --remove-query
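Because embed-english-v3.0 is served through Cohere's API, this command uses --encoded-queries to fetch pre-computed query embeddings instead of encoding on the fly, so reproducing the run requires no API key.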
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
run.beir.cohere-embed-english-v3.0.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-covid-test \
run.beir.cohere-embed-english-v3.0.trec-covid.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-covid-test \
run.beir.cohere-embed-english-v3.0.trec-covid.txt
bioasq

Model                       nDCG@10   R@100
BM25 (flat)                   0.522   0.769
BM25 (multifield)             0.465   0.715
SPLADE++ ED                   0.498   0.739
Contriever (msmarco)          0.383   0.607
BGE-base-en-v1.5              0.415   0.632
Cohere embed-english-v3.0     0.457   0.679
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-bioasq.flat \
--topics beir-v1.0.0-bioasq-test \
--output run.beir.bm25-flat.bioasq.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.bm25-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.bm25-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.bm25-flat.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-bioasq.multifield \
--topics beir-v1.0.0-bioasq-test \
--output run.beir.bm25-multifield.bioasq.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.bm25-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.bm25-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.bm25-multifield.bioasq.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-bioasq.splade-pp-ed \
--topics beir-v1.0.0-bioasq.test.splade-pp-ed \
--output run.beir.splade-pp-ed.bioasq.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.splade-pp-ed.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.splade-pp-ed.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.splade-pp-ed.bioasq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-bioasq.contriever-msmarco \
--topics beir-v1.0.0-bioasq-test \
--output run.beir.contriever-msmarco.bioasq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.contriever-msmarco.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.contriever-msmarco.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.contriever-msmarco.bioasq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-bioasq.bge-base-en-v1.5 \
--topics beir-v1.0.0-bioasq-test \
--output run.beir.bge-base-en-v1.5.faiss.bioasq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.bge-base-en-v1.5.faiss.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.bge-base-en-v1.5.faiss.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.bge-base-en-v1.5.faiss.bioasq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-bioasq.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-bioasq-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-bioasq-test \
--output run.beir.cohere-embed-english-v3.0.bioasq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
run.beir.cohere-embed-english-v3.0.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-bioasq-test \
run.beir.cohere-embed-english-v3.0.bioasq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-bioasq-test \
run.beir.cohere-embed-english-v3.0.bioasq.txt
nfcorpus

Model                       nDCG@10   R@100
BM25 (flat)                   0.322   0.246
BM25 (multifield)             0.325   0.250
SPLADE++ ED                   0.347   0.284
Contriever (msmarco)          0.328   0.301
BGE-base-en-v1.5              0.373   0.337
Cohere embed-english-v3.0     0.386   0.351
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nfcorpus.flat \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir.bm25-flat.nfcorpus.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-flat.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nfcorpus.multifield \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir.bm25-multifield.nfcorpus.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.bm25-multifield.nfcorpus.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nfcorpus.splade-pp-ed \
--topics beir-v1.0.0-nfcorpus.test.splade-pp-ed \
--output run.beir.splade-pp-ed.nfcorpus.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.splade-pp-ed.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.splade-pp-ed.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.splade-pp-ed.nfcorpus.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-nfcorpus.contriever-msmarco \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir.contriever-msmarco.nfcorpus.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.contriever-msmarco.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.contriever-msmarco.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.contriever-msmarco.nfcorpus.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-nfcorpus.bge-base-en-v1.5 \
--topics beir-v1.0.0-nfcorpus-test \
--output run.beir.bge-base-en-v1.5.faiss.nfcorpus.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.bge-base-en-v1.5.faiss.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.bge-base-en-v1.5.faiss.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.bge-base-en-v1.5.faiss.nfcorpus.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-nfcorpus.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-nfcorpus-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-nfcorpus-test \
--output run.beir.cohere-embed-english-v3.0.nfcorpus.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
run.beir.cohere-embed-english-v3.0.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nfcorpus-test \
run.beir.cohere-embed-english-v3.0.nfcorpus.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nfcorpus-test \
run.beir.cohere-embed-english-v3.0.nfcorpus.txt
nq

Model                       nDCG@10   R@100
BM25 (flat)                   0.305   0.751
BM25 (multifield)             0.329   0.760
SPLADE++ ED                   0.538   0.930
Contriever (msmarco)          0.498   0.925
BGE-base-en-v1.5              0.541   0.942
Cohere embed-english-v3.0     0.616   0.956
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nq.flat \
--topics beir-v1.0.0-nq-test \
--output run.beir.bm25-flat.nq.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.bm25-flat.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.bm25-flat.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.bm25-flat.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nq.multifield \
--topics beir-v1.0.0-nq-test \
--output run.beir.bm25-multifield.nq.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.bm25-multifield.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.bm25-multifield.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.bm25-multifield.nq.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-nq.splade-pp-ed \
--topics beir-v1.0.0-nq.test.splade-pp-ed \
--output run.beir.splade-pp-ed.nq.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.splade-pp-ed.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.splade-pp-ed.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.splade-pp-ed.nq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-nq.contriever-msmarco \
--topics beir-v1.0.0-nq-test \
--output run.beir.contriever-msmarco.nq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.contriever-msmarco.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.contriever-msmarco.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.contriever-msmarco.nq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-nq.bge-base-en-v1.5 \
--topics beir-v1.0.0-nq-test \
--output run.beir.bge-base-en-v1.5.faiss.nq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.bge-base-en-v1.5.faiss.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.bge-base-en-v1.5.faiss.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.bge-base-en-v1.5.faiss.nq.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-nq.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-nq-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-nq-test \
--output run.beir.cohere-embed-english-v3.0.nq.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-nq-test \
run.beir.cohere-embed-english-v3.0.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-nq-test \
run.beir.cohere-embed-english-v3.0.nq.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-nq-test \
run.beir.cohere-embed-english-v3.0.nq.txt
hotpotqa

Model                       nDCG@10   R@100
BM25 (flat)                   0.633   0.796
BM25 (multifield)             0.603   0.740
SPLADE++ ED                   0.687   0.818
Contriever (msmarco)          0.638   0.777
BGE-base-en-v1.5              0.726   0.873
Cohere embed-english-v3.0     0.707   0.823
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-hotpotqa.flat \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir.bm25-flat.hotpotqa.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-flat.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-hotpotqa.multifield \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir.bm25-multifield.hotpotqa.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.bm25-multifield.hotpotqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-hotpotqa.splade-pp-ed \
--topics beir-v1.0.0-hotpotqa.test.splade-pp-ed \
--output run.beir.splade-pp-ed.hotpotqa.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.splade-pp-ed.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.splade-pp-ed.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.splade-pp-ed.hotpotqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-hotpotqa.contriever-msmarco \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir.contriever-msmarco.hotpotqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.contriever-msmarco.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.contriever-msmarco.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.contriever-msmarco.hotpotqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-hotpotqa.bge-base-en-v1.5 \
--topics beir-v1.0.0-hotpotqa-test \
--output run.beir.bge-base-en-v1.5.faiss.hotpotqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.bge-base-en-v1.5.faiss.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.bge-base-en-v1.5.faiss.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.bge-base-en-v1.5.faiss.hotpotqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-hotpotqa.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-hotpotqa-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-hotpotqa-test \
--output run.beir.cohere-embed-english-v3.0.hotpotqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
run.beir.cohere-embed-english-v3.0.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-hotpotqa-test \
run.beir.cohere-embed-english-v3.0.hotpotqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-hotpotqa-test \
run.beir.cohere-embed-english-v3.0.hotpotqa.txt
fiqa

Model                       nDCG@10   R@100
BM25 (flat)                   0.236   0.539
BM25 (multifield)             0.236   0.539
SPLADE++ ED                   0.347   0.631
Contriever (msmarco)          0.329   0.656
BGE-base-en-v1.5              0.406   0.742
Cohere embed-english-v3.0     0.421   0.736
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fiqa.flat \
--topics beir-v1.0.0-fiqa-test \
--output run.beir.bm25-flat.fiqa.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.bm25-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.bm25-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.bm25-flat.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fiqa.multifield \
--topics beir-v1.0.0-fiqa-test \
--output run.beir.bm25-multifield.fiqa.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.bm25-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.bm25-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.bm25-multifield.fiqa.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fiqa.splade-pp-ed \
--topics beir-v1.0.0-fiqa.test.splade-pp-ed \
--output run.beir.splade-pp-ed.fiqa.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.splade-pp-ed.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.splade-pp-ed.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.splade-pp-ed.fiqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-fiqa.contriever-msmarco \
--topics beir-v1.0.0-fiqa-test \
--output run.beir.contriever-msmarco.fiqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.contriever-msmarco.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.contriever-msmarco.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.contriever-msmarco.fiqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-fiqa.bge-base-en-v1.5 \
--topics beir-v1.0.0-fiqa-test \
--output run.beir.bge-base-en-v1.5.faiss.fiqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.bge-base-en-v1.5.faiss.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.bge-base-en-v1.5.faiss.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.bge-base-en-v1.5.faiss.fiqa.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-fiqa.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-fiqa-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-fiqa-test \
--output run.beir.cohere-embed-english-v3.0.fiqa.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
run.beir.cohere-embed-english-v3.0.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fiqa-test \
run.beir.cohere-embed-english-v3.0.fiqa.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fiqa-test \
run.beir.cohere-embed-english-v3.0.fiqa.txt
signal1m

Model                       nDCG@10   R@100
BM25 (flat)                   0.330   0.370
BM25 (multifield)             0.330   0.370
SPLADE++ ED                   0.301   0.340
Contriever (msmarco)          0.278   0.322
BGE-base-en-v1.5              0.289   0.311
Cohere embed-english-v3.0     0.263   0.283
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-signal1m.flat \
--topics beir-v1.0.0-signal1m-test \
--output run.beir.bm25-flat.signal1m.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.bm25-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.bm25-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.bm25-flat.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-signal1m.multifield \
--topics beir-v1.0.0-signal1m-test \
--output run.beir.bm25-multifield.signal1m.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.bm25-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.bm25-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.bm25-multifield.signal1m.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-signal1m.splade-pp-ed \
--topics beir-v1.0.0-signal1m.test.splade-pp-ed \
--output run.beir.splade-pp-ed.signal1m.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.splade-pp-ed.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.splade-pp-ed.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.splade-pp-ed.signal1m.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-signal1m.contriever-msmarco \
--topics beir-v1.0.0-signal1m-test \
--output run.beir.contriever-msmarco.signal1m.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.contriever-msmarco.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.contriever-msmarco.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.contriever-msmarco.signal1m.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-signal1m.bge-base-en-v1.5 \
--topics beir-v1.0.0-signal1m-test \
--output run.beir.bge-base-en-v1.5.faiss.signal1m.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.bge-base-en-v1.5.faiss.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.bge-base-en-v1.5.faiss.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.bge-base-en-v1.5.faiss.signal1m.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-signal1m.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-signal1m-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-signal1m-test \
--output run.beir.cohere-embed-english-v3.0.signal1m.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
run.beir.cohere-embed-english-v3.0.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-signal1m-test \
run.beir.cohere-embed-english-v3.0.signal1m.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-signal1m-test \
run.beir.cohere-embed-english-v3.0.signal1m.txt
trec-news

Model                       nDCG@10   R@100
BM25 (flat)                   0.395   0.447
BM25 (multifield)             0.398   0.422
SPLADE++ ED                   0.415   0.441
Contriever (msmarco)          0.428   0.492
BGE-base-en-v1.5              0.442   0.499
Cohere embed-english-v3.0     0.504   0.543
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-news.flat \
--topics beir-v1.0.0-trec-news-test \
--output run.beir.bm25-flat.trec-news.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.bm25-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.bm25-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.bm25-flat.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-news.multifield \
--topics beir-v1.0.0-trec-news-test \
--output run.beir.bm25-multifield.trec-news.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.bm25-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.bm25-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.bm25-multifield.trec-news.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-trec-news.splade-pp-ed \
--topics beir-v1.0.0-trec-news.test.splade-pp-ed \
--output run.beir.splade-pp-ed.trec-news.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.splade-pp-ed.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.splade-pp-ed.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.splade-pp-ed.trec-news.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-trec-news.contriever-msmarco \
--topics beir-v1.0.0-trec-news-test \
--output run.beir.contriever-msmarco.trec-news.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.contriever-msmarco.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.contriever-msmarco.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.contriever-msmarco.trec-news.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-trec-news.bge-base-en-v1.5 \
--topics beir-v1.0.0-trec-news-test \
--output run.beir.bge-base-en-v1.5.faiss.trec-news.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.bge-base-en-v1.5.faiss.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.bge-base-en-v1.5.faiss.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.bge-base-en-v1.5.faiss.trec-news.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-trec-news.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-trec-news-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-trec-news-test \
--output run.beir.cohere-embed-english-v3.0.trec-news.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
run.beir.cohere-embed-english-v3.0.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-trec-news-test \
run.beir.cohere-embed-english-v3.0.trec-news.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-trec-news-test \
run.beir.cohere-embed-english-v3.0.trec-news.txt
robust04

Model                       nDCG@10   R@100
BM25 (flat)                   0.407   0.375
BM25 (multifield)             0.407   0.375
SPLADE++ ED                   0.468   0.385
Contriever (msmarco)          0.473   0.392
BGE-base-en-v1.5              0.444   0.351
Cohere embed-english-v3.0     0.541   0.417
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-robust04.flat \
--topics beir-v1.0.0-robust04-test \
--output run.beir.bm25-flat.robust04.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.bm25-flat.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.bm25-flat.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.bm25-flat.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-robust04.multifield \
--topics beir-v1.0.0-robust04-test \
--output run.beir.bm25-multifield.robust04.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.bm25-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.bm25-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.bm25-multifield.robust04.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-robust04.splade-pp-ed \
--topics beir-v1.0.0-robust04.test.splade-pp-ed \
--output run.beir.splade-pp-ed.robust04.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.splade-pp-ed.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.splade-pp-ed.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.splade-pp-ed.robust04.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-robust04.contriever-msmarco \
--topics beir-v1.0.0-robust04-test \
--output run.beir.contriever-msmarco.robust04.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.contriever-msmarco.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.contriever-msmarco.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.contriever-msmarco.robust04.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-robust04.bge-base-en-v1.5 \
--topics beir-v1.0.0-robust04-test \
--output run.beir.bge-base-en-v1.5.faiss.robust04.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.bge-base-en-v1.5.faiss.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.bge-base-en-v1.5.faiss.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.bge-base-en-v1.5.faiss.robust04.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-robust04.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-robust04-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-robust04-test \
--output run.beir.cohere-embed-english-v3.0.robust04.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
run.beir.cohere-embed-english-v3.0.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-robust04-test \
run.beir.cohere-embed-english-v3.0.robust04.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-robust04-test \
run.beir.cohere-embed-english-v3.0.robust04.txt
arguana

Model                       nDCG@10   R@100
BM25 (flat)                   0.397   0.932
BM25 (multifield)             0.414   0.943
SPLADE++ ED                   0.520   0.974
Contriever (msmarco)          0.446   0.977
BGE-base-en-v1.5              0.636   0.992
Cohere embed-english-v3.0     0.540   0.982
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-arguana.flat \
--topics beir-v1.0.0-arguana-test \
--output run.beir.bm25-flat.arguana.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.bm25-flat.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.bm25-flat.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.bm25-flat.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-arguana.multifield \
--topics beir-v1.0.0-arguana-test \
--output run.beir.bm25-multifield.arguana.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.bm25-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.bm25-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.bm25-multifield.arguana.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-arguana.splade-pp-ed \
--topics beir-v1.0.0-arguana.test.splade-pp-ed \
--output run.beir.splade-pp-ed.arguana.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.splade-pp-ed.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.splade-pp-ed.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.splade-pp-ed.arguana.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-arguana.contriever-msmarco \
--topics beir-v1.0.0-arguana-test \
--output run.beir.contriever-msmarco.arguana.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.contriever-msmarco.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.contriever-msmarco.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.contriever-msmarco.arguana.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "" \
--index beir-v1.0.0-arguana.bge-base-en-v1.5 \
--topics beir-v1.0.0-arguana-test \
--output run.beir.bge-base-en-v1.5.faiss.arguana.txt \
--hits 1000 --remove-query
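Note the empty --query-prefix: ArguAna queries are themselves full arguments rather than short questions, so the BGE short-query instruction used for the other datasets is omitted here.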
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.bge-base-en-v1.5.faiss.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.bge-base-en-v1.5.faiss.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.bge-base-en-v1.5.faiss.arguana.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-arguana.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-arguana-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-arguana-test \
--output run.beir.cohere-embed-english-v3.0.arguana.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
run.beir.cohere-embed-english-v3.0.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-arguana-test \
run.beir.cohere-embed-english-v3.0.arguana.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-arguana-test \
run.beir.cohere-embed-english-v3.0.arguana.txt
webis-touche2020

Model                       nDCG@10   R@100
BM25 (flat)                   0.442   0.582
BM25 (multifield)             0.367   0.538
SPLADE++ ED                   0.247   0.471
Contriever (msmarco)          0.204   0.442
BGE-base-en-v1.5              0.257   0.487
Cohere embed-english-v3.0     0.326   0.516
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-webis-touche2020.flat \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir.bm25-flat.webis-touche2020.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-flat.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-webis-touche2020.multifield \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir.bm25-multifield.webis-touche2020.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.bm25-multifield.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-webis-touche2020.splade-pp-ed \
--topics beir-v1.0.0-webis-touche2020.test.splade-pp-ed \
--output run.beir.splade-pp-ed.webis-touche2020.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.splade-pp-ed.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.splade-pp-ed.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.splade-pp-ed.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-webis-touche2020.contriever-msmarco \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir.contriever-msmarco.webis-touche2020.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.contriever-msmarco.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.contriever-msmarco.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.contriever-msmarco.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-webis-touche2020.bge-base-en-v1.5 \
--topics beir-v1.0.0-webis-touche2020-test \
--output run.beir.bge-base-en-v1.5.faiss.webis-touche2020.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.bge-base-en-v1.5.faiss.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.bge-base-en-v1.5.faiss.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.bge-base-en-v1.5.faiss.webis-touche2020.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-webis-touche2020.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-webis-touche2020-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-webis-touche2020-test \
--output run.beir.cohere-embed-english-v3.0.webis-touche2020.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
run.beir.cohere-embed-english-v3.0.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-webis-touche2020-test \
run.beir.cohere-embed-english-v3.0.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
run.beir.cohere-embed-english-v3.0.webis-touche2020.txt
cqadupstack (average)

Model                       nDCG@10   R@100
BM25 (flat)                   0.302   0.580
BM25 (multifield)             0.299   0.606
SPLADE++ ED                   0.334   0.650
Contriever (msmarco)          0.345   0.663
BGE-base-en-v1.5              0.424   0.762
Cohere embed-english-v3.0     0.415   0.745
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-android.flat \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir.bm25-flat.cqadupstack-android.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-english.flat \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir.bm25-flat.cqadupstack-english.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gaming.flat \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir.bm25-flat.cqadupstack-gaming.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gis.flat \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir.bm25-flat.cqadupstack-gis.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-mathematica.flat \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir.bm25-flat.cqadupstack-mathematica.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-physics.flat \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir.bm25-flat.cqadupstack-physics.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-programmers.flat \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir.bm25-flat.cqadupstack-programmers.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-stats.flat \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir.bm25-flat.cqadupstack-stats.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-tex.flat \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir.bm25-flat.cqadupstack-tex.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-unix.flat \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir.bm25-flat.cqadupstack-unix.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-webmasters.flat \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir.bm25-flat.cqadupstack-webmasters.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-wordpress.flat \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir.bm25-flat.cqadupstack-wordpress.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-flat.cqadupstack-wordpress.txt
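The three metrics above repeat verbatim for each of the twelve CQADupStack sub-collections, so the per-forum invocations can be driven by a single loop. A minimal bash sketch; the SUBS and MODEL variables are conveniences introduced here, not part of the official commands, and MODEL can be swapped to bm25-multifield, splade-pp-ed, contriever-msmarco, or bge-base-en-v1.5.faiss to evaluate the runs generated in the sections below:
# Convenience loop; equivalent to issuing the individual trec_eval commands above.
MODEL=bm25-flat
SUBS="android english gaming gis mathematica physics programmers stats tex unix webmasters wordpress"
for sub in ${SUBS}; do
  for metric in ndcg_cut.10 recall.100 recall.1000; do
    python -m pyserini.eval.trec_eval \
      -c -m ${metric} beir-v1.0.0-cqadupstack-${sub}-test \
      run.beir.${MODEL}.cqadupstack-${sub}.txt
  done
done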
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-android.multifield \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir.bm25-multifield.cqadupstack-android.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-english.multifield \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir.bm25-multifield.cqadupstack-english.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gaming.multifield \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir.bm25-multifield.cqadupstack-gaming.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gis.multifield \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir.bm25-multifield.cqadupstack-gis.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-mathematica.multifield \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir.bm25-multifield.cqadupstack-mathematica.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-physics.multifield \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir.bm25-multifield.cqadupstack-physics.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-programmers.multifield \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir.bm25-multifield.cqadupstack-programmers.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-stats.multifield \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir.bm25-multifield.cqadupstack-stats.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-tex.multifield \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir.bm25-multifield.cqadupstack-tex.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-unix.multifield \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir.bm25-multifield.cqadupstack-unix.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-webmasters.multifield \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir.bm25-multifield.cqadupstack-webmasters.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-wordpress.multifield \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir.bm25-multifield.cqadupstack-wordpress.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
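Each sub-collection is searched with identical settings, so the twelve invocations above collapse into a loop as well. A sketch, reusing the illustrative SUBS list from the evaluation loop above:
# Convenience loop; equivalent to the individual multifield search commands above.
SUBS="android english gaming gis mathematica physics programmers stats tex unix webmasters wordpress"
for sub in ${SUBS}; do
  python -m pyserini.search.lucene \
    --threads 16 --batch-size 128 \
    --index beir-v1.0.0-cqadupstack-${sub}.multifield \
    --topics beir-v1.0.0-cqadupstack-${sub}-test \
    --output run.beir.bm25-multifield.cqadupstack-${sub}.txt \
    --output-format trec \
    --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
done
The SPLADE++ EnsembleDistil commands below follow the same pattern, differing only in the .splade-pp-ed index and topic suffixes and in --impact scoring over pre-encoded queries.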
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.bm25-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.bm25-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bm25-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bm25-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bm25-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bm25-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bm25-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bm25-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bm25-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bm25-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bm25-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bm25-multifield.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-android.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-android.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-android.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-english.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-english.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-english.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gaming.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-gaming.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-gaming.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-gis.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-gis.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-gis.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-mathematica.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-mathematica.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-mathematica.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-physics.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-physics.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-physics.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-programmers.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-programmers.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-programmers.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-stats.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-stats.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-stats.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-tex.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-tex.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-tex.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-unix.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-unix.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-unix.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-webmasters.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-webmasters.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-webmasters.txt \
--output-format trec \
--hits 1000 --impact --remove-query
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-cqadupstack-wordpress.splade-pp-ed \
--topics beir-v1.0.0-cqadupstack-wordpress.test.splade-pp-ed \
--output run.beir.splade-pp-ed.cqadupstack-wordpress.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.splade-pp-ed.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.splade-pp-ed.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.splade-pp-ed.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.splade-pp-ed.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.splade-pp-ed.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.splade-pp-ed.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.splade-pp-ed.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.splade-pp-ed.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.splade-pp-ed.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.splade-pp-ed.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.splade-pp-ed.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.splade-pp-ed.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.splade-pp-ed.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.splade-pp-ed.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.splade-pp-ed.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.splade-pp-ed.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.splade-pp-ed.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.splade-pp-ed.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.splade-pp-ed.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.splade-pp-ed.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.splade-pp-ed.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.splade-pp-ed.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.splade-pp-ed.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.splade-pp-ed.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.splade-pp-ed.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.splade-pp-ed.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.splade-pp-ed.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.splade-pp-ed.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.splade-pp-ed.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.splade-pp-ed.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.splade-pp-ed.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.splade-pp-ed.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.splade-pp-ed.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.splade-pp-ed.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.splade-pp-ed.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.splade-pp-ed.cqadupstack-wordpress.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-android.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir.contriever-msmarco.cqadupstack-android.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-english.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir.contriever-msmarco.cqadupstack-english.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-gaming.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir.contriever-msmarco.cqadupstack-gaming.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-gis.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir.contriever-msmarco.cqadupstack-gis.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-mathematica.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir.contriever-msmarco.cqadupstack-mathematica.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-physics.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir.contriever-msmarco.cqadupstack-physics.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-programmers.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir.contriever-msmarco.cqadupstack-programmers.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-stats.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir.contriever-msmarco.cqadupstack-stats.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-tex.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir.contriever-msmarco.cqadupstack-tex.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-unix.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir.contriever-msmarco.cqadupstack-unix.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-webmasters.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir.contriever-msmarco.cqadupstack-webmasters.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-cqadupstack-wordpress.contriever-msmarco \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir.contriever-msmarco.cqadupstack-wordpress.txt \
--hits 1000 --remove-query
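The dense Contriever searches loop the same way, with pyserini.search.faiss and the encoder flags replacing the Lucene arguments. A sketch under the same illustrative SUBS convention:
# Convenience loop; equivalent to the individual faiss search commands above.
SUBS="android english gaming gis mathematica physics programmers stats tex unix webmasters wordpress"
for sub in ${SUBS}; do
  python -m pyserini.search.faiss \
    --threads 16 --batch-size 512 \
    --encoder-class contriever --encoder facebook/contriever-msmarco \
    --index beir-v1.0.0-cqadupstack-${sub}.contriever-msmarco \
    --topics beir-v1.0.0-cqadupstack-${sub}-test \
    --output run.beir.contriever-msmarco.cqadupstack-${sub}.txt \
    --hits 1000 --remove-query
done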
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.contriever-msmarco.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.contriever-msmarco.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.contriever-msmarco.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.contriever-msmarco.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.contriever-msmarco.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.contriever-msmarco.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.contriever-msmarco.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.contriever-msmarco.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.contriever-msmarco.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.contriever-msmarco.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.contriever-msmarco.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.contriever-msmarco.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.contriever-msmarco.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.contriever-msmarco.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.contriever-msmarco.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.contriever-msmarco.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.contriever-msmarco.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.contriever-msmarco.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.contriever-msmarco.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.contriever-msmarco.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.contriever-msmarco.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.contriever-msmarco.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.contriever-msmarco.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.contriever-msmarco.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.contriever-msmarco.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.contriever-msmarco.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.contriever-msmarco.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.contriever-msmarco.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.contriever-msmarco.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.contriever-msmarco.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.contriever-msmarco.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.contriever-msmarco.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.contriever-msmarco.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.contriever-msmarco.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.contriever-msmarco.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.contriever-msmarco.cqadupstack-wordpress.txt
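BEIR conventionally reports a single CQADupStack number: the unweighted mean of the twelve per-forum scores. A minimal sketch (not part of the official commands) that aggregates nDCG@10 for the Contriever runs, assuming trec_eval's standard output format, where the score appears on a line of the form "ndcg_cut_10  all  0.xxxx":
# Hypothetical aggregation helper; macro-averages the per-forum nDCG@10 values.
SUBS="android english gaming gis mathematica physics programmers stats tex unix webmasters wordpress"
for sub in ${SUBS}; do
  python -m pyserini.eval.trec_eval \
    -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-${sub}-test \
    run.beir.contriever-msmarco.cqadupstack-${sub}.txt
done | awk '$1 == "ndcg_cut_10" && $2 == "all" { s += $3; n++ }
            END { printf "CQADupStack macro-average nDCG@10 over %d forums: %.4f\n", n, s / n }'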
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-android.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-android-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-android.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-english.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-english-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-english.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-gaming.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-gaming.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-gis.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-gis-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-gis.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-mathematica.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-mathematica.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-physics.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-physics-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-physics.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-programmers.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-stats.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-stats-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-tex.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-tex-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-unix.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-unix-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-webmasters.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-cqadupstack-wordpress.bge-base-en-v1.5 \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.bge-base-en-v1.5.faiss.cqadupstack-wordpress.txt
Copy
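BEIR reports CQADupStack as a single score: the macro-average over the twelve subforum runs above. A minimal sketch of that averaging, assuming the run files above exist (SUBFORUMS and average_metric are illustrative names, not part of Pyserini):

import subprocess

# The twelve CQADupStack subforums evaluated above.
SUBFORUMS = ['android', 'english', 'gaming', 'gis', 'mathematica', 'physics',
             'programmers', 'stats', 'tex', 'unix', 'webmasters', 'wordpress']

def average_metric(run_prefix, metric='ndcg_cut.10'):
    # Macro-average one trec_eval metric over the twelve subforum runs.
    scores = []
    for forum in SUBFORUMS:
        out = subprocess.run(
            ['python', '-m', 'pyserini.eval.trec_eval', '-c', '-m', metric,
             f'beir-v1.0.0-cqadupstack-{forum}-test',
             f'{run_prefix}.cqadupstack-{forum}.txt'],
            capture_output=True, text=True, check=True).stdout
        for line in out.splitlines():
            # trec_eval reports lines like 'ndcg_cut_10   all   0.5076'.
            if line.startswith(metric.replace('.', '_')):
                scores.append(float(line.split()[-1]))
    return sum(scores) / len(scores)

print(average_metric('run.beir.bge-base-en-v1.5.faiss'))

The same helper covers the Cohere runs below with run_prefix 'run.beir.cohere-embed-english-v3.0'.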
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-android.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-android-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-android-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-android.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-english.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-english-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-english-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-english.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-gaming.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-gaming-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-gaming-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-gaming.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-gis.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-gis-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-gis-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-gis.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-mathematica.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-mathematica-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-mathematica-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-mathematica.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-physics.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-physics-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-physics-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-physics.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-programmers.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-programmers-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-programmers-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-programmers.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-stats.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-stats-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-stats-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-stats.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-tex.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-tex-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-tex-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-tex.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-unix.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-unix-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-unix-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-unix.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-webmasters.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-webmasters-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-webmasters-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-webmasters.txt \
--hits 1000 --remove-query
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-cqadupstack-wordpress.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-cqadupstack-wordpress-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-wordpress-test \
--output run.beir.cohere-embed-english-v3.0.cqadupstack-wordpress.txt \
--hits 1000 --remove-query
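The twelve invocations above differ only in the subforum name, so they can equally be driven from a loop; a sketch that mirrors the commands exactly:

import subprocess

SUBFORUMS = ['android', 'english', 'gaming', 'gis', 'mathematica', 'physics',
             'programmers', 'stats', 'tex', 'unix', 'webmasters', 'wordpress']

for forum in SUBFORUMS:
    # One pyserini.search.faiss call per subforum, same flags as above.
    subprocess.run(
        ['python', '-m', 'pyserini.search.faiss',
         '--threads', '16', '--batch-size', '512',
         '--index', f'beir-v1.0.0-cqadupstack-{forum}.cohere-embed-english-v3.0',
         '--topics', f'beir-v1.0.0-cqadupstack-{forum}-test',
         '--encoded-queries',
         f'cohere-embed-english-v3.0-beir-v1.0.0-cqadupstack-{forum}-test',
         '--output', f'run.beir.cohere-embed-english-v3.0.cqadupstack-{forum}.txt',
         '--hits', '1000', '--remove-query'],
        check=True)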
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
run.beir.cohere-embed-english-v3.0.cqadupstack-wordpress.txt
quora
Model                        nDCG@10   R@100
BM25 (flat)                  0.789     0.973
BM25 (multifield)            0.789     0.973
SPLADE++ (ED)                0.834     0.986
Contriever (MS MARCO)        0.865     0.994
BGE-base-en-v1.5             0.889     0.997
Cohere embed-english-v3.0    0.887     0.996
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-quora.flat \
--topics beir-v1.0.0-quora-test \
--output run.beir.bm25-flat.quora.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.bm25-flat.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.bm25-flat.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.bm25-flat.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-quora.multifield \
--topics beir-v1.0.0-quora-test \
--output run.beir.bm25-multifield.quora.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
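The multifield run is also reproducible from the Python API; a minimal sketch, assuming the prebuilt index name from the command above (the query string is illustrative):

from pyserini.search.lucene import LuceneSearcher

# Prebuilt multifield index; weights mirror --fields contents=1.0 title=1.0.
searcher = LuceneSearcher.from_prebuilt_index('beir-v1.0.0-quora.multifield')
hits = searcher.search('how do i improve my english speaking?',
                       k=1000, fields={'contents': 1.0, 'title': 1.0})
for hit in hits[:3]:
    print(hit.docid, round(hit.score, 4))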
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.bm25-multifield.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.bm25-multifield.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.bm25-multifield.quora.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-quora.splade-pp-ed \
--topics beir-v1.0.0-quora.test.splade-pp-ed \
--output run.beir.splade-pp-ed.quora.txt \
--output-format trec \
--hits 1000 --impact --remove-query
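The SPLADE++ (EnsembleDistil) run above searches with pre-encoded query topics. To encode queries on the fly instead, a sketch with LuceneImpactSearcher; we assume naver/splade-cocondenser-ensembledistil is the checkpoint behind the splade-pp-ed topics:

from pyserini.search.lucene import LuceneImpactSearcher

# Assumption: this HF checkpoint matches the pre-encoded splade-pp-ed topics.
searcher = LuceneImpactSearcher.from_prebuilt_index(
    'beir-v1.0.0-quora.splade-pp-ed',
    'naver/splade-cocondenser-ensembledistil')
hits = searcher.search('how do i improve my english speaking?', k=1000)
print(hits[0].docid, hits[0].score)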
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.splade-pp-ed.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.splade-pp-ed.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.splade-pp-ed.quora.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-quora.contriever-msmarco \
--topics beir-v1.0.0-quora-test \
--output run.beir.contriever-msmarco.quora.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.contriever-msmarco.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.contriever-msmarco.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.contriever-msmarco.quora.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "" \
--index beir-v1.0.0-quora.bge-base-en-v1.5 \
--topics beir-v1.0.0-quora-test \
--output run.beir.bge-base-en-v1.5.faiss.quora.txt \
--hits 1000 --remove-query
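Note the empty --query-prefix: unlike the other corpora, Quora is a symmetric duplicate-question task, so the BGE instruction prefix used for asymmetric passage search is dropped. A Python-API sketch under the same flags (cls pooling and L2 normalization mirror the BGE setup; with an empty prefix, no prefix argument is needed):

from pyserini.search.faiss import FaissSearcher, AutoQueryEncoder

# Mirror the CLI flags: cls pooling, L2-normalized query embeddings,
# and no instruction prefix for Quora.
encoder = AutoQueryEncoder('BAAI/bge-base-en-v1.5', pooling='cls', l2_norm=True)
searcher = FaissSearcher.from_prebuilt_index(
    'beir-v1.0.0-quora.bge-base-en-v1.5', encoder)
hits = searcher.search('how do i improve my english speaking?', k=1000)
print(hits[0].docid, hits[0].score)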
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.bge-base-en-v1.5.faiss.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.bge-base-en-v1.5.faiss.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.bge-base-en-v1.5.faiss.quora.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-quora.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-quora-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-quora-test \
--output run.beir.cohere-embed-english-v3.0.quora.txt \
--hits 1000 --remove-query
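Cohere embed-english-v3.0 is an API model, so this run reads pre-encoded queries via --encoded-queries instead of encoding on the fly. A sketch of the Python-API equivalent; we assume the pre-encoded set is keyed by topic text, so the query must be an actual test topic:

from pyserini.search import get_topics
from pyserini.search.faiss import FaissSearcher, QueryEncoder

# Load the pre-encoded Cohere queries named in --encoded-queries above.
encoder = QueryEncoder.load_encoded_queries(
    'cohere-embed-english-v3.0-beir-v1.0.0-quora-test')
searcher = FaissSearcher.from_prebuilt_index(
    'beir-v1.0.0-quora.cohere-embed-english-v3.0', encoder)

# Pre-encoded queries are looked up by their exact text, so search with a
# real test topic rather than free-form text.
topics = get_topics('beir-v1.0.0-quora-test')
qid, topic = next(iter(topics.items()))
hits = searcher.search(topic['title'], k=1000)
print(qid, hits[0].docid, hits[0].score)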
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-quora-test \
run.beir.cohere-embed-english-v3.0.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-quora-test \
run.beir.cohere-embed-english-v3.0.quora.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-quora-test \
run.beir.cohere-embed-english-v3.0.quora.txt
dbpedia-entity
Model                        nDCG@10   R@100
BM25 (flat)                  0.318     0.468
BM25 (multifield)            0.313     0.398
SPLADE++ (ED)                0.437     0.562
Contriever (MS MARCO)        0.413     0.541
BGE-base-en-v1.5             0.407     0.530
Cohere embed-english-v3.0    0.434     0.536
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-dbpedia-entity.flat \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir.bm25-flat.dbpedia-entity.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-flat.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-dbpedia-entity.multifield \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir.bm25-multifield.dbpedia-entity.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.bm25-multifield.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-dbpedia-entity.splade-pp-ed \
--topics beir-v1.0.0-dbpedia-entity.test.splade-pp-ed \
--output run.beir.splade-pp-ed.dbpedia-entity.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.splade-pp-ed.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.splade-pp-ed.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.splade-pp-ed.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-dbpedia-entity.contriever-msmarco \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir.contriever-msmarco.dbpedia-entity.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.contriever-msmarco.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.contriever-msmarco.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.contriever-msmarco.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-dbpedia-entity.bge-base-en-v1.5 \
--topics beir-v1.0.0-dbpedia-entity-test \
--output run.beir.bge-base-en-v1.5.faiss.dbpedia-entity.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.bge-base-en-v1.5.faiss.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.bge-base-en-v1.5.faiss.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.bge-base-en-v1.5.faiss.dbpedia-entity.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-dbpedia-entity.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-dbpedia-entity-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-dbpedia-entity-test \
--output run.beir.cohere-embed-english-v3.0.dbpedia-entity.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
run.beir.cohere-embed-english-v3.0.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
run.beir.cohere-embed-english-v3.0.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
run.beir.cohere-embed-english-v3.0.dbpedia-entity.txt
scidocs
Model                        nDCG@10   R@100
BM25 (flat)                  0.149     0.348
BM25 (multifield)            0.158     0.356
SPLADE++ (ED)                0.159     0.373
Contriever (MS MARCO)        0.165     0.378
BGE-base-en-v1.5             0.217     0.496
Cohere embed-english-v3.0    0.203     0.451
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scidocs.flat \
--topics beir-v1.0.0-scidocs-test \
--output run.beir.bm25-flat.scidocs.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.bm25-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.bm25-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.bm25-flat.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scidocs.multifield \
--topics beir-v1.0.0-scidocs-test \
--output run.beir.bm25-multifield.scidocs.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.bm25-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.bm25-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.bm25-multifield.scidocs.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scidocs.splade-pp-ed \
--topics beir-v1.0.0-scidocs.test.splade-pp-ed \
--output run.beir.splade-pp-ed.scidocs.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.splade-pp-ed.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.splade-pp-ed.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.splade-pp-ed.scidocs.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-scidocs.contriever-msmarco \
--topics beir-v1.0.0-scidocs-test \
--output run.beir.contriever-msmarco.scidocs.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.contriever-msmarco.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.contriever-msmarco.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.contriever-msmarco.scidocs.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-scidocs.bge-base-en-v1.5 \
--topics beir-v1.0.0-scidocs-test \
--output run.beir.bge-base-en-v1.5.faiss.scidocs.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.bge-base-en-v1.5.faiss.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.bge-base-en-v1.5.faiss.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.bge-base-en-v1.5.faiss.scidocs.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-scidocs.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-scidocs-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-scidocs-test \
--output run.beir.cohere-embed-english-v3.0.scidocs.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
run.beir.cohere-embed-english-v3.0.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scidocs-test \
run.beir.cohere-embed-english-v3.0.scidocs.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scidocs-test \
run.beir.cohere-embed-english-v3.0.scidocs.txt
fever
Model                        nDCG@10   R@100
BM25 (flat)                  0.651     0.918
BM25 (multifield)            0.753     0.931
SPLADE++ (ED)                0.788     0.946
Contriever (MS MARCO)        0.758     0.949
BGE-base-en-v1.5             0.863     0.972
Cohere embed-english-v3.0    0.890     0.965
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fever.flat \
--topics beir-v1.0.0-fever-test \
--output run.beir.bm25-flat.fever.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.bm25-flat.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.bm25-flat.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.bm25-flat.fever.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fever.multifield \
--topics beir-v1.0.0-fever-test \
--output run.beir.bm25-multifield.fever.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.bm25-multifield.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.bm25-multifield.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.bm25-multifield.fever.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-fever.splade-pp-ed \
--topics beir-v1.0.0-fever.test.splade-pp-ed \
--output run.beir.splade-pp-ed.fever.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.splade-pp-ed.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.splade-pp-ed.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.splade-pp-ed.fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-fever.contriever-msmarco \
--topics beir-v1.0.0-fever-test \
--output run.beir.contriever-msmarco.fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.contriever-msmarco.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.contriever-msmarco.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.contriever-msmarco.fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-fever.bge-base-en-v1.5 \
--topics beir-v1.0.0-fever-test \
--output run.beir.bge-base-en-v1.5.faiss.fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.bge-base-en-v1.5.faiss.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.bge-base-en-v1.5.faiss.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.bge-base-en-v1.5.faiss.fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-fever.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-fever-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-fever-test \
--output run.beir.cohere-embed-english-v3.0.fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-fever-test \
run.beir.cohere-embed-english-v3.0.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-fever-test \
run.beir.cohere-embed-english-v3.0.fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-fever-test \
run.beir.cohere-embed-english-v3.0.fever.txt
climate-fever
Model                        nDCG@10   R@100
BM25 (flat)                  0.165     0.425
BM25 (multifield)            0.213     0.436
SPLADE++ (ED)                0.230     0.521
Contriever (MS MARCO)        0.237     0.575
BGE-base-en-v1.5             0.312     0.636
Cohere embed-english-v3.0    0.259     0.581
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-climate-fever.flat \
--topics beir-v1.0.0-climate-fever-test \
--output run.beir.bm25-flat.climate-fever.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.bm25-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.bm25-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.bm25-flat.climate-fever.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-climate-fever.multifield \
--topics beir-v1.0.0-climate-fever-test \
--output run.beir.bm25-multifield.climate-fever.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.bm25-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.bm25-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.bm25-multifield.climate-fever.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-climate-fever.splade-pp-ed \
--topics beir-v1.0.0-climate-fever.test.splade-pp-ed \
--output run.beir.splade-pp-ed.climate-fever.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.splade-pp-ed.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.splade-pp-ed.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.splade-pp-ed.climate-fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-climate-fever.contriever-msmarco \
--topics beir-v1.0.0-climate-fever-test \
--output run.beir.contriever-msmarco.climate-fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.contriever-msmarco.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.contriever-msmarco.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.contriever-msmarco.climate-fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-climate-fever.bge-base-en-v1.5 \
--topics beir-v1.0.0-climate-fever-test \
--output run.beir.bge-base-en-v1.5.faiss.climate-fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.bge-base-en-v1.5.faiss.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.bge-base-en-v1.5.faiss.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.bge-base-en-v1.5.faiss.climate-fever.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-climate-fever.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-climate-fever-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-climate-fever-test \
--output run.beir.cohere-embed-english-v3.0.climate-fever.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
run.beir.cohere-embed-english-v3.0.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-climate-fever-test \
run.beir.cohere-embed-english-v3.0.climate-fever.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-climate-fever-test \
run.beir.cohere-embed-english-v3.0.climate-fever.txt
scifact
Model                        nDCG@10   R@100
BM25 (flat)                  0.679     0.925
BM25 (multifield)            0.665     0.908
SPLADE++ (ED)                0.704     0.935
Contriever (MS MARCO)        0.677     0.947
BGE-base-en-v1.5             0.741     0.967
Cohere embed-english-v3.0    0.718     0.963
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scifact.flat \
--topics beir-v1.0.0-scifact-test \
--output run.beir.bm25-flat.scifact.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.bm25-flat.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.bm25-flat.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.bm25-flat.scifact.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scifact.multifield \
--topics beir-v1.0.0-scifact-test \
--output run.beir.bm25-multifield.scifact.txt \
--output-format trec \
--hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.bm25-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.bm25-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.bm25-multifield.scifact.txt
Command to generate run:
python -m pyserini.search.lucene \
--threads 16 --batch-size 128 \
--index beir-v1.0.0-scifact.splade-pp-ed \
--topics beir-v1.0.0-scifact.test.splade-pp-ed \
--output run.beir.splade-pp-ed.scifact.txt \
--output-format trec \
--hits 1000 --impact --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.splade-pp-ed.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.splade-pp-ed.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.splade-pp-ed.scifact.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class contriever --encoder facebook/contriever-msmarco \
--index beir-v1.0.0-scifact.contriever-msmarco \
--topics beir-v1.0.0-scifact-test \
--output run.beir.contriever-msmarco.scifact.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.contriever-msmarco.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.contriever-msmarco.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.contriever-msmarco.scifact.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
--query-prefix "Represent this sentence for searching relevant passages:" \
--index beir-v1.0.0-scifact.bge-base-en-v1.5 \
--topics beir-v1.0.0-scifact-test \
--output run.beir.bge-base-en-v1.5.faiss.scifact.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.bge-base-en-v1.5.faiss.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.bge-base-en-v1.5.faiss.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.bge-base-en-v1.5.faiss.scifact.txt
Command to generate run:
python -m pyserini.search.faiss \
--threads 16 --batch-size 512 \
--index beir-v1.0.0-scifact.cohere-embed-english-v3.0 \
--topics beir-v1.0.0-scifact-test \
--encoded-queries cohere-embed-english-v3.0-beir-v1.0.0-scifact-test \
--output run.beir.cohere-embed-english-v3.0.scifact.txt \
--hits 1000 --remove-query
Evaluation commands:
python -m pyserini.eval.trec_eval \
-c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
run.beir.cohere-embed-english-v3.0.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.100 beir-v1.0.0-scifact-test \
run.beir.cohere-embed-english-v3.0.scifact.txt
python -m pyserini.eval.trec_eval \
-c -m recall.1000 beir-v1.0.0-scifact-test \
run.beir.cohere-embed-english-v3.0.scifact.txt