-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathretrieval_colbert_preprocess.py
71 lines (56 loc) · 2.31 KB
/
retrieval_colbert_preprocess.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import logging
from dataclasses import dataclass, field
from logging.config import fileConfig
from pathlib import Path
from typing import Literal, Optional

import simple_parsing
from tqdm.auto import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm

from peerqa.data_loader import PaperLoader, QuestionLoader
from peerqa.utils import url_save_hash, url_save_str
fileConfig("logging.ini")
logger = logging.getLogger(__name__)
@dataclass
class Args:
output_dir: Path = field(default=Path("out"))
qa_file: Path = field(default=Path("data/qa.jsonl"))
papers_file: Path = field(default=Path("data/papers.jsonl"))
granularity: Literal["sentences", "paragraphs"] = "sentences"
template: str = None
def main(args):
paper_loader = PaperLoader(args.papers_file)
qa_loader = QuestionLoader(args.qa_file)
subdir = f"colbert-{args.granularity}"
if args.template is not None:
template_hash = url_save_hash(args.template)
logger.info(f"Adding template hash {template_hash} to subdir.")
subdir += f"-{template_hash}"
for paper_id, document_ids, documents in paper_loader(
granularity=args.granularity, template=args.template
):
question_ids, questions = qa_loader.questions_by_paper_id_with_answer_evidence(
paper_id=paper_id
)
if len(questions) == 0:
continue
# Queries: each line is qid \t query text.
queries_path = (
args.output_dir / subdir / f"{url_save_str(paper_id)}" / "queries.tsv"
)
queries_path.parent.mkdir(parents=True, exist_ok=True)
with open(queries_path, "w") as fh:
for question_id, question in zip(question_ids, questions):
fh.write(f"{question_id}\t{question}\n")
# Collection: each line is pid \t passage text.
collection_path = (
args.output_dir / subdir / f"{url_save_str(paper_id)}" / "collection.tsv"
)
collection_path.parent.mkdir(parents=True, exist_ok=True)
with open(collection_path, "w") as fh:
for document_id, document in zip(document_ids, documents):
fh.write(f"{document_id}\t{document}\n")
if __name__ == "__main__":
args, _ = simple_parsing.parse_known_args(Args)
with logging_redirect_tqdm():
logger.info(args)
main(args)