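"""Score Java commit messages with a "tasky"-content classifier and write
per-commit probabilities to a JSONL shard.

Invocation sketch (the script filename is assumed, not from the source):

    python tasky_commits_java.py --start 0 --end 100000 --batch_size 16
"""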
import argparse
import gc
import json
import os
import sys

import datasets
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Number of shard files in the C4 train split (not used in this Java script).
TOTAL_NUM_FILES_C4_TRAIN = 1024


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--start",
        type=int,
        required=True,
        help="Index of the first dataset row to score.",
    )
    parser.add_argument(
        "--end",
        type=int,
        required=True,
        help="Index one past the last dataset row to score.",
    )
    parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
    parser.add_argument(
        "--model_name",
        type=str,
        default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
        help="Model name",
    )
    parser.add_argument(
        "--local_cache_location",
        type=str,
        default="c4_download",
        help="Local cache location from which the dataset will be loaded.",
    )
    # `type=bool` would be a bug here: argparse treats any non-empty string
    # (including "False") as True. BooleanOptionalAction (Python 3.9+) instead
    # provides paired --flag / --no-flag switches.
    parser.add_argument(
        "--use_local_cache_location",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Load the dataset from the local cache.",
    )
    parser.add_argument(
        "--clear_dataset_cache",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Delete the dataset files from the cache after inference.",
    )
    parser.add_argument(
        "--release_memory",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Release the memory of used variables after inference.",
    )

    args = parser.parse_args()
    return args


def chunks(l, n):
    """Yield successive n-sized chunks from list l."""
    for i in range(0, len(l), n):
        yield l[i : i + n]


def batch_tokenize(data, batch_size):
    # `tokenizer` is a module-level global, set in the __main__ block below.
    batches = list(chunks(data, batch_size))
    tokenized_batches = []
    for batch in batches:
        # Pad/truncate every message to 512 tokens so each batch has a uniform shape.
        tensor = tokenizer(
            batch,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=512,
        )
        tokenized_batches.append(tensor)
    return tokenized_batches, batches


def batch_inference(data, batch_size=32):
    # `model` and `device` are module-level globals, set in the __main__ block below.
    preds = []
    tokenized_batches, batches = batch_tokenize(data, batch_size)
    for i in tqdm(range(len(batches))):
        with torch.no_grad():
            # Run the classifier and move the logits back to the CPU so GPU
            # memory is not held across batches.
            logits = model(**tokenized_batches[i].to(device)).logits.cpu()
        preds.extend(logits)
    return preds


if __name__ == "__main__":
    args = parse_args()

    # Skip work if this shard's output already exists.
    tasky_commits_path = f"tasky_commits_java_{args.start}_{args.end}.jsonl"
    if os.path.exists(f"java_add/{tasky_commits_path}"):
        print("Exists:", tasky_commits_path)
        sys.exit(0)

    path = "java_add_messages.jsonl"
    ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
    if args.start >= len(ds):
        sys.exit(0)
    # Slice [start, end) of the dataset, clamped to the dataset length.
    ds = ds[range(args.start, min(args.end, len(ds)))]
    df = pd.DataFrame(ds, index=None)
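
    # Note (assumption, not wired up in the original): --local_cache_location and
    # --use_local_cache_location are parsed above but never used. Presumably they
    # were meant to steer load_dataset's cache, e.g. via
    #   cache_dir=args.local_cache_location if args.use_local_cache_location else None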

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    model.eval()

    texts = df["message"].to_list()
    commits = df["commit"].to_list()
    preds = batch_inference(texts, batch_size=args.batch_size)

    assert len(preds) == len(texts)
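
    # Sketch (assumption, not in the original): --release_memory plausibly freed the
    # model and tokenizer once all logits are on the CPU; the otherwise-unused `gc`
    # import above hints at this. Only `preds`, `texts`, and `commits` are used below.
    if args.release_memory:
        del tokenizer, model
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()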
    tasky_commits_path = f"java_add/tasky_commits_java_{args.start}_{args.end}.jsonl"
    os.makedirs("java_add", exist_ok=True)  # the output directory may not exist yet

    with open(tasky_commits_path, "w") as f:
        for i in range(len(preds)):
            # Softmax over the logits; the last entry is the "tasky" class probability.
            tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()
            f.write(
                json.dumps(
                    {
                        "commit": commits[i],
                        "message": texts[i],
                        "proba": tasky_proba,
                    }
                )
                + "\n"
            )
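
    # Sketch (assumption, not in the original): --clear_dataset_cache presumably
    # deleted the cached dataset files after inference. The directory name is taken
    # from --local_cache_location; adjust if the real cache lives elsewhere.
    if args.clear_dataset_cache:
        import shutil  # local import: only needed on this cleanup path

        shutil.rmtree(args.local_cache_location, ignore_errors=True)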