donfu committed
Commit 007df59 · 1 Parent(s): aae6ba6
Add hacky support for huge files
Browse files
process.py +154 -63
process.py
CHANGED
@@ -10,70 +10,80 @@ import sys
import re
from html2text import html2text
from datasets import load_dataset

-
SOURCE = "stackexchange-{0}"
MAX_ANSWERS = 10
QUESTION_SCORE_TRESHOLD = 0
ANSWER_SCORE_TRESHOLD = 0
HF_DATASET = "donfu/oa-stackexchange"
-
-xml_format_map = {
-    "Id": int,
-    "PostTypeId": int,
-    "CreationDate": str,
-    "Score": int,
-    "ViewCount": int,
-    "Body": str,
-    "AnswerCount": int,
-    "CommentCount": int,
-    "ContentLicense": str,
-    "AcceptedAnswerId": int,
-    "ParentId": int,
-}


def main():
    datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
    for dataset in datasets:
        process_dataset(dataset)


def list_cached_datasets():
-    xml_files = glob.glob(f"{
    datasets = [os.path.splitext(os.path.basename(file))[0] for file in xml_files]
    datasets.sort()
    return datasets


def process_dataset(dataset):
-    xml_file = f"{
    source = SOURCE.format(dataset)
-    if os.path.exists(xml_file):
-        df =
-        oa = convert_to_oa(df)
        save_parquet(oa, dataset)
        # upload_hf(dataset)
    else:
        print(f"XML file {xml_file} not found, please download first. Skipping...")


-def convert_to_oa(all):
    """
    Convert dataframe to Open Assistant format with INSTRUCTION, RESPONSE, SOURCE, METADATA columns

    Only include questions with an AcceptedAnswerId
    """
-
-    "
-        .replace("-", " ")
        .replace("><", ", ")
        .replace("<", "")
        .replace(">", "")
-
-
-    "
-    "
    }
    questions = all[all["AcceptedAnswerId"] != 0]
    merged = pd.merge(
@@ -84,11 +94,13 @@ def convert_to_oa(all):
        right_on="Id",
        suffixes=("_q", "_a"),
    )
    merged["INSTRUCTION"] = (
        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
    )
    merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
-    merged["SOURCE"] =
    merged["METADATA"] = merged.apply(create_metadata, axis=1)

    return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]
@@ -99,63 +111,142 @@ def save_parquet(df, dataset):
    Save Dataframe to Parquet. See here for specs:
    https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
    """
-    parquet_file =
    df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
-    print("Converted


def upload_hf(dataset):
    """
    Upload to Hugging Face
    """
-    parquet_file =
    dataset = load_dataset("parquet", data_files=parquet_file, name=dataset)
    dataset.push_to_hub(HF_DATASET, max_shard_size="500MB")
    print("Uploaded to Hugging Face: " + HF_DATASET)


-
    """
-
-
-    pd.read_xml() errors when XML trees are too large, this is just a hack to
-    download a XML file and parse into a Dataframe. **Not Tested on huge XML files**
-
-    Parameters:
-        response (Requests.Response): Requests response object with the XML data
-
-    Returns:
-        df (DataFrame): A Dataframe from the XML file
    """
-
-
-
-
-
-
-
-
-
-
-
-
-
-


remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
remove_remaining_links = r"https?:\/\/[^\s]+"


# Replace HTML content to markdown but remove links
def to_markdown(text):
-
    text = re.sub(remove_markdown_links_pattern, r"\1", text)
-    text =
-
-    if "http" in text:
-        raise "Found http in markdown: " + text
-    return text


if __name__ == "__main__":
import re
from html2text import html2text
from datasets import load_dataset
+from lxml import etree
+from tqdm import tqdm
+import subprocess
+from merge_parquets import merge_parquet_dir

+
+XML_DIR = "./xml"
SOURCE = "stackexchange-{0}"
MAX_ANSWERS = 10
QUESTION_SCORE_TRESHOLD = 0
ANSWER_SCORE_TRESHOLD = 0
HF_DATASET = "donfu/oa-stackexchange"
+PARQUET_FILE = "{0}.parquet"


def main():
    datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
+    if "temp" in datasets:
+        process_temp_datasets()
    for dataset in datasets:
        process_dataset(dataset)


def list_cached_datasets():
+    xml_files = glob.glob(f"{XML_DIR}/*.xml")
    datasets = [os.path.splitext(os.path.basename(file))[0] for file in xml_files]
    datasets.sort()
    return datasets


def process_dataset(dataset):
+    xml_file = f"{XML_DIR}/{dataset}.xml"
+    parquet_file = PARQUET_FILE.format(dataset)
    source = SOURCE.format(dataset)
+    if os.path.exists(xml_file) and not os.path.exists(parquet_file):
+        df = parse_xml(xml_file)
+        oa = convert_to_oa(df, source)
        save_parquet(oa, dataset)
        # upload_hf(dataset)
    else:
        print(f"XML file {xml_file} not found, please download first. Skipping...")

+def process_temp_datasets():
+    parquet_files = glob.glob(f"temp/?.parquet")
+    for file in parquet_files:
+        print("Reading parquet file: ", file)
+        df = pd.read_parquet(file)
+        print("Converting to Open Assistant format...")
+        oa = convert_to_oa(df, SOURCE.format("stackoverflow"))
+        num = re.search(r"\d", file)[0]
+        parquet_file = f"so/stackoverflow-{num}.parquet"
+        df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
+        print("Wrote parquet file: ", parquet_file)
+
+    merge_parquet_dir("so", "stackoverflow.parquet")
+
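merge_parquet_dir is imported from a local merge_parquets module that is not part of this commit, so its behavior is only implied by the two call sites above. A minimal sketch of what such a helper might look like, assuming it simply concatenates every parquet shard in a directory into one output file (the function body below is an assumption, not the author's module):

# Hypothetical sketch of merge_parquets.merge_parquet_dir -- inferred from the calls
# merge_parquet_dir("so", "stackoverflow.parquet") and merge_parquet_dir("temp", "temp/merged.parquet").
import glob
import pandas as pd

def merge_parquet_dir(directory, output_file):
    # Read every parquet shard in the directory in name order and concatenate them
    files = sorted(glob.glob(f"{directory}/*.parquet"))
    merged = pd.concat((pd.read_parquet(f) for f in files), ignore_index=True)
    # Write the combined frame back out as a single parquet file
    merged.to_parquet(output_file, row_group_size=100, engine="pyarrow", index=False)
    return output_file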
+def convert_to_oa(all, source):
    """
    Convert dataframe to Open Assistant format with INSTRUCTION, RESPONSE, SOURCE, METADATA columns

    Only include questions with an AcceptedAnswerId
    """
+    convert_tags = (
+        lambda raw: raw.replace("-", " ")
        .replace("><", ", ")
        .replace("<", "")
        .replace(">", "")
+    )
+    create_metadata = lambda row: {
+        "tags": convert_tags(row["Tags_q"]),
+        "question_score": row["Score_q"],
+        "answer_score": row["Score_a"],
    }
    questions = all[all["AcceptedAnswerId"] != 0]
    merged = pd.merge(
@@ -84,11 +94,13 @@ def convert_to_oa(all):
        right_on="Id",
        suffixes=("_q", "_a"),
    )
+    del all
+
    merged["INSTRUCTION"] = (
        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
    )
    merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
+    merged["SOURCE"] = source
    merged["METADATA"] = merged.apply(create_metadata, axis=1)

    return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]
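For reference, tags in the Stack Exchange dumps are stored as a single string of angle-bracketed names, and the convert_tags lambda above flattens that into a comma-separated list. A small illustrative example (not part of the commit):

# Example input/output for the tag conversion used in create_metadata above
tags_raw = "<python><machine-learning><xml>"
convert_tags = (
    lambda raw: raw.replace("-", " ")
    .replace("><", ", ")
    .replace("<", "")
    .replace(">", "")
)
print(convert_tags(tags_raw))  # -> "python, machine learning, xml"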
@@ -99,63 +111,142 @@ def save_parquet(df, dataset):
    Save Dataframe to Parquet. See here for specs:
    https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
    """
+    parquet_file = PARQUET_FILE.format(dataset)
    df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
+    print(f"Converted {len(df)} instructions into {parquet_file}")


def upload_hf(dataset):
    """
    Upload to Hugging Face
    """
+    parquet_file = PARQUET_FILE.format(dataset)
    dataset = load_dataset("parquet", data_files=parquet_file, name=dataset)
    dataset.push_to_hub(HF_DATASET, max_shard_size="500MB")
    print("Uploaded to Hugging Face: " + HF_DATASET)

+# Define a custom SAX ContentHandler to extract data from the XML file
+class StackExchangeHandler:
+    def __init__(self, total_rows):
+        self.total_rows = total_rows
+        self.progress_bar = tqdm(total=self.total_rows)
+        self.df = pd.DataFrame(
+            columns=[
+                "Id",
+                "PostTypeId",
+                "Body",
+                "Title",
+                "Tags",
+                "Score",
+                "AcceptedAnswerId",
+                "ParentId",
+            ]
+        )
+
+    def startElement(self, name, attrs):
+        if name == "row":
+            row = {}
+            row["Id"] = int(attrs.getValue("Id"))
+            row["PostTypeId"] = int(attrs.getValue("PostTypeId"))
+            row["Body"] = str(attrs.getValue("Body"))
+            row["Title"] = str(attrs.get("Title", ""))
+            row["Tags"] = str(attrs.get("Tags", ""))
+            row["Score"] = int(attrs.get("Score", 0))
+            row["ParentId"] = int(attrs.get("ParentId", 0))
+            row["AcceptedAnswerId"] = int(attrs.get("AcceptedAnswerId", 0))
+
+            self.df = pd.concat(
+                [self.df, pd.DataFrame([row], columns=self.df.columns)],
+                ignore_index=True,
+            )
+            self.progress_bar.update(1)
+
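StackExchangeHandler is not wired to a parser anywhere in this commit (parse_xml below uses lxml's iterparse instead). If it were meant to drive Python's built-in SAX machinery, the class would need to subclass xml.sax.ContentHandler; a sketch under that assumption, not the author's code:

# Hypothetical SAX wiring -- assumes a ContentHandler subclass so xml.sax can
# call startElement() for each <row> element in the dump.
import xml.sax

class SaxRowHandler(xml.sax.ContentHandler):
    def __init__(self):
        super().__init__()
        self.rows = []

    def startElement(self, name, attrs):
        if name == "row":
            # attrs is a read-only mapping of the element's XML attributes
            self.rows.append({key: attrs.getValue(key) for key in attrs.getNames()})

def parse_with_sax(path):
    handler = SaxRowHandler()
    xml.sax.parse(path, handler)
    return handler.rows

Collecting plain dicts in a list and building the DataFrame once at the end also avoids the quadratic cost of calling pd.concat inside startElement, which is what the class above does.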
+def parse_xml(path: str):
    """
+    Parse (very large) XML files with sax parser and load it into a pandas Dataframe
    """
+    total_rows = int(subprocess.getoutput(f"grep -c '<row' {path}"))
+    print(f"Parsing {total_rows} rows from {path}...")
+    columns = "Id PostTypeId Body Title Tags Score AcceptedAnswerId ParentId"
+    rows = []
+    if total_rows > 50000000:
+        huge_file = True
+        temp_file = 1
+        os.makedirs("temp", exist_ok=True)
+
+    context = etree.iterparse(path, events=("start", "end"))
+
+    for event, element in tqdm(
+        context, total=total_rows * 2
+    ):  # somehow it does not work just with start event, hence *2
+        if event == "start" and element.tag == "row":
+            row = [
+                int(element.get("Id")),
+                int(element.get("PostTypeId")),
+                element.get("Body"),
+                element.get("Title", ""),
+                element.get("Tags", ""),
+                int(element.get("Score", 0)),
+                int(element.get("AcceptedAnswerId", 0)),
+                int(element.get("ParentId", 0)),
+            ]
+            rows.append(row)
+            if huge_file and len(rows) >= 10000000:
+                df = pd.DataFrame(rows, columns=columns.split())
+                df.to_parquet(
+                    f"temp/{temp_file}.parquet", engine="pyarrow", index=False
+                )
+                print(f"Wrote temp/{temp_file}.parquet file")
+                rows = []
+                temp_file += 1
+                del df
+        element.clear()
+        element.getparent().remove(element)
+
+    df = pd.DataFrame(rows, columns=columns.split())
+    if huge_file:
+        df.to_parquet(f"temp/{temp_file}.parquet", engine="pyarrow", index=False)
+        del rows
+        del df
+        print("Merging all temp files...")
+        merge_parquet_dir("temp", "temp/merged.parquet")
+        df = pd.read_parquet("temp/merged.parquet")
+        print(f"Loaded full dataset with {len(df)} rows")
+
+    return df
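The loop above subscribes to both start and end events, which is why the progress total is doubled (total=total_rows * 2). For comparison, a common lxml pattern reads the attributes on end events only and clears elements as it goes; a minimal sketch of that variant under the same <row> attribute layout (not the committed implementation):

# Sketch of the usual lxml "iterate end events and clear" pattern for huge XML dumps.
from lxml import etree

def iter_rows(path):
    for _, element in etree.iterparse(path, events=("end",), tag="row"):
        yield dict(element.attrib)
        # Free the element and already-processed siblings to keep memory flat
        element.clear()
        while element.getprevious() is not None:
            del element.getparent()[0]

Rows yielded this way could then be accumulated in chunks and flushed to temp/*.parquet exactly as parse_xml does above.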


remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
remove_remaining_links = r"https?:\/\/[^\s]+"

+def remove_emojis(string):
+    emoji_pattern = re.compile(
+        "["
+        "\U0001F600-\U0001F64F"  # emoticons
+        "\U0001F300-\U0001F5FF"  # symbols & pictographs
+        "\U0001F680-\U0001F6FF"  # transport & map symbols
+        "\U0001F1E0-\U0001F1FF"  # flags (iOS)
+        "\U00002702-\U000027B0"
+        "\U000024C2-\U0001F251"
+        "]+",
+        flags=re.UNICODE,
+    )
+    return emoji_pattern.sub(r"", string)
+
# Replace HTML content to markdown but remove links
def to_markdown(text):
+    try:
+        text = html2text(text, bodywidth=0).strip()
+    except Exception as e:
+        print(e)
+        text = re.sub(r"<[^>]*>", "", str(text))
    text = re.sub(remove_markdown_links_pattern, r"\1", text)
+    text = remove_emojis(text)
+    return re.sub(remove_remaining_links, "", text)
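A quick illustration of what to_markdown does to a typical HTML body; the exact output depends on the installed html2text version, so the expected string is approximate:

# Rough usage example for to_markdown (output shown is approximate)
html_body = '<p>Use <a href="https://example.com/docs">the docs</a> for <b>details</b></p>'
print(to_markdown(html_body))
# Expected along the lines of: "Use the docs for **details**"
# (html2text converts the HTML, the regexes then strip the link target and any bare URLs)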


if __name__ == "__main__":