From 098fdfef0df789c86e52936d7684118dd33944f1 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Tue, 25 Feb 2025 18:30:44 -0600 Subject: [PATCH 01/50] #42 add some splits for HuggingFaceTB/finemath dataset Big splits for - finemath 3 plus is about 21.4M rows - finemath 4 plus is about 6.7M rows - infiwebmath 3 plus is about 13.9M rows - infiwebmath 4 plus is about 6.3M rows which result in: * train_finemath_3plus and val_finemath_3plus * train_finemath_4plus and val_finemath_4plus * train_infiwebmath_3plus and val_infiwebmath_3plus * train_infiwebmath_4plus and val_infiwebmath_4plus And also some normal "tiny" splits * train 3M rows * train_small 100k rows * train_xsmall 30k rows * train_xxsmall 10k rows and * val 300k rows * val_small 10k rows * val_xsmall 3k rows * val_xxsmall 100 rows --- .../data_prep/convert_dataset_hf.py | 124 +++++++++++++++++- 1 file changed, 123 insertions(+), 1 deletion(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 69b5e68..474b36d 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -158,7 +158,129 @@ def __init__( truncated_samples=100, ) -CONSTS = {'allenai/c4': c4constants, 'the_pile': pileconstants} + +finemathconstants = DatasetConstants( + chars_per_sample=(6212+2163)//2, # TODO: Compute over validation set + chars_per_token=4, # TODO: using same OpenAI estimate +) + +# finemath 3 plus is about 21.4M rows +finemathconstants.splits["train_finemath_3plus"] = DataSplitConstants( + hf_split='train', + folder_split='train_finemath_3plus"]', + raw_samples=19_000_000, + truncated_samples=None, +) + +finemathconstants.splits["val_finemath_3plus"] = DataSplitConstants( + hf_split='train', + folder_split='val_finemath_3plus', + raw_samples=1_900_000, + truncated_samples=None, +) + +# finemath 4 plus is about 6.7M rows +finemathconstants.splits["train_finemath_4plus"] = DataSplitConstants( + hf_split='train', + folder_split='train_finemath_4plus', + raw_samples=6_000_000, + truncated_samples=None, +) + +finemathconstants.splits["val_finemath_4plus"] = DataSplitConstants( + hf_split='train', + folder_split='val_finemath_4plus', + raw_samples=600_000, + truncated_samples=None, +) + +# infiwebmath 3 plus is about 13.9M rows +finemathconstants.splits["train_infiwebmath_3plus"] = DataSplitConstants( + hf_split='train', + folder_split='train_infiwebmath_3plus"]', + raw_samples=12_000_000, + truncated_samples=None, +) + +finemathconstants.splits["val_infiwebmath_3plus"] = DataSplitConstants( + hf_split='train', + folder_split='val_infiwebmath_3plus', + raw_samples=1_200_000, + truncated_samples=None, +) + +# infiwebmath 4 plus is about 6.3M rows +finemathconstants.splits["train_infiwebmath_4plus"] = DataSplitConstants( + hf_split='train', + folder_split='train_infiwebmath_4plus"]', + raw_samples=5_000_000, + truncated_samples=None, +) + +finemathconstants.splits["val_infiwebmath_4plus"] = DataSplitConstants( + hf_split='train', + folder_split='val_infiwebmath_4plus', + raw_samples=500_000, + truncated_samples=None, +) + + +# general splits which max is 3M samples (less than above specific full splits) +finemathconstants.splits["train"] = DataSplitConstants( + hf_split='train', + folder_split='train', + raw_samples=3_000_000, + truncated_samples=None, +) + +finemathconstants.splits['train_small'] = DataSplitConstants( + hf_split='train', + folder_split='train_small', + 
raw_samples=100_000, + truncated_samples=100_000, +) + +finemathconstants.splits['train_xsmall'] = DataSplitConstants( + hf_split='train', + folder_split='train_xsmall', + raw_samples=30_000, + truncated_samples=30_000, +) + +finemathconstants.splits['train_xxsmall'] = DataSplitConstants( + hf_split='train', + folder_split='train_xxsmall', + raw_samples=1000, + truncated_samples=1000, +) + +finemathconstants.splits['val'] = DataSplitConstants( + hf_split='train', + folder_split='val', + raw_samples=300_000, + truncated_samples=None, +) +finemathconstants.splits['val_small'] = DataSplitConstants( + hf_split='train', + folder_split='val_small', + raw_samples=10_000, + truncated_samples=10_000, +) +finemathconstants.splits['val_xsmall'] = DataSplitConstants( + hf_split='train', + folder_split='val_xsmall', + raw_samples=3000, + truncated_samples=3000, +) +finemathconstants.splits['val_xxsmall'] = DataSplitConstants( + hf_split='train', + folder_split='val_xxsmall', + raw_samples=100, + truncated_samples=100, +) + +CONSTS = {'allenai/c4': c4constants, 'the_pile': pileconstants, "HuggingFaceTB/finemath": finemathconstants} + def build_hf_dataset( From d7afb9363906b3955db4e9eb33d23c3cdfde34f2 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 1 Mar 2025 02:25:53 -0600 Subject: [PATCH 02/50] #42 generate train and validation from single train split As finemath only contains the train split, we need to generate the training and validation splits from the IterableDataset and the only tools we have are: shuffle, skip, take. Basically we follow this structure to create corresponding dataset: * pairs of val and training (9 times the validation amount) * if validation skip 9 times the amount and take the amount * if training take the amount --- .../data_prep/convert_dataset_hf.py | 94 +++++++++++-------- 1 file changed, 54 insertions(+), 40 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 474b36d..de994b1 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -168,110 +168,113 @@ def __init__( finemathconstants.splits["train_finemath_3plus"] = DataSplitConstants( hf_split='train', folder_split='train_finemath_3plus"]', - raw_samples=19_000_000, - truncated_samples=None, + raw_samples=int(21_300_000 * .1) * 9, + truncated_samples=int(21_300_000 * .1) * 9, ) finemathconstants.splits["val_finemath_3plus"] = DataSplitConstants( hf_split='train', folder_split='val_finemath_3plus', - raw_samples=1_900_000, - truncated_samples=None, + raw_samples=int(21_300_000 * .1), + truncated_samples=int(21_300_000 * .1) * 9, ) # finemath 4 plus is about 6.7M rows finemathconstants.splits["train_finemath_4plus"] = DataSplitConstants( hf_split='train', folder_split='train_finemath_4plus', - raw_samples=6_000_000, - truncated_samples=None, + raw_samples=int(6_600_000 * .1) * 9, + truncated_samples=int(6_600_000 * .1) * 9, ) finemathconstants.splits["val_finemath_4plus"] = DataSplitConstants( hf_split='train', folder_split='val_finemath_4plus', - raw_samples=600_000, - truncated_samples=None, + raw_samples=int(6_600_000 * .1), + truncated_samples=int(6_600_000 * .1), ) # infiwebmath 3 plus is about 13.9M rows finemathconstants.splits["train_infiwebmath_3plus"] = DataSplitConstants( hf_split='train', folder_split='train_infiwebmath_3plus"]', - raw_samples=12_000_000, - truncated_samples=None, + 
raw_samples=int(13_800_000 * .1) * 9, + truncated_samples=int(13_800_000 * .1) * 9, ) finemathconstants.splits["val_infiwebmath_3plus"] = DataSplitConstants( hf_split='train', folder_split='val_infiwebmath_3plus', - raw_samples=1_200_000, - truncated_samples=None, + raw_samples=int(13_800_000 * .1), + truncated_samples=int(13_800_000 * .1), ) # infiwebmath 4 plus is about 6.3M rows finemathconstants.splits["train_infiwebmath_4plus"] = DataSplitConstants( hf_split='train', folder_split='train_infiwebmath_4plus"]', - raw_samples=5_000_000, - truncated_samples=None, + raw_samples=int(6_200_000 * .1) * 9, + truncated_samples=int(6_200_000 * .1) * 9, ) finemathconstants.splits["val_infiwebmath_4plus"] = DataSplitConstants( hf_split='train', folder_split='val_infiwebmath_4plus', - raw_samples=500_000, - truncated_samples=None, + raw_samples=int(6_200_000 * .1), + truncated_samples=int(6_200_000 * .1), ) -# general splits which max is 3M samples (less than above specific full splits) +# tiny pairs of splits finemathconstants.splits["train"] = DataSplitConstants( hf_split='train', folder_split='train', - raw_samples=3_000_000, - truncated_samples=None, + raw_samples=100_000 * 9, + truncated_samples=100_000 * 9, ) -finemathconstants.splits['train_small'] = DataSplitConstants( +finemathconstants.splits['val'] = DataSplitConstants( hf_split='train', - folder_split='train_small', + folder_split='val', raw_samples=100_000, truncated_samples=100_000, ) -finemathconstants.splits['train_xsmall'] = DataSplitConstants( - hf_split='train', - folder_split='train_xsmall', - raw_samples=30_000, - truncated_samples=30_000, -) - -finemathconstants.splits['train_xxsmall'] = DataSplitConstants( +finemathconstants.splits['train_small'] = DataSplitConstants( hf_split='train', - folder_split='train_xxsmall', - raw_samples=1000, - truncated_samples=1000, + folder_split='train_small', + raw_samples=10_000 * 9, + truncated_samples=10_000 * 9, ) -finemathconstants.splits['val'] = DataSplitConstants( - hf_split='train', - folder_split='val', - raw_samples=300_000, - truncated_samples=None, -) finemathconstants.splits['val_small'] = DataSplitConstants( hf_split='train', folder_split='val_small', raw_samples=10_000, truncated_samples=10_000, ) + +finemathconstants.splits['train_xsmall'] = DataSplitConstants( + hf_split='train', + folder_split='train_xsmall', + raw_samples=3_000 * 9, + truncated_samples=3_000 * 9, +) + finemathconstants.splits['val_xsmall'] = DataSplitConstants( hf_split='train', folder_split='val_xsmall', - raw_samples=3000, - truncated_samples=3000, + raw_samples=3_000, + truncated_samples=3_000, +) + +finemathconstants.splits['train_xxsmall'] = DataSplitConstants( + hf_split='train', + folder_split='train_xxsmall', + raw_samples=100 * 9, + truncated_samples=100 * 9, ) + finemathconstants.splits['val_xxsmall'] = DataSplitConstants( hf_split='train', folder_split='val_xxsmall', @@ -293,6 +296,7 @@ def build_hf_dataset( no_wrap: bool = False, tokenizer: PreTrainedTokenizerBase = None, data_subset: Union[str, None] = None, + current_split = None, ) -> IterableDataset: """Build an IterableDataset over the HF C4 or pile source data. 
@@ -318,6 +322,15 @@ def build_hf_dataset( streaming=True, trust_remote_code=True, ) + # Before converting to No/ConcatTokensDataset shuffle, skip and take + if dataset_name in ["HuggingFaceTB/finemath"]: + hf_dataset = hf_dataset.shuffle(seed=42) + if current_split.folder_split.startswith("val"): + # if validation skip 90% of training set + hf_dataset = hf_dataset.skip(current_split.raw_samples * 9) + hf_dataset = hf_dataset.take(int(current_split.raw_samples)) + else: + hf_dataset = hf_dataset.take(current_split.raw_samples) if mode == ConcatMode.NO_CONCAT: dataset = NoConcatDataset(hf_dataset) else: @@ -496,6 +509,7 @@ def convert_dataset_hf( eos_text=eos_text, no_wrap=no_wrap, tokenizer=built_tokenizer, + current_split=split, ) loader = build_dataloader( dataset=hf_dataset, From d416b7b4fa74af4b79e07c51c4462e333f4e9e92 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 1 Mar 2025 02:54:08 -0600 Subject: [PATCH 03/50] donf't shuffle Take the dataset as is. Annotate number of rows of train/val pair --- .../command_utils/data_prep/convert_dataset_hf.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index de994b1..03e27f5 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -176,7 +176,7 @@ def __init__( hf_split='train', folder_split='val_finemath_3plus', raw_samples=int(21_300_000 * .1), - truncated_samples=int(21_300_000 * .1) * 9, + truncated_samples=int(21_300_000 * .1), ) # finemath 4 plus is about 6.7M rows @@ -225,14 +225,13 @@ def __init__( ) -# tiny pairs of splits +# train/val 1,000,000 rows finemathconstants.splits["train"] = DataSplitConstants( hf_split='train', folder_split='train', raw_samples=100_000 * 9, truncated_samples=100_000 * 9, ) - finemathconstants.splits['val'] = DataSplitConstants( hf_split='train', folder_split='val', @@ -240,13 +239,13 @@ def __init__( truncated_samples=100_000, ) +# train_small/val_small 100,000 rows finemathconstants.splits['train_small'] = DataSplitConstants( hf_split='train', folder_split='train_small', raw_samples=10_000 * 9, truncated_samples=10_000 * 9, ) - finemathconstants.splits['val_small'] = DataSplitConstants( hf_split='train', folder_split='val_small', @@ -254,13 +253,13 @@ def __init__( truncated_samples=10_000, ) +# train_xsmall/val_xsmall 30,000 rows finemathconstants.splits['train_xsmall'] = DataSplitConstants( hf_split='train', folder_split='train_xsmall', raw_samples=3_000 * 9, truncated_samples=3_000 * 9, ) - finemathconstants.splits['val_xsmall'] = DataSplitConstants( hf_split='train', folder_split='val_xsmall', @@ -268,13 +267,13 @@ def __init__( truncated_samples=3_000, ) +# train_xxsmall/val_xxsmall 1,000 rows finemathconstants.splits['train_xxsmall'] = DataSplitConstants( hf_split='train', folder_split='train_xxsmall', raw_samples=100 * 9, truncated_samples=100 * 9, ) - finemathconstants.splits['val_xxsmall'] = DataSplitConstants( hf_split='train', folder_split='val_xxsmall', @@ -324,7 +323,6 @@ def build_hf_dataset( ) # Before converting to No/ConcatTokensDataset shuffle, skip and take if dataset_name in ["HuggingFaceTB/finemath"]: - hf_dataset = hf_dataset.shuffle(seed=42) if current_split.folder_split.startswith("val"): # if validation skip 90% of training set hf_dataset = hf_dataset.skip(current_split.raw_samples * 9) From 
aaa61de6241a53177e319c9a525346f51fc5e4d4 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Tue, 4 Mar 2025 23:50:20 -0600 Subject: [PATCH 04/50] Create test and validation for finemath-4plus and generate sets of 1M, 100k, 10k, 1k --- scripts/data_prep/convert_finemath.py | 66 +++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 scripts/data_prep/convert_finemath.py diff --git a/scripts/data_prep/convert_finemath.py b/scripts/data_prep/convert_finemath.py new file mode 100644 index 0000000..d69b309 --- /dev/null +++ b/scripts/data_prep/convert_finemath.py @@ -0,0 +1,66 @@ +from datasets import load_dataset, load_from_disk, DatasetDict +from huggingface_hub import HfApi #, login + +# login() +hf_dataset = "tyoc213/test2" + +dataset = load_dataset( + path="HuggingFaceTB/finemath", + name="finemath-4plus", + split="train", + # streaming=True, + ) + +# Split with a fixed seed +split_dataset = dataset.train_test_split(test_size=0.1, seed=42) + +combined = DatasetDict({ + "train": split_dataset["train"], + "valid": split_dataset["test"] +}) + +# push full dataset as is +combined.push_to_hub( + hf_dataset, + config_name="full", + private=True, + max_shard_size="300MB") + +# save to parquet locally +for split, dataset in combined.items(): + dataset.to_parquet(f"dataset-{split}.parquet") + +# load from local +data_files = { + "train": "dataset-train.parquet", + "valid": "dataset-valid.parquet" +} +raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) + + + +def create_size_ablation(dataset, total_rows): + """Create a subset with a given percentage of the original data""" + train_size = int(total_rows * .9) + return { + "train": dataset["train"].shuffle(42).select(range(train_size)), + "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), + } + + +a = [1_000_000, 100_000, 10_000, 1000] +l = ["1M", "100k", "10k", "1k"] + +for amount, label in zip(a, l): + ds =create_size_ablation(raw_datasets, amount) + + dsdict = DatasetDict({ + "train": ds["train"], + "valid": ds["valid"], + }) + + dsdict.push_to_hub( + hf_dataset, + config_name=label, + private=False + ) From ff706b38937a542b978738b480b99c5fc123ae2b Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Tue, 4 Mar 2025 23:54:13 -0600 Subject: [PATCH 05/50] revert changes to convert_dataset_hf.py to generate finemath splits --- .../data_prep/convert_dataset_hf.py | 136 +----------------- 1 file changed, 1 insertion(+), 135 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 03e27f5..69b5e68 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -158,131 +158,7 @@ def __init__( truncated_samples=100, ) - -finemathconstants = DatasetConstants( - chars_per_sample=(6212+2163)//2, # TODO: Compute over validation set - chars_per_token=4, # TODO: using same OpenAI estimate -) - -# finemath 3 plus is about 21.4M rows -finemathconstants.splits["train_finemath_3plus"] = DataSplitConstants( - hf_split='train', - folder_split='train_finemath_3plus"]', - raw_samples=int(21_300_000 * .1) * 9, - truncated_samples=int(21_300_000 * .1) * 9, -) - -finemathconstants.splits["val_finemath_3plus"] = DataSplitConstants( - hf_split='train', - folder_split='val_finemath_3plus', - raw_samples=int(21_300_000 * .1), - truncated_samples=int(21_300_000 * .1), -) - -# 
finemath 4 plus is about 6.7M rows -finemathconstants.splits["train_finemath_4plus"] = DataSplitConstants( - hf_split='train', - folder_split='train_finemath_4plus', - raw_samples=int(6_600_000 * .1) * 9, - truncated_samples=int(6_600_000 * .1) * 9, -) - -finemathconstants.splits["val_finemath_4plus"] = DataSplitConstants( - hf_split='train', - folder_split='val_finemath_4plus', - raw_samples=int(6_600_000 * .1), - truncated_samples=int(6_600_000 * .1), -) - -# infiwebmath 3 plus is about 13.9M rows -finemathconstants.splits["train_infiwebmath_3plus"] = DataSplitConstants( - hf_split='train', - folder_split='train_infiwebmath_3plus"]', - raw_samples=int(13_800_000 * .1) * 9, - truncated_samples=int(13_800_000 * .1) * 9, -) - -finemathconstants.splits["val_infiwebmath_3plus"] = DataSplitConstants( - hf_split='train', - folder_split='val_infiwebmath_3plus', - raw_samples=int(13_800_000 * .1), - truncated_samples=int(13_800_000 * .1), -) - -# infiwebmath 4 plus is about 6.3M rows -finemathconstants.splits["train_infiwebmath_4plus"] = DataSplitConstants( - hf_split='train', - folder_split='train_infiwebmath_4plus"]', - raw_samples=int(6_200_000 * .1) * 9, - truncated_samples=int(6_200_000 * .1) * 9, -) - -finemathconstants.splits["val_infiwebmath_4plus"] = DataSplitConstants( - hf_split='train', - folder_split='val_infiwebmath_4plus', - raw_samples=int(6_200_000 * .1), - truncated_samples=int(6_200_000 * .1), -) - - -# train/val 1,000,000 rows -finemathconstants.splits["train"] = DataSplitConstants( - hf_split='train', - folder_split='train', - raw_samples=100_000 * 9, - truncated_samples=100_000 * 9, -) -finemathconstants.splits['val'] = DataSplitConstants( - hf_split='train', - folder_split='val', - raw_samples=100_000, - truncated_samples=100_000, -) - -# train_small/val_small 100,000 rows -finemathconstants.splits['train_small'] = DataSplitConstants( - hf_split='train', - folder_split='train_small', - raw_samples=10_000 * 9, - truncated_samples=10_000 * 9, -) -finemathconstants.splits['val_small'] = DataSplitConstants( - hf_split='train', - folder_split='val_small', - raw_samples=10_000, - truncated_samples=10_000, -) - -# train_xsmall/val_xsmall 30,000 rows -finemathconstants.splits['train_xsmall'] = DataSplitConstants( - hf_split='train', - folder_split='train_xsmall', - raw_samples=3_000 * 9, - truncated_samples=3_000 * 9, -) -finemathconstants.splits['val_xsmall'] = DataSplitConstants( - hf_split='train', - folder_split='val_xsmall', - raw_samples=3_000, - truncated_samples=3_000, -) - -# train_xxsmall/val_xxsmall 1,000 rows -finemathconstants.splits['train_xxsmall'] = DataSplitConstants( - hf_split='train', - folder_split='train_xxsmall', - raw_samples=100 * 9, - truncated_samples=100 * 9, -) -finemathconstants.splits['val_xxsmall'] = DataSplitConstants( - hf_split='train', - folder_split='val_xxsmall', - raw_samples=100, - truncated_samples=100, -) - -CONSTS = {'allenai/c4': c4constants, 'the_pile': pileconstants, "HuggingFaceTB/finemath": finemathconstants} - +CONSTS = {'allenai/c4': c4constants, 'the_pile': pileconstants} def build_hf_dataset( @@ -295,7 +171,6 @@ def build_hf_dataset( no_wrap: bool = False, tokenizer: PreTrainedTokenizerBase = None, data_subset: Union[str, None] = None, - current_split = None, ) -> IterableDataset: """Build an IterableDataset over the HF C4 or pile source data. 
@@ -321,14 +196,6 @@ def build_hf_dataset( streaming=True, trust_remote_code=True, ) - # Before converting to No/ConcatTokensDataset shuffle, skip and take - if dataset_name in ["HuggingFaceTB/finemath"]: - if current_split.folder_split.startswith("val"): - # if validation skip 90% of training set - hf_dataset = hf_dataset.skip(current_split.raw_samples * 9) - hf_dataset = hf_dataset.take(int(current_split.raw_samples)) - else: - hf_dataset = hf_dataset.take(current_split.raw_samples) if mode == ConcatMode.NO_CONCAT: dataset = NoConcatDataset(hf_dataset) else: @@ -507,7 +374,6 @@ def convert_dataset_hf( eos_text=eos_text, no_wrap=no_wrap, tokenizer=built_tokenizer, - current_split=split, ) loader = build_dataloader( dataset=hf_dataset, From 4a0a2132e10e1dc29d18067846123a153c7f932a Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 6 Mar 2025 22:42:40 -0600 Subject: [PATCH 06/50] Add some logs and file checks --- scripts/data_prep/convert_finemath.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/scripts/data_prep/convert_finemath.py b/scripts/data_prep/convert_finemath.py index d69b309..3ce5b6d 100644 --- a/scripts/data_prep/convert_finemath.py +++ b/scripts/data_prep/convert_finemath.py @@ -1,14 +1,16 @@ from datasets import load_dataset, load_from_disk, DatasetDict -from huggingface_hub import HfApi #, login +from huggingface_hub import HfApi, login -# login() -hf_dataset = "tyoc213/test2" +hf_dataset = "LocalResearchGroup/finemath-4plus" +print(f"Enter you credentials to upload dataset {hf_dataset}") +login() + +print(f"Downloading finemath-4plus") dataset = load_dataset( path="HuggingFaceTB/finemath", name="finemath-4plus", split="train", - # streaming=True, ) # Split with a fixed seed @@ -20,6 +22,7 @@ }) # push full dataset as is +print(f"Uploading finemath-4plus full train/val") combined.push_to_hub( hf_dataset, config_name="full", @@ -28,16 +31,21 @@ # save to parquet locally for split, dataset in combined.items(): - dataset.to_parquet(f"dataset-{split}.parquet") + filename = f"dataset-{split}.parquet" + if not Path().exists(filename).exists(): + print(f"Saving dataset-{split}.parquet") + dataset.to_parquet(filename) + else: + print(f"dataset-{split}.parquet exist. 
Skipping...") # load from local data_files = { "train": "dataset-train.parquet", "valid": "dataset-valid.parquet" } -raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) - +print("Loading parquet training/val") +raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) def create_size_ablation(dataset, total_rows): """Create a subset with a given percentage of the original data""" @@ -47,11 +55,11 @@ def create_size_ablation(dataset, total_rows): "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), } - a = [1_000_000, 100_000, 10_000, 1000] l = ["1M", "100k", "10k", "1k"] for amount, label in zip(a, l): + print(f"Creating ablation {label} train/val") ds =create_size_ablation(raw_datasets, amount) dsdict = DatasetDict({ @@ -59,6 +67,7 @@ def create_size_ablation(dataset, total_rows): "valid": ds["valid"], }) + print(f"Uploading ablation {label} train/val") dsdict.push_to_hub( hf_dataset, config_name=label, From 2a5820ed53af677ddc86676637257a155f3c4d7c Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 6 Mar 2025 23:46:18 -0600 Subject: [PATCH 07/50] fix missing import and path check --- scripts/data_prep/convert_finemath.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/data_prep/convert_finemath.py b/scripts/data_prep/convert_finemath.py index 3ce5b6d..c3e7ffd 100644 --- a/scripts/data_prep/convert_finemath.py +++ b/scripts/data_prep/convert_finemath.py @@ -1,5 +1,6 @@ from datasets import load_dataset, load_from_disk, DatasetDict from huggingface_hub import HfApi, login +from pathlib import Path hf_dataset = "LocalResearchGroup/finemath-4plus" print(f"Enter you credentials to upload dataset {hf_dataset}") @@ -32,7 +33,7 @@ # save to parquet locally for split, dataset in combined.items(): filename = f"dataset-{split}.parquet" - if not Path().exists(filename).exists(): + if not Path(filename).exists(): print(f"Saving dataset-{split}.parquet") dataset.to_parquet(filename) else: From c29df23fe06a3d6d94d193df33d8f0c295d1d034 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 8 Mar 2025 11:02:55 -0600 Subject: [PATCH 08/50] Generate train/val for tulu-3-sft-olmo-2-mixture without aya source and ablations of 100k, 10k, 1k --- .../convert_tulu-3-sft-olmo-2-mixture.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py diff --git a/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py b/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py new file mode 100644 index 0000000..d8587a7 --- /dev/null +++ b/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py @@ -0,0 +1,81 @@ +from datasets import load_dataset, load_from_disk, DatasetDict +from huggingface_hub import HfApi, login +from pathlib import Path + +hf_dataset = "LocalResearchGroup/tulu-3-train-val" +print(f"Enter you credentials to upload dataset {hf_dataset}") +login() + + +print(f"Downloading allenai/tulu-3-sft-olmo-2-mixture") +dataset = load_dataset( + path="allenai/tulu-3-sft-olmo-2-mixture", + split="train", + ) + +print(f"Original dataset rows {len(dataset)}") +dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) +print(f"Without aya rows {len(dataset)}") + +# Split with a fixed seed +split_dataset = dataset.train_test_split(test_size=0.1, seed=42) + +combined = DatasetDict({ + "train": split_dataset["train"], + "valid": split_dataset["test"] +}) 
+print(f"train/splits: {len(combined['train'])}/{len(combined['valid'])} rows") + +# push full dataset as is +print(f"Uploading {hf_dataset} full train/val") +combined.push_to_hub( + hf_dataset, + config_name="full", + private=True, + max_shard_size="300MB") + +# save to parquet locally +for split, dataset in combined.items(): + filename = f"dataset-{split}.parquet" + if not Path(filename).exists(): + print(f"Saving dataset-{split}.parquet with {len(dataset)} rows") + dataset.to_parquet(filename) + else: + print(f"dataset-{split}.parquet exist. Skipping...") + +############################### +# load from local +data_files = { + "train": "dataset-train.parquet", + "valid": "dataset-valid.parquet" +} + +print("Loading parquet training/val") +raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) + +def create_size_ablation(dataset, total_rows): + """Create a subset with a given percentage of the original data""" + train_size = int(total_rows * .9) + return { + "train": dataset["train"].shuffle(42).select(range(train_size)), + "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), + } + +a = [100_000, 10_000, 1000] +l = ["100k", "10k", "1k"] + +for amount, label in zip(a, l): + print(f"Creating ablation {label} from original train/val sets") + ds =create_size_ablation(raw_datasets, amount) + + dsdict = DatasetDict({ + "train": ds["train"], + "valid": ds["valid"], + }) + + print(f"Uploading ablation {label} train/val") + dsdict.push_to_hub( + hf_dataset, + config_name=label, + private=False + ) From d00bf8fa932f4a4f4844afdec7390287bccef232 Mon Sep 17 00:00:00 2001 From: Mat Miller Date: Sun, 9 Mar 2025 03:48:17 +0000 Subject: [PATCH 09/50] starting to work on split_hf_datasets.py --- scripts/data_prep/split_hf_datasets.py | 74 ++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 scripts/data_prep/split_hf_datasets.py diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py new file mode 100644 index 0000000..17e65bd --- /dev/null +++ b/scripts/data_prep/split_hf_datasets.py @@ -0,0 +1,74 @@ +from datasets import load_dataset, load_from_disk, DatasetDict +from huggingface_hub import HfApi, login +from pathlib import Path +import os + + +def split_dataset( + inp_ds_path: str, + inp_ds_name: str | None = None, + out_ds_path: str | None = None, + test_size: float = 0.1, + seed: int = 42, + shard_size: str = "300MB", +): + """Split a dataset into train and test sets and save to disk""" + if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): + print("No Hugging Face token found. Please login.") + login() + + print(f"Loading dataset {inp_ds_path}/{inp_ds_name}") + dataset = load_dataset(path=inp_ds_path, name=inp_ds_name, split="train") + + print(f"Splitting dataset") + split_dataset = dataset.train_test_split(test_size=test_size, seed=seed) + + print(f"Saving dataset to {out_ds_path}") + split_dataset.save_to_disk(out_ds_path) + + combined = DatasetDict({"train": split_dataset["train"], "valid": split_dataset["test"]}) + + print(f"Uploading dataset to {out_ds_path}") + combined.push_to_hub(out_ds_path, config_name="full", private=True, max_shard_size=shard_size) + + # save splits locally + for split, dataset in combined.items(): + filename = f"dataset-{split}.parquet" + if not Path(filename).exists(): + print(f"Saving dataset-{split}.parquet") + dataset.to_parquet(filename) + else: + print(f"dataset-{split}.parquet exist. 
Skipping...") + + # load from local + data_files = {"train": "dataset-train.parquet", "valid": "dataset-valid.parquet"} + + print("Loading parquet training/val") + raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) + + +def create_size_ablation(dataset, total_rows): + """Create a subset with a given percentage of the original data""" + train_size = int(total_rows * 0.9) + return { + "train": dataset["train"].shuffle(42).select(range(train_size)), + "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), + } + + +a = [1_000_000, 100_000, 10_000, 1000] +l = ["1M", "100k", "10k", "1k"] + +for amount, label in zip(a, l): + print(f"Creating ablation {label} train/val") + ds = create_size_ablation(raw_datasets, amount) + + dsdict = DatasetDict( + { + "train": ds["train"], + "valid": ds["valid"], + } + ) + + print(f"Uploading ablation {label} train/val") + dsdict.push_to_hub(hf_dataset, config_name=label, private=False) From 5d4d2f77a7ed4300af6daa31a8f01bc3d112834a Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 13 Mar 2025 20:37:49 -0600 Subject: [PATCH 10/50] Refactor to single file and multiple calls for datasets: finemath, tulu and numina --- scripts/data_prep/convert_finemath.py | 76 ---------- .../convert_tulu-3-sft-olmo-2-mixture.py | 81 ----------- scripts/data_prep/split_hf_datasets.py | 137 +++++++++++------- 3 files changed, 81 insertions(+), 213 deletions(-) delete mode 100644 scripts/data_prep/convert_finemath.py delete mode 100644 scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py diff --git a/scripts/data_prep/convert_finemath.py b/scripts/data_prep/convert_finemath.py deleted file mode 100644 index c3e7ffd..0000000 --- a/scripts/data_prep/convert_finemath.py +++ /dev/null @@ -1,76 +0,0 @@ -from datasets import load_dataset, load_from_disk, DatasetDict -from huggingface_hub import HfApi, login -from pathlib import Path - -hf_dataset = "LocalResearchGroup/finemath-4plus" -print(f"Enter you credentials to upload dataset {hf_dataset}") -login() - - -print(f"Downloading finemath-4plus") -dataset = load_dataset( - path="HuggingFaceTB/finemath", - name="finemath-4plus", - split="train", - ) - -# Split with a fixed seed -split_dataset = dataset.train_test_split(test_size=0.1, seed=42) - -combined = DatasetDict({ - "train": split_dataset["train"], - "valid": split_dataset["test"] -}) - -# push full dataset as is -print(f"Uploading finemath-4plus full train/val") -combined.push_to_hub( - hf_dataset, - config_name="full", - private=True, - max_shard_size="300MB") - -# save to parquet locally -for split, dataset in combined.items(): - filename = f"dataset-{split}.parquet" - if not Path(filename).exists(): - print(f"Saving dataset-{split}.parquet") - dataset.to_parquet(filename) - else: - print(f"dataset-{split}.parquet exist. 
Skipping...") - -# load from local -data_files = { - "train": "dataset-train.parquet", - "valid": "dataset-valid.parquet" -} - -print("Loading parquet training/val") -raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) - -def create_size_ablation(dataset, total_rows): - """Create a subset with a given percentage of the original data""" - train_size = int(total_rows * .9) - return { - "train": dataset["train"].shuffle(42).select(range(train_size)), - "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), - } - -a = [1_000_000, 100_000, 10_000, 1000] -l = ["1M", "100k", "10k", "1k"] - -for amount, label in zip(a, l): - print(f"Creating ablation {label} train/val") - ds =create_size_ablation(raw_datasets, amount) - - dsdict = DatasetDict({ - "train": ds["train"], - "valid": ds["valid"], - }) - - print(f"Uploading ablation {label} train/val") - dsdict.push_to_hub( - hf_dataset, - config_name=label, - private=False - ) diff --git a/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py b/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py deleted file mode 100644 index d8587a7..0000000 --- a/scripts/data_prep/convert_tulu-3-sft-olmo-2-mixture.py +++ /dev/null @@ -1,81 +0,0 @@ -from datasets import load_dataset, load_from_disk, DatasetDict -from huggingface_hub import HfApi, login -from pathlib import Path - -hf_dataset = "LocalResearchGroup/tulu-3-train-val" -print(f"Enter you credentials to upload dataset {hf_dataset}") -login() - - -print(f"Downloading allenai/tulu-3-sft-olmo-2-mixture") -dataset = load_dataset( - path="allenai/tulu-3-sft-olmo-2-mixture", - split="train", - ) - -print(f"Original dataset rows {len(dataset)}") -dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) -print(f"Without aya rows {len(dataset)}") - -# Split with a fixed seed -split_dataset = dataset.train_test_split(test_size=0.1, seed=42) - -combined = DatasetDict({ - "train": split_dataset["train"], - "valid": split_dataset["test"] -}) -print(f"train/splits: {len(combined['train'])}/{len(combined['valid'])} rows") - -# push full dataset as is -print(f"Uploading {hf_dataset} full train/val") -combined.push_to_hub( - hf_dataset, - config_name="full", - private=True, - max_shard_size="300MB") - -# save to parquet locally -for split, dataset in combined.items(): - filename = f"dataset-{split}.parquet" - if not Path(filename).exists(): - print(f"Saving dataset-{split}.parquet with {len(dataset)} rows") - dataset.to_parquet(filename) - else: - print(f"dataset-{split}.parquet exist. 
Skipping...") - -############################### -# load from local -data_files = { - "train": "dataset-train.parquet", - "valid": "dataset-valid.parquet" -} - -print("Loading parquet training/val") -raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) - -def create_size_ablation(dataset, total_rows): - """Create a subset with a given percentage of the original data""" - train_size = int(total_rows * .9) - return { - "train": dataset["train"].shuffle(42).select(range(train_size)), - "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), - } - -a = [100_000, 10_000, 1000] -l = ["100k", "10k", "1k"] - -for amount, label in zip(a, l): - print(f"Creating ablation {label} from original train/val sets") - ds =create_size_ablation(raw_datasets, amount) - - dsdict = DatasetDict({ - "train": ds["train"], - "valid": ds["valid"], - }) - - print(f"Uploading ablation {label} train/val") - dsdict.push_to_hub( - hf_dataset, - config_name=label, - private=False - ) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 17e65bd..03142ba 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -1,74 +1,99 @@ +from argparse import ArgumentParser, Namespace + from datasets import load_dataset, load_from_disk, DatasetDict from huggingface_hub import HfApi, login from pathlib import Path import os - -def split_dataset( - inp_ds_path: str, - inp_ds_name: str | None = None, - out_ds_path: str | None = None, - test_size: float = 0.1, - seed: int = 42, - shard_size: str = "300MB", -): - """Split a dataset into train and test sets and save to disk""" - if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): - print("No Hugging Face token found. Please login.") - login() - - print(f"Loading dataset {inp_ds_path}/{inp_ds_name}") - dataset = load_dataset(path=inp_ds_path, name=inp_ds_name, split="train") - - print(f"Splitting dataset") - split_dataset = dataset.train_test_split(test_size=test_size, seed=seed) - - print(f"Saving dataset to {out_ds_path}") - split_dataset.save_to_disk(out_ds_path) - - combined = DatasetDict({"train": split_dataset["train"], "valid": split_dataset["test"]}) - - print(f"Uploading dataset to {out_ds_path}") - combined.push_to_hub(out_ds_path, config_name="full", private=True, max_shard_size=shard_size) - - # save splits locally +def save_to_parquet(combined: DatasetDict, out_ds_path: Path): + data_files = dict() for split, dataset in combined.items(): - filename = f"dataset-{split}.parquet" + filename = out_ds_path /f"{split}.parquet" + data_files[split] = filename if not Path(filename).exists(): - print(f"Saving dataset-{split}.parquet") + print(f"Saving {filename}") dataset.to_parquet(filename) else: - print(f"dataset-{split}.parquet exist. Skipping...") - - # load from local - data_files = {"train": "dataset-train.parquet", "valid": "dataset-valid.parquet"} - - print("Loading parquet training/val") - raw_datasets = load_dataset("parquet", data_dir=".", data_files=data_files) - + print(f"{filename} already exist. 
Skipping...") + return data_files def create_size_ablation(dataset, total_rows): """Create a subset with a given percentage of the original data""" train_size = int(total_rows * 0.9) return { "train": dataset["train"].shuffle(42).select(range(train_size)), - "valid": dataset["valid"].shuffle(42).select(range(total_rows - train_size)), + "test": dataset["test"].shuffle(42).select(range(total_rows - train_size)), } -a = [1_000_000, 100_000, 10_000, 1000] -l = ["1M", "100k", "10k", "1k"] - -for amount, label in zip(a, l): - print(f"Creating ablation {label} train/val") - ds = create_size_ablation(raw_datasets, amount) - - dsdict = DatasetDict( - { - "train": ds["train"], - "valid": ds["valid"], - } - ) - - print(f"Uploading ablation {label} train/val") - dsdict.push_to_hub(hf_dataset, config_name=label, private=False) +def push_ablations(raw_datasets, ablations, hf_repo, config_name, private, shard_size): + print(f"creating ablations from {len(raw_datasets['train'])}/{len(raw_datasets['test'])}") + for label in ablations: + match label[-1]: + case "M": + ds = create_size_ablation(raw_datasets, int(label[:-1]) * 1_000_000) + case "k": + ds = create_size_ablation(raw_datasets, int(label[:-1]) * 1_000) + case _: + ds = raw_datasets + + dsdict = DatasetDict( + { + "train": ds["train"], + "test": ds["test"], + } + ) + + print(f"Uploading ablation {label} train/val") + + dsdict.push_to_hub(hf_repo, config_name=label, private=private, max_shard_size=shard_size) + +def pull_n_push( + hf_ds_tgt, + hf_ds_src, + ds_name=None, + after_pull=None, + test_size: float = 0.1, + seed: int = 42, + saving2parquet=False, + ablations = ("full", "1M", "100k", "10k", "1k"), + private=True, + shard_size: str = "300MB", + purge_cache=False, +): + banner = f"Loading dataset {hf_ds_src}/{'default' if ds_name is None else ds_name}" + print("#"*len(banner)) + print(banner) + print("#"*len(banner)) + dataset = load_dataset(path=hf_ds_src, name=ds_name, split="train") + if after_pull is not None: + dataset = after_pull(dataset) + dataset = dataset.train_test_split(test_size=test_size, seed=seed) + dsd = DatasetDict({"train": dataset["train"], "test": dataset["test"]}) + + if saving2parquet: + print(f"Saving parquet to {hf_ds_tgt} train/test") + out_ds_path = Path(hf_ds_tgt) + out_ds_path.mkdir(parents=True, exist_ok=True) + data_files = save_to_parquet(dsd, out_ds_path.absolute()) + # print(f"Loading parquet training/val from {str(out_ds_path)}\n\n\n") + # dsd = load_dataset(str(out_ds_path)) + + push_ablations(dsd, ablations, hf_ds_tgt, ds_name, private, shard_size) + + if purge_cache: + dataset.cleanup_cache_files() + +def filter_tulu(dataset): + print(f"Original dataset rows {len(dataset)}") + dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) + print(f" current rows {len(dataset)}") + return dataset + +if __name__ == "__main__": + if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): + print("No Hugging Face token found. 
Please login.") + login() + pull_n_push("LocalResearchGroup/split-finemath", "HuggingFaceTB/finemath", "finemath-4plus") + pull_n_push("LocalResearchGroup/split-tulu-3-sft-olmo-2-mixture", "allenai/tulu-3-sft-olmo-2-mixture", after_pull=filter_tulu, ablations=("full", "100k", "10k", "1k")) + pull_n_push("LocalResearchGroup/split-NuminaMath-CoT", "AI-MO/NuminaMath-CoT", ablations=("full", "100k", "10k", "1k")) \ No newline at end of file From 367393d3efa90ea0f37e1788a6fdea4b8b89470e Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 15 Mar 2025 12:13:40 -0600 Subject: [PATCH 11/50] Adding configurations to allow script tokenize new datasets Exported DataSplitConstants, DatsetConstants and CONSTS and added add_dataset_config in `convert_dataset_hf` to allow adding dynamically new datasets. --- llmfoundry/command_utils/__init__.py | 8 +++++ .../data_prep/convert_dataset_hf.py | 3 ++ scripts/data_prep/convert_dataset_hf.py | 31 ++++++++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/llmfoundry/command_utils/__init__.py b/llmfoundry/command_utils/__init__.py index 756e611..39cbd13 100644 --- a/llmfoundry/command_utils/__init__.py +++ b/llmfoundry/command_utils/__init__.py @@ -3,6 +3,10 @@ from llmfoundry.command_utils.data_prep.convert_dataset_hf import ( convert_dataset_hf, convert_dataset_hf_from_args, + DataSplitConstants, + DatasetConstants, + add_dataset_config, + CONSTS, ) from llmfoundry.command_utils.data_prep.convert_dataset_json import ( convert_dataset_json, @@ -45,6 +49,10 @@ 'eval_from_yaml', 'convert_dataset_hf', 'convert_dataset_hf_from_args', + 'add_dataset_config', + 'DataSplitConstants', + 'DatasetConstants', + 'CONSTS', 'convert_dataset_json', 'convert_dataset_json_from_args', 'convert_delta_to_contrastive_mds', diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 69b5e68..6774806 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -160,6 +160,9 @@ def __init__( CONSTS = {'allenai/c4': c4constants, 'the_pile': pileconstants} +def add_dataset_config(name, splits): + global CONSTS + CONSTS[name] = splits def build_hf_dataset( dataset_name: str, diff --git a/scripts/data_prep/convert_dataset_hf.py b/scripts/data_prep/convert_dataset_hf.py index 3b89386..eb9267f 100644 --- a/scripts/data_prep/convert_dataset_hf.py +++ b/scripts/data_prep/convert_dataset_hf.py @@ -4,7 +4,36 @@ """Streaming dataset conversion scripts for C4 and The Pile.""" from argparse import ArgumentParser, Namespace -from llmfoundry.command_utils import convert_dataset_hf_from_args +from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS + +def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("full", 1, 10, 100, 1000)): + ds_const = DatasetConstants( + chars_per_sample=chars_per_sample, # Computed over validation set + chars_per_token=chars_per_token, # OpenAI estimate + ) + total_rows = None + # we generate only train and test use --data_subset --out_root + ds_const.splits[f"train"] = DataSplitConstants( + hf_split="train", + folder_split=f"train", + raw_samples=total_rows, + truncated_samples=total_rows, + ) + + ds_const.splits[f"test"] = DataSplitConstants( + hf_split="test", + folder_split=f"test", + raw_samples=total_rows, + truncated_samples=total_rows, + ) + return ds_const + 
+finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-finemath", finemath) +finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-tulu-3-sft-olmo-2-mixture", finemath) +finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-NuminaMath-CoT", finemath) def parse_args() -> Namespace: From 5ef92f9eb008b9504e9e74d2841704cfe6291bc1 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 21 Mar 2025 22:57:56 -0600 Subject: [PATCH 12/50] OK Splitting into train/test and starting exploring dataset tokenization Tokenization works OK with finemath as it already has the `text` column, we can: * post process when downloading the dataset in `split_hf_dataset.py` or * we can call convert_finetuning_dataset and create the preprocesors at `data_prep/preproc` and start from there (this is a WIP) --- scripts/data_prep/preproc/__init__.py | 6 ++ scripts/data_prep/preproc/preprocs.py | 30 ++++++++ scripts/data_prep/split_hf_datasets.py | 98 +++++++++++++++++++++++++- scripts/data_prep/tokenize_split.py | 53 ++++++++++++++ 4 files changed, 184 insertions(+), 3 deletions(-) create mode 100644 scripts/data_prep/preproc/__init__.py create mode 100644 scripts/data_prep/preproc/preprocs.py create mode 100644 scripts/data_prep/tokenize_split.py diff --git a/scripts/data_prep/preproc/__init__.py b/scripts/data_prep/preproc/__init__.py new file mode 100644 index 0000000..f72f32b --- /dev/null +++ b/scripts/data_prep/preproc/__init__.py @@ -0,0 +1,6 @@ +from preproc.preprocs import pre_tulu, pre_numina, preprocessing_function +__all__ = [ + "pre_tulu", + "pre_numina", + "preprocessing_function", +] diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py new file mode 100644 index 0000000..ff5c63b --- /dev/null +++ b/scripts/data_prep/preproc/preprocs.py @@ -0,0 +1,30 @@ +from llmfoundry.data.finetuning.tasks import ( + DatasetConstructor +) + +dataset_constructor = DatasetConstructor() + +@dataset_constructor.register("tyoc213/split-finemath") +def preprocessing_function(inp: dict) -> dict: + """Format the already-split example.""" + return { + 'prompt': inp['inputs'] + ':', + 'response': inp['targets'], + } + +@dataset_constructor.register("tyoc213/split-tulu-3-sft-olmo-2-mixture") +def pre_tulu(inp: dict): + pro = [m["content"] for m in inp["messages"] if m["role"] == "user"] + ans = [m["content"] for m in inp["messages"] if m["role"] == "assistant"] + + prompt = "\n".join(pro) + answer = "\n".join(ans) + if len(prompt) < len(answer): + # FIXME: padding needed if not we get an error? 
+ prompt = prompt + " " + " " * (len(answer) - len(prompt)) + return {'prompt': prompt, 'response': answer} + + +@dataset_constructor.register("tyoc213/split-NuminaMath-CoT") +def pre_numina(inp: dict): + return {'prompt': inp['problem'], 'response': inp['solution']} diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 03142ba..e7d773b 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -87,13 +87,105 @@ def pull_n_push( def filter_tulu(dataset): print(f"Original dataset rows {len(dataset)}") dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) + print("tulu", dataset.features) + # dataset = dataset.rename_column("messages", "text") + dataset = dataset.flatten() + print("new tulu features: ", dataset.features) print(f" current rows {len(dataset)}") return dataset +def process_numina(dataset): + print("numina", dataset.features) + # dataset = dataset.rename_column("messages", "text") + dataset = dataset.flatten() + print("new numina features", dataset.features) + return dataset + +def upload_token_folder(local_path, target_repo): + api = HfApi() + p = Path(".") + print(f"uploading to finemath-1k tokens {p.absolute()}") + print(f" {str(p/'finemath-k1')}") + api.upload_folder( + folder_path=local_path, + repo_id=target_repo, + repo_type="dataset", + # path_in_repo="", + # commit_message="", + ) + + print("endo!!!") + +def create_upload(): + ###################################################### + # import configurations to tokenize new dataset splits + import tokenize_split + from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS + + configs = [ + { + "target": "tyoc213/split-tulu-3-sft-olmo-2-mixture", + "ablations": ["full", "100k", "10k", "1k"], + }, + # { + # "target": "tyoc213/split-NuminaMath-CoT", + # "ablations": ["full", "100k", "10k", "1k"], + # }, + # { + # "target": "tyoc213/split-finemath", + # "ablations": ["full", "1M", "100k", "10k", "1k"], + # }, + ] + + for c in configs: + folder = c["target"].split("/")[1] + for ablation in c["ablations"]: + args = Namespace( + dataset=c["target"], + data_subset=ablation, + splits=['train', 'test'], + out_root=f'tokenized-{folder}-{ablation}', + compression="zstd", + concat_tokens=None, + tokenizer='HuggingFaceTB/SmolLM2-135M', + tokenizer_kwargs=None, + bos_text=None, + eos_text='<|endoftext|>', + no_wrap=False, + num_workers=None + ) + print(f" {args=}") + convert_dataset_hf_from_args( + dataset=args.dataset, + data_subset=args.data_subset, + splits=args.splits, + out_root=args.out_root, + compression=args.compression, + concat_tokens=args.concat_tokens, + tokenizer=args.tokenizer, + tokenizer_kwargs=args.tokenizer_kwargs, + bos_text=args.bos_text, + eos_text=args.eos_text, + no_wrap=args.no_wrap, + num_workers=args.num_workers, + ) + + print("\n ----------------------------\n"*7) + for c in configs: + for ablation in c["ablations"]: + local_path = Path(".") / f"{c['target']}" / f"{ablation}" + target_repo = c["target"] + print("local", local_path, "target_repo", target_repo) + # upload_token_folder(local_path, target_repo) + # upload all tokenized folders to corresponding repo/folder if __name__ == "__main__": if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): print("No Hugging Face token found. 
Please login.") login() - pull_n_push("LocalResearchGroup/split-finemath", "HuggingFaceTB/finemath", "finemath-4plus") - pull_n_push("LocalResearchGroup/split-tulu-3-sft-olmo-2-mixture", "allenai/tulu-3-sft-olmo-2-mixture", after_pull=filter_tulu, ablations=("full", "100k", "10k", "1k")) - pull_n_push("LocalResearchGroup/split-NuminaMath-CoT", "AI-MO/NuminaMath-CoT", ablations=("full", "100k", "10k", "1k")) \ No newline at end of file + REMOTE_REPO = "LocalResearchGroup" + pull_n_push(f"{REMOTE_REPO}/split-tulu-3-sft-olmo-2-mixture", "allenai/tulu-3-sft-olmo-2-mixture", after_pull=filter_tulu, ablations=("full", "100k", "10k", "1k")) + pull_n_push(f"{REMOTE_REPO}/split-NuminaMath-CoT", "AI-MO/NuminaMath-CoT", after_pull=process_numina, ablations=("full", "100k", "10k", "1k")) + pull_n_push(f"{REMOTE_REPO}/split-finemath", "HuggingFaceTB/finemath", "finemath-4plus", ablations=("100k", "10k", "1k")) + + if False: + create_upload() diff --git a/scripts/data_prep/tokenize_split.py b/scripts/data_prep/tokenize_split.py new file mode 100644 index 0000000..f937cc4 --- /dev/null +++ b/scripts/data_prep/tokenize_split.py @@ -0,0 +1,53 @@ +from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS +from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS + +def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("full", 1, 10, 100, 1000)): + ds_const = DatasetConstants( + chars_per_sample=chars_per_sample, # Computed over validation set + chars_per_token=chars_per_token, # OpenAI estimate + ) + total_rows = None + # we generate only train and test use --data_subset --out_root + ds_const.splits[f"train"] = DataSplitConstants( + hf_split="train", + folder_split=f"train", + raw_samples=total_rows, + truncated_samples=total_rows, + ) + + ds_const.splits[f"test"] = DataSplitConstants( + hf_split="test", + folder_split=f"test", + raw_samples=total_rows, + truncated_samples=total_rows, + ) + return ds_const + +finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-finemath", finemath) +finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-tulu-3-sft-olmo-2-mixture", finemath) +finemath = generate_constants(2163, 4) +add_dataset_config("tyoc213/split-NuminaMath-CoT", finemath) + + +# def do_xxx(): +# args = Namespace(dataset='tyoc213/split-finemath', data_subset='10k', splits=['train', 'test'], out_root='finemath-10k', compression=None, concat_tokens=None, tokenizer='HuggingFaceTB/SmolLM2-135M', tokenizer_kwargs=None, bos_text=None, eos_text='<|endoftext|>', no_wrap=False, num_workers=None) +# convert_dataset_hf_from_args( +# dataset=args.dataset, +# data_subset=args.data_subset, +# splits=args.splits, +# out_root=args.out_root, +# compression=args.compression, +# concat_tokens=args.concat_tokens, +# tokenizer=args.tokenizer, +# tokenizer_kwargs=args.tokenizer_kwargs, +# bos_text=args.bos_text, +# eos_text=args.eos_text, +# no_wrap=args.no_wrap, +# num_workers=args.num_workers, +# ) + + +# if __name__ == "__main__": +# do_xxx() \ No newline at end of file From 3afadc78264034f6c28b19d9e02aa941ecec774c Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 22 Mar 2025 01:24:41 -0600 Subject: [PATCH 13/50] little cleanup --- scripts/data_prep/preproc/preprocs.py | 2 +- scripts/data_prep/split_hf_datasets.py | 21 ++++++++------------- 2 files changed, 9 insertions(+), 14 
deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index ff5c63b..d40ff04 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -1,5 +1,5 @@ from llmfoundry.data.finetuning.tasks import ( - DatasetConstructor + DatasetConstructor, ) dataset_constructor = DatasetConstructor() diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index e7d773b..d69a9f5 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -6,7 +6,7 @@ import os def save_to_parquet(combined: DatasetDict, out_ds_path: Path): - data_files = dict() + data_files = {} for split, dataset in combined.items(): filename = out_ds_path /f"{split}.parquet" data_files[split] = filename @@ -41,7 +41,7 @@ def push_ablations(raw_datasets, ablations, hf_repo, config_name, private, shard { "train": ds["train"], "test": ds["test"], - } + }, ) print(f"Uploading ablation {label} train/val") @@ -89,7 +89,7 @@ def filter_tulu(dataset): dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) print("tulu", dataset.features) # dataset = dataset.rename_column("messages", "text") - dataset = dataset.flatten() + # dataset = dataset.flatten() print("new tulu features: ", dataset.features) print(f" current rows {len(dataset)}") return dataset @@ -97,15 +97,14 @@ def filter_tulu(dataset): def process_numina(dataset): print("numina", dataset.features) # dataset = dataset.rename_column("messages", "text") - dataset = dataset.flatten() + # dataset = dataset.flatten() print("new numina features", dataset.features) return dataset def upload_token_folder(local_path, target_repo): + # WIP: remove constants api = HfApi() p = Path(".") - print(f"uploading to finemath-1k tokens {p.absolute()}") - print(f" {str(p/'finemath-k1')}") api.upload_folder( folder_path=local_path, repo_id=target_repo, @@ -114,10 +113,8 @@ def upload_token_folder(local_path, target_repo): # commit_message="", ) - print("endo!!!") def create_upload(): - ###################################################### # import configurations to tokenize new dataset splits import tokenize_split from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS @@ -152,9 +149,9 @@ def create_upload(): bos_text=None, eos_text='<|endoftext|>', no_wrap=False, - num_workers=None + num_workers=None, ) - print(f" {args=}") + convert_dataset_hf_from_args( dataset=args.dataset, data_subset=args.data_subset, @@ -170,14 +167,12 @@ def create_upload(): num_workers=args.num_workers, ) - print("\n ----------------------------\n"*7) + # upload all tokenized folders to corresponding repo/folder for c in configs: for ablation in c["ablations"]: local_path = Path(".") / f"{c['target']}" / f"{ablation}" target_repo = c["target"] - print("local", local_path, "target_repo", target_repo) # upload_token_folder(local_path, target_repo) - # upload all tokenized folders to corresponding repo/folder if __name__ == "__main__": if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): print("No Hugging Face token found. 
Please login.") From d75086152cd139a2feed9052a3c3c4b5f2780f32 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 22 Mar 2025 23:47:54 -0600 Subject: [PATCH 14/50] numina can now be tokenized, tulu is still failing --- scripts/data_prep/preproc/preprocs.py | 12 ++++++------ scripts/data_prep/split_hf_datasets.py | 19 ++++++++++--------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index d40ff04..d465279 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -2,9 +2,10 @@ DatasetConstructor, ) +HF_REPO="LocalResearchGroup" dataset_constructor = DatasetConstructor() -@dataset_constructor.register("tyoc213/split-finemath") +@dataset_constructor.register(f"{HF_REPO}/split-finemath") def preprocessing_function(inp: dict) -> dict: """Format the already-split example.""" return { @@ -12,19 +13,18 @@ def preprocessing_function(inp: dict) -> dict: 'response': inp['targets'], } -@dataset_constructor.register("tyoc213/split-tulu-3-sft-olmo-2-mixture") +@dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") def pre_tulu(inp: dict): pro = [m["content"] for m in inp["messages"] if m["role"] == "user"] ans = [m["content"] for m in inp["messages"] if m["role"] == "assistant"] prompt = "\n".join(pro) answer = "\n".join(ans) - if len(prompt) < len(answer): - # FIXME: padding needed if not we get an error? - prompt = prompt + " " + " " * (len(answer) - len(prompt)) + assert prompt is not None + assert answer is not None return {'prompt': prompt, 'response': answer} -@dataset_constructor.register("tyoc213/split-NuminaMath-CoT") +@dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") def pre_numina(inp: dict): return {'prompt': inp['problem'], 'response': inp['solution']} diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index d69a9f5..37fc8fe 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -86,18 +86,19 @@ def pull_n_push( def filter_tulu(dataset): print(f"Original dataset rows {len(dataset)}") - dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"]) + # filter out messages of lenght = 2 user+assistant + # FIXME: extra checks finding [None] * 512 batch? + dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"] and len(r["messages"]) == 2 and r["messages"] is not None and r["messages"][0] is not None and r["messages"][1] is not None) print("tulu", dataset.features) - # dataset = dataset.rename_column("messages", "text") - # dataset = dataset.flatten() + dataset = dataset.remove_columns(["source", "dataset"]) print("new tulu features: ", dataset.features) print(f" current rows {len(dataset)}") return dataset def process_numina(dataset): print("numina", dataset.features) - # dataset = dataset.rename_column("messages", "text") - # dataset = dataset.flatten() + # remove column that on batch of 512 only has 2 rows which breaks pytorch collate! 
+ dataset = dataset.remove_columns("messages") print("new numina features", dataset.features) return dataset @@ -124,10 +125,10 @@ def create_upload(): "target": "tyoc213/split-tulu-3-sft-olmo-2-mixture", "ablations": ["full", "100k", "10k", "1k"], }, - # { - # "target": "tyoc213/split-NuminaMath-CoT", - # "ablations": ["full", "100k", "10k", "1k"], - # }, + { + "target": "tyoc213/split-NuminaMath-CoT", + "ablations": ["full", "100k", "10k", "1k"], + }, # { # "target": "tyoc213/split-finemath", # "ablations": ["full", "1M", "100k", "10k", "1k"], From 1230588b4204573898772d1b93b72a02aac82edd Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Wed, 26 Mar 2025 09:51:12 -0600 Subject: [PATCH 15/50] split q/a messages column into prompt/response and remove unnecessary columns --- scripts/data_prep/preproc/preprocs.py | 9 +-------- scripts/data_prep/split_hf_datasets.py | 8 ++++++++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index d465279..f23a23a 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -15,14 +15,7 @@ def preprocessing_function(inp: dict) -> dict: @dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") def pre_tulu(inp: dict): - pro = [m["content"] for m in inp["messages"] if m["role"] == "user"] - ans = [m["content"] for m in inp["messages"] if m["role"] == "assistant"] - - prompt = "\n".join(pro) - answer = "\n".join(ans) - assert prompt is not None - assert answer is not None - return {'prompt': prompt, 'response': answer} + return {'prompt': inp["prompt"], 'response': inp["response"]} @dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 37fc8fe..025176f 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -91,6 +91,14 @@ def filter_tulu(dataset): dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"] and len(r["messages"]) == 2 and r["messages"] is not None and r["messages"][0] is not None and r["messages"][1] is not None) print("tulu", dataset.features) dataset = dataset.remove_columns(["source", "dataset"]) + def extract_qa(messages): + user_question = next((msg["content"] for msg in messages if msg["role"] == "user"), None) + assistant_response = next((msg["content"] for msg in messages if msg["role"] == "assistant"), None) + return {"prompt": user_question, "response": assistant_response} + + # Apply function to dataset + dataset = dataset.map(lambda example: extract_qa(example["messages"])) + dataset = dataset.remove_columns(["messages"]) print("new tulu features: ", dataset.features) print(f" current rows {len(dataset)}") return dataset From 9db7f4b95a96bfc8234d12e9b0d27a06d14f7b67 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 28 Mar 2025 02:37:24 -0600 Subject: [PATCH 16/50] `data_prep/split_hf_datasets.py -h` for help and `data_prep/split_hf_datasets.py` to run all from start to finish (split+tokenize+upload works for finemath) * Take into account that currently you need to run manually `python data_prep/convert_finetuning_dataset.py --dataset tyoc213/split-tulu-3-sft-olmo-2-mixture --data_subset 100k --splits train test --out_root tulu100kzstd --tokenizer HuggingFaceTB/SmolLM2-135M --preprocessor preproc:pre_tulu --num_workers 0 --compression zstd` 
to tokenize tulu or numina (preproc methods are provided: pre_tulu, pre_numina) * to skip to upload tokenized folder `python data_prep/split_hf_datasets.py --target tyoc213 --source finemath --no-split --no-tokenize` --- scripts/data_prep/split_hf_datasets.py | 188 ++++++++++++++++--------- 1 file changed, 120 insertions(+), 68 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 025176f..a347188 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -1,4 +1,4 @@ -from argparse import ArgumentParser, Namespace +from argparse import ArgumentParser, Namespace, BooleanOptionalAction from datasets import load_dataset, load_from_disk, DatasetDict from huggingface_hub import HfApi, login @@ -44,7 +44,7 @@ def push_ablations(raw_datasets, ablations, hf_repo, config_name, private, shard }, ) - print(f"Uploading ablation {label} train/val") + print(f"\nUploading ablation {label} train/val") dsdict.push_to_hub(hf_repo, config_name=label, private=private, max_shard_size=shard_size) @@ -64,7 +64,9 @@ def pull_n_push( banner = f"Loading dataset {hf_ds_src}/{'default' if ds_name is None else ds_name}" print("#"*len(banner)) print(banner) + print(f"path={hf_ds_src=}, name={ds_name=}, split=train") print("#"*len(banner)) + dataset = load_dataset(path=hf_ds_src, name=ds_name, split="train") if after_pull is not None: dataset = after_pull(dataset) @@ -72,7 +74,10 @@ def pull_n_push( dsd = DatasetDict({"train": dataset["train"], "test": dataset["test"]}) if saving2parquet: - print(f"Saving parquet to {hf_ds_tgt} train/test") + b = f"Saving parquet to {hf_ds_tgt} train/test" + print("=" * len(b)) + print(b) + print("=" * len(b)) out_ds_path = Path(hf_ds_tgt) out_ds_path.mkdir(parents=True, exist_ok=True) data_files = save_to_parquet(dsd, out_ds_path.absolute()) @@ -111,85 +116,132 @@ def process_numina(dataset): return dataset def upload_token_folder(local_path, target_repo): - # WIP: remove constants + print(f"upload_token_folder({str(local_path.absolute())=}, {target_repo=})") api = HfApi() - p = Path(".") - api.upload_folder( + api.upload_large_folder( folder_path=local_path, repo_id=target_repo, repo_type="dataset", - # path_in_repo="", - # commit_message="", ) - -def create_upload(): +def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-135M'): # import configurations to tokenize new dataset splits import tokenize_split from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS - configs = [ - { - "target": "tyoc213/split-tulu-3-sft-olmo-2-mixture", - "ablations": ["full", "100k", "10k", "1k"], + for s in args.source: + + d = datasets[s] + folder = d["target"].split("/")[1] + for ablation in d["ablations"]: + if s == "finemath": + print("\ngenerating tokens for", s, ablation) + convert_dataset_hf_from_args( + dataset=d["target"], + data_subset=ablation, + splits=['train', 'test'], + out_root=f'tokenized-{s}-{ablation}', + compression="zstd", + concat_tokens=None, + tokenizer=tokenizer, + tokenizer_kwargs=None, + bos_text=None, + eos_text='<|endoftext|>', + no_wrap=False, + num_workers=None, + ) + else: + print(f"\nskipping {s} as it is an instruct dataset and doesn't containt text column") + print("use data_prep/convert_finetuning_dataset.py instead") + print(f"{d=}") + +def create_upload(args, datasets): + # upload all tokenized folders to corresponding repo/folder + for s in args.source: + d = 
datasets[s] + for ablation in d["ablations"]: + if s == "finemath": + local_path = Path(".") / f"tokenized-{s}-{ablation}" + target_repo = d["target"] + upload_token_folder(local_path, target_repo) + else: + print(f"skipping upload of tokens for {s}") + + +def upload_splits(args, datas): + for arg in args.source: + d = datas[arg] + ds_name = d.get("ds_name", None) + pull_n_push( + d["target"], + d["src"], + ds_name=ds_name, + ablations=d["ablations"], + after_pull=d.get("after_pull", None), + ) + + +def main(args): + datasets = { + "tulu": { + "src": "allenai/tulu-3-sft-olmo-2-mixture", + "target": f"{args.target_repo}/split-tulu-3-sft-olmo-2-mixture", + "after_pull": filter_tulu, + "ablations": ("full", "100k", "10k", "1k"), }, - { - "target": "tyoc213/split-NuminaMath-CoT", - "ablations": ["full", "100k", "10k", "1k"], + "numina": { + "src": "AI-MO/NuminaMath-CoT", + "target": f"{args.target_repo}/split-NuminaMath-CoT", + "after_pull": process_numina, + "ablations": ("full", "100k", "10k", "1k"), }, - # { - # "target": "tyoc213/split-finemath", - # "ablations": ["full", "1M", "100k", "10k", "1k"], - # }, - ] - - for c in configs: - folder = c["target"].split("/")[1] - for ablation in c["ablations"]: - args = Namespace( - dataset=c["target"], - data_subset=ablation, - splits=['train', 'test'], - out_root=f'tokenized-{folder}-{ablation}', - compression="zstd", - concat_tokens=None, - tokenizer='HuggingFaceTB/SmolLM2-135M', - tokenizer_kwargs=None, - bos_text=None, - eos_text='<|endoftext|>', - no_wrap=False, - num_workers=None, - ) - - convert_dataset_hf_from_args( - dataset=args.dataset, - data_subset=args.data_subset, - splits=args.splits, - out_root=args.out_root, - compression=args.compression, - concat_tokens=args.concat_tokens, - tokenizer=args.tokenizer, - tokenizer_kwargs=args.tokenizer_kwargs, - bos_text=args.bos_text, - eos_text=args.eos_text, - no_wrap=args.no_wrap, - num_workers=args.num_workers, - ) + "finemath" :{ + "src": "HuggingFaceTB/finemath", + "ds_name": "finemath-4plus", + "target": f"{args.target_repo}/split-finemath", + "ablations": ("full", "1M", "100k", "10k", "1k"), + } + } + if args.split: + d = upload_splits(args, datasets) + if args.tokenize: + create_pretraining_tokens(args, datasets) + if args.upload: + create_upload(args, datasets) + + +def parse_args() -> Namespace: + """Parse commandline arguments.""" + parser = ArgumentParser( + description= + 'Split to train/test 1M, 100k, 10k, 1k and tokenize', + ) + parser.add_argument( + '--source', + nargs='+', + choices=['tulu', 'numina', 'finemath'], + default=['tulu', 'numina', 'finemath'], + ) + + parser.add_argument( + "--target_repo", + default="LocalResearchGroup", + help="target repo to upload splits and tokenizations", + ) + + parser.add_argument('--split', action=BooleanOptionalAction, default=True, help="split generation") + parser.add_argument('--tokenize', action=BooleanOptionalAction, default=True, help="generate tokenization for splits") + # parser.add_argument('--tokenize-instruct', action=BooleanOptionalAction, default=True, help="generate tokenization for splits") + parser.add_argument('--upload', action=BooleanOptionalAction, default=True, help="upload tokenization folders") + # parser.add_argument('--upload-instruct-tokens', action=BooleanOptionalAction, default=True, help="upload tokenization folders") + + parsed = parser.parse_args() + return parsed + - # upload all tokenized folders to corresponding repo/folder - for c in configs: - for ablation in c["ablations"]: - local_path = Path(".") / 
f"{c['target']}" / f"{ablation}" - target_repo = c["target"] - # upload_token_folder(local_path, target_repo) if __name__ == "__main__": + args = parse_args() if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): print("No Hugging Face token found. Please login.") login() - REMOTE_REPO = "LocalResearchGroup" - pull_n_push(f"{REMOTE_REPO}/split-tulu-3-sft-olmo-2-mixture", "allenai/tulu-3-sft-olmo-2-mixture", after_pull=filter_tulu, ablations=("full", "100k", "10k", "1k")) - pull_n_push(f"{REMOTE_REPO}/split-NuminaMath-CoT", "AI-MO/NuminaMath-CoT", after_pull=process_numina, ablations=("full", "100k", "10k", "1k")) - pull_n_push(f"{REMOTE_REPO}/split-finemath", "HuggingFaceTB/finemath", "finemath-4plus", ablations=("100k", "10k", "1k")) - - if False: - create_upload() + main(args) From 96d1bd0a3e7ba71c490a889cb5e54cc10725a120 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 29 Mar 2025 01:06:57 -0600 Subject: [PATCH 17/50] Making use of convert_finetuning_dataset_from_args to tokenize tulu and nomina, token folder has same shape so all can be uploaded --- scripts/data_prep/split_hf_datasets.py | 52 ++++++++++++++++---------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index a347188..56d3553 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -3,6 +3,8 @@ from datasets import load_dataset, load_from_disk, DatasetDict from huggingface_hub import HfApi, login from pathlib import Path + +from convert_finetuning_dataset import convert_finetuning_dataset_from_args import os def save_to_parquet(combined: DatasetDict, out_ds_path: Path): @@ -48,6 +50,7 @@ def push_ablations(raw_datasets, ablations, hf_repo, config_name, private, shard dsdict.push_to_hub(hf_repo, config_name=label, private=private, max_shard_size=shard_size) + def pull_n_push( hf_ds_tgt, hf_ds_src, @@ -81,8 +84,6 @@ def pull_n_push( out_ds_path = Path(hf_ds_tgt) out_ds_path.mkdir(parents=True, exist_ok=True) data_files = save_to_parquet(dsd, out_ds_path.absolute()) - # print(f"Loading parquet training/val from {str(out_ds_path)}\n\n\n") - # dsd = load_dataset(str(out_ds_path)) push_ablations(dsd, ablations, hf_ds_tgt, ds_name, private, shard_size) @@ -91,9 +92,7 @@ def pull_n_push( def filter_tulu(dataset): print(f"Original dataset rows {len(dataset)}") - # filter out messages of lenght = 2 user+assistant - # FIXME: extra checks finding [None] * 512 batch? 
- dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"] and len(r["messages"]) == 2 and r["messages"] is not None and r["messages"][0] is not None and r["messages"][1] is not None) + dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"] and len(r["messages"]) == 2) print("tulu", dataset.features) dataset = dataset.remove_columns(["source", "dataset"]) def extract_qa(messages): @@ -116,12 +115,13 @@ def process_numina(dataset): return dataset def upload_token_folder(local_path, target_repo): - print(f"upload_token_folder({str(local_path.absolute())=}, {target_repo=})") + print(f"upload_token_folder({str(local_path.relative_to("."))=}, {target_repo=})") api = HfApi() - api.upload_large_folder( + api.upload_folder( folder_path=local_path, repo_id=target_repo, repo_type="dataset", + path_in_repo=str(local_path.relative_to(".")) ) def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-135M'): @@ -140,7 +140,7 @@ def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-1 dataset=d["target"], data_subset=ablation, splits=['train', 'test'], - out_root=f'tokenized-{s}-{ablation}', + out_root=f'tokenized/{s}/{ablation}', compression="zstd", concat_tokens=None, tokenizer=tokenizer, @@ -151,21 +151,35 @@ def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-1 num_workers=None, ) else: - print(f"\nskipping {s} as it is an instruct dataset and doesn't containt text column") - print("use data_prep/convert_finetuning_dataset.py instead") - print(f"{d=}") + print(f"\nconvert_finetuning_dataset_from_args") + convert_finetuning_dataset_from_args( + d["target"], + f"{ablation}", # data_subset + ['train', 'test'], + d["preproc"], + [], + False, + f'tokenized/{s}/{ablation}', # out_root + None, + "zstd", + None, # num_workers + "HuggingFaceTB/SmolLM2-135M", # tokenizer + None, + 2048, # max_seq_len + "none", # target_prompts + "last", # target_responses + False, # encoder_decoder + ) def create_upload(args, datasets): # upload all tokenized folders to corresponding repo/folder for s in args.source: d = datasets[s] for ablation in d["ablations"]: - if s == "finemath": - local_path = Path(".") / f"tokenized-{s}-{ablation}" - target_repo = d["target"] - upload_token_folder(local_path, target_repo) - else: - print(f"skipping upload of tokens for {s}") + target_repo = d["target"] + local_path = Path(".") / f"tokenized/{s}/{ablation}" + print(f"\nUploading {ablation} to {target_repo} from {str(local_path)}\n") + upload_token_folder(local_path, target_repo) def upload_splits(args, datas): @@ -188,12 +202,14 @@ def main(args): "target": f"{args.target_repo}/split-tulu-3-sft-olmo-2-mixture", "after_pull": filter_tulu, "ablations": ("full", "100k", "10k", "1k"), + "preproc":"preproc:pre_tulu", }, "numina": { "src": "AI-MO/NuminaMath-CoT", "target": f"{args.target_repo}/split-NuminaMath-CoT", "after_pull": process_numina, "ablations": ("full", "100k", "10k", "1k"), + "preproc":"preproc:pre_numina", }, "finemath" :{ "src": "HuggingFaceTB/finemath", @@ -231,9 +247,7 @@ def parse_args() -> Namespace: parser.add_argument('--split', action=BooleanOptionalAction, default=True, help="split generation") parser.add_argument('--tokenize', action=BooleanOptionalAction, default=True, help="generate tokenization for splits") - # parser.add_argument('--tokenize-instruct', action=BooleanOptionalAction, default=True, help="generate tokenization for splits") parser.add_argument('--upload', 
action=BooleanOptionalAction, default=True, help="upload tokenization folders") - # parser.add_argument('--upload-instruct-tokens', action=BooleanOptionalAction, default=True, help="upload tokenization folders") parsed = parser.parse_args() return parsed From e76f1eec2c24f2550d2967f717f317b35792a85f Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sun, 30 Mar 2025 01:22:12 -0600 Subject: [PATCH 18/50] feat `download_repo.py` and `modal_script#pull_hf_to_folder` --- scripts/data_prep/download_repo.py | 62 ++++++++++++++++++++++++++++++ scripts/modal/modal_script.py | 25 ++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 scripts/data_prep/download_repo.py diff --git a/scripts/data_prep/download_repo.py b/scripts/data_prep/download_repo.py new file mode 100644 index 0000000..6a70ecc --- /dev/null +++ b/scripts/data_prep/download_repo.py @@ -0,0 +1,62 @@ +from argparse import ArgumentParser, Namespace, BooleanOptionalAction +from huggingface_hub import HfApi, login +import os + + +def main(args): + api = HfApi() + datasets = { + "tulu": { + "target": f"{args.repo}/split-tulu-3-sft-olmo-2-mixture", + "ablations": ("full", "100k", "10k", "1k"), + }, + "numina": { + "target": f"{args.repo}/split-NuminaMath-CoT", + "ablations": ("full", "100k", "10k", "1k"), + }, + "finemath" :{ + "target": f"{args.repo}/split-finemath", + "ablations": ("full", "1M", "100k", "10k", "1k"), + } + } + datas_list = args.dataset + + from pprint import pp + pp(datasets) + for ds in datas_list: + print(f"downloading {datasets[ds]["target"]=} to download-{ds}-tokenized\n") + local_dir = api.snapshot_download( + repo_id=datasets[ds]["target"], + repo_type="dataset", + local_dir=f"download-{ds}-tokenized", + ) + +def parse_args() -> Namespace: + """Parse commandline arguments.""" + parser = ArgumentParser( + description= + 'Downloads tokenized versions of train/test 1M, 100k, 10k, 1k', + ) + parser.add_argument( + '--dataset', + nargs='+', + choices=['tulu', 'numina', 'finemath'], + default=['tulu', 'numina', 'finemath'], + ) + + parser.add_argument( + "--repo", + default="LocalResearchGroup", + help="repo containing tokenizations", + ) + + parsed = parser.parse_args() + return parsed + + +if __name__ == "__main__": + args = parse_args() + if not os.environ.get("HUGGING_FACE_HUB_TOKEN"): + print("No Hugging Face token found. 
Please login.") + login() + main(args) \ No newline at end of file diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 32cbbdc..14d1b89 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -411,6 +411,31 @@ def process_datasets(): if result.stderr: print("Process dataset errors:", result.stderr) +@app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], + volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, + concurrency_limit=1) +def pull_hf_to_folder(checkpoint_path: str, prompts: list[str]|str|None=None): + import subprocess + import os + + # Change to llm-foundry/scripts directory at the start + os.chdir("/llm-foundry/scripts") + print(f"Working directory: {os.getcwd()}") + + # Step 1: pull all tokens + print("Downloading all repos...") + data_prep_cmd = [ + PYTHON_PATH, # Use the correct Python interpreter + "data_prep/download_repo.py", + ] + result = subprocess.run(data_prep_cmd, capture_output=True, text=True) + print(result.stdout) + if result.stderr: + print("Download data errors:", result.stderr) + + DATASETS_VOLUME.commit() + + @app.local_entrypoint() def main(): from pathlib import Path From c8c0a9ce905a82632c248c11be81389dd1317d72 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sun, 30 Mar 2025 10:50:49 -0600 Subject: [PATCH 19/50] fix modal DATASETS_VOLUME_MOUNT_PATH and args --- scripts/modal/modal_script.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 14d1b89..013d976 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -412,9 +412,9 @@ def process_datasets(): print("Process dataset errors:", result.stderr) @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], - volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, + volumes={DATASETS_VOLUME_MOUNT_PATH: DATASETS_VOLUME}, concurrency_limit=1) -def pull_hf_to_folder(checkpoint_path: str, prompts: list[str]|str|None=None): +def pull_hf_to_folder(): import subprocess import os From fadae9e18bc32155c03f66cbba33e536bb53586d Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sun, 30 Mar 2025 12:27:35 -0600 Subject: [PATCH 20/50] fix autoscaling parameter https://modal.com/docs/guide/modal-1-0-migration --- scripts/modal/modal_script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 013d976..4c9f8fe 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -413,7 +413,7 @@ def process_datasets(): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={DATASETS_VOLUME_MOUNT_PATH: DATASETS_VOLUME}, - concurrency_limit=1) + max_containers=1) def pull_hf_to_folder(): import subprocess import os From dd74ed2be5ce337994850cd61417f37f262b420b Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sun, 30 Mar 2025 21:27:17 -0600 Subject: [PATCH 21/50] add pull_hf_to_folder which downloads: tulu, numina and finemath hf splits --- scripts/data_prep/download_repo.py | 22 ++++++++++++---------- scripts/modal/modal_script.py | 3 ++- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/scripts/data_prep/download_repo.py b/scripts/data_prep/download_repo.py index 6a70ecc..ffa5c17 100644 --- 
a/scripts/data_prep/download_repo.py +++ b/scripts/data_prep/download_repo.py @@ -8,27 +8,23 @@ def main(args): datasets = { "tulu": { "target": f"{args.repo}/split-tulu-3-sft-olmo-2-mixture", - "ablations": ("full", "100k", "10k", "1k"), }, "numina": { "target": f"{args.repo}/split-NuminaMath-CoT", - "ablations": ("full", "100k", "10k", "1k"), }, "finemath" :{ "target": f"{args.repo}/split-finemath", - "ablations": ("full", "1M", "100k", "10k", "1k"), } } - datas_list = args.dataset - from pprint import pp - pp(datasets) - for ds in datas_list: - print(f"downloading {datasets[ds]["target"]=} to download-{ds}-tokenized\n") + for ds in args.dataset: + ld = f"{args.out}/{ds}" + datadown = datasets[ds]["target"] + print(f"downloading {datadown=} to {ld=}\n") local_dir = api.snapshot_download( - repo_id=datasets[ds]["target"], + repo_id=datadown, repo_type="dataset", - local_dir=f"download-{ds}-tokenized", + local_dir=ld, ) def parse_args() -> Namespace: @@ -49,6 +45,12 @@ def parse_args() -> Namespace: default="LocalResearchGroup", help="repo containing tokenizations", ) + + parser.add_argument( + "--out", + default=".", + help="local download folder", + ) parsed = parser.parse_args() return parsed diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 4c9f8fe..950af6a 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -423,10 +423,11 @@ def pull_hf_to_folder(): print(f"Working directory: {os.getcwd()}") # Step 1: pull all tokens - print("Downloading all repos...") + print(f"Downloading repos to {DATASETS_VOLUME_MOUNT_PATH}/snapshot...") data_prep_cmd = [ PYTHON_PATH, # Use the correct Python interpreter "data_prep/download_repo.py", + "--out", f"{DATASETS_VOLUME_MOUNT_PATH}/snapshot", ] result = subprocess.run(data_prep_cmd, capture_output=True, text=True) print(result.stdout) From d976060c9a9fd621f524d6629ad310e235df58b1 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Mon, 31 Mar 2025 23:45:10 -0600 Subject: [PATCH 22/50] add glaiveai/glaive-code-assistant-v3 --- scripts/data_prep/preproc/__init__.py | 3 ++- scripts/data_prep/preproc/preprocs.py | 5 ++++ scripts/data_prep/split_hf_datasets.py | 36 +++++++++++++++----------- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/scripts/data_prep/preproc/__init__.py b/scripts/data_prep/preproc/__init__.py index f72f32b..db11cbc 100644 --- a/scripts/data_prep/preproc/__init__.py +++ b/scripts/data_prep/preproc/__init__.py @@ -1,5 +1,6 @@ -from preproc.preprocs import pre_tulu, pre_numina, preprocessing_function +from preproc.preprocs import pre_glaive, pre_tulu, pre_numina, preprocessing_function __all__ = [ + "pre_glaive", "pre_tulu", "pre_numina", "preprocessing_function", diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index f23a23a..45468c8 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -21,3 +21,8 @@ def pre_tulu(inp: dict): @dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") def pre_numina(inp: dict): return {'prompt': inp['problem'], 'response': inp['solution']} + + +@dataset_constructor.register(f"{HF_REPO}/split-glaive-code-assistant-v3") +def pre_glaive(inp: dict): + return {'prompt': inp['question'], 'response': inp['answer']} diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 56d3553..c1ac496 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ 
b/scripts/data_prep/split_hf_datasets.py @@ -124,7 +124,7 @@ def upload_token_folder(local_path, target_repo): path_in_repo=str(local_path.relative_to(".")) ) -def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-135M'): +def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): # import configurations to tokenize new dataset splits import tokenize_split from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS @@ -139,14 +139,14 @@ def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-1 convert_dataset_hf_from_args( dataset=d["target"], data_subset=ablation, - splits=['train', 'test'], - out_root=f'tokenized/{s}/{ablation}', + splits=["train", "test"], + out_root=f"tokenized/{s}/{ablation}", compression="zstd", concat_tokens=None, tokenizer=tokenizer, tokenizer_kwargs=None, bos_text=None, - eos_text='<|endoftext|>', + eos_text="<|endoftext|>", no_wrap=False, num_workers=None, ) @@ -155,11 +155,11 @@ def create_pretraining_tokens(args, datasets, tokenizer='HuggingFaceTB/SmolLM2-1 convert_finetuning_dataset_from_args( d["target"], f"{ablation}", # data_subset - ['train', 'test'], + ["train", "test"], d["preproc"], [], False, - f'tokenized/{s}/{ablation}', # out_root + f"tokenized/{s}/{ablation}", # out_root None, "zstd", None, # num_workers @@ -216,7 +216,13 @@ def main(args): "ds_name": "finemath-4plus", "target": f"{args.target_repo}/split-finemath", "ablations": ("full", "1M", "100k", "10k", "1k"), - } + }, + "glaive": { + "src": "glaiveai/glaive-code-assistant-v3", + "target": f"{args.target_repo}/split-glaive-code-assistant-v3", + "ablations": ("full", "1M", "100k", "10k", "1k"), + "preproc":"preproc:pre_glaive", + }, } if args.split: d = upload_splits(args, datasets) @@ -230,13 +236,13 @@ def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= - 'Split to train/test 1M, 100k, 10k, 1k and tokenize', + "Split to train/test 1M, 100k, 10k, 1k and tokenize", ) parser.add_argument( - '--source', - nargs='+', - choices=['tulu', 'numina', 'finemath'], - default=['tulu', 'numina', 'finemath'], + "--source", + nargs="+", + choices=["tulu", "numina", "finemath", "glaive",], + default=["tulu", "numina", "finemath", "glaive",], ) parser.add_argument( @@ -245,9 +251,9 @@ def parse_args() -> Namespace: help="target repo to upload splits and tokenizations", ) - parser.add_argument('--split', action=BooleanOptionalAction, default=True, help="split generation") - parser.add_argument('--tokenize', action=BooleanOptionalAction, default=True, help="generate tokenization for splits") - parser.add_argument('--upload', action=BooleanOptionalAction, default=True, help="upload tokenization folders") + parser.add_argument("--split", action=BooleanOptionalAction, default=True, help="split generation") + parser.add_argument("--tokenize", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") + parser.add_argument("--upload", action=BooleanOptionalAction, default=True, help="upload tokenization folders") parsed = parser.parse_args() return parsed From 32b5c7533ace5592d966b3fbcb97c2e8a2645600 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 3 Apr 2025 00:14:13 -0600 Subject: [PATCH 23/50] adding avelina python edu --- scripts/data_prep/split_hf_datasets.py | 11 ++++++++--- scripts/data_prep/tokenize_split.py | 15 +++++++++------ 2 files changed, 17 
insertions(+), 9 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index c1ac496..c789461 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -134,7 +134,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 d = datasets[s] folder = d["target"].split("/")[1] for ablation in d["ablations"]: - if s == "finemath": + if s in ["finemath", "avelinapythonedu"]: print("\ngenerating tokens for", s, ablation) convert_dataset_hf_from_args( dataset=d["target"], @@ -223,6 +223,11 @@ def main(args): "ablations": ("full", "1M", "100k", "10k", "1k"), "preproc":"preproc:pre_glaive", }, + "avelinapythonedu": { + "src": "Avelina/python-edu", + "target": f"{args.target_repo}/split-avelina-python-edu", + "ablations": ("full", "1M", "100k", "10k", "1k"), + }, } if args.split: d = upload_splits(args, datasets) @@ -241,8 +246,8 @@ def parse_args() -> Namespace: parser.add_argument( "--source", nargs="+", - choices=["tulu", "numina", "finemath", "glaive",], - default=["tulu", "numina", "finemath", "glaive",], + choices=["tulu", "numina", "finemath", "glaive", "avelinapythonedu",], + default=["tulu", "numina", "finemath", "glaive", "avelinapythonedu"], ) parser.add_argument( diff --git a/scripts/data_prep/tokenize_split.py b/scripts/data_prep/tokenize_split.py index f937cc4..71b4842 100644 --- a/scripts/data_prep/tokenize_split.py +++ b/scripts/data_prep/tokenize_split.py @@ -23,12 +23,15 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f ) return ds_const -finemath = generate_constants(2163, 4) -add_dataset_config("tyoc213/split-finemath", finemath) -finemath = generate_constants(2163, 4) -add_dataset_config("tyoc213/split-tulu-3-sft-olmo-2-mixture", finemath) -finemath = generate_constants(2163, 4) -add_dataset_config("tyoc213/split-NuminaMath-CoT", finemath) +_finemath = generate_constants(2163, 4) +HF_TARGET = "tyoc213" # "LocalResearchGroup" +add_dataset_config(f"{HF_TARGET}/split-finemath", _finemath) +_tulu = generate_constants(2163, 4) +add_dataset_config(f"{HF_TARGET}/split-tulu-3-sft-olmo-2-mixture", _tulu) +_numina = generate_constants(2163, 4) +add_dataset_config(f"{HF_TARGET}/split-NuminaMath-CoT", _numina) +_pythonedu = generate_constants(2163, 4) +add_dataset_config(f"{HF_TARGET}/split-avelina-python-edu", _pythonedu) # def do_xxx(): From f0d8012930754ef07aba2bf8736ae96dff16669d Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sun, 6 Apr 2025 20:40:46 -0600 Subject: [PATCH 24/50] extra commas and path changes --- scripts/data_prep/convert_dataset_hf.py | 7 ++++--- scripts/data_prep/download_repo.py | 2 +- scripts/data_prep/split_hf_datasets.py | 2 +- scripts/data_prep/tokenize_split.py | 24 +----------------------- scripts/modal/modal_script.py | 25 +++++++++++++++++++++++-- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/scripts/data_prep/convert_dataset_hf.py b/scripts/data_prep/convert_dataset_hf.py index eb9267f..13235ce 100644 --- a/scripts/data_prep/convert_dataset_hf.py +++ b/scripts/data_prep/convert_dataset_hf.py @@ -28,12 +28,13 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f ) return ds_const +HF_TARGET = "LocalResearchGroup" finemath = generate_constants(2163, 4) -add_dataset_config("tyoc213/split-finemath", finemath) +add_dataset_config(f"{HF_TARGET}/split-finemath", finemath) finemath = generate_constants(2163, 4) 
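# Presumably, add_dataset_config registers each DatasetConstants under its repo id in the
# CONSTS lookup consulted by convert_dataset_hf_from_args, so these split-* repos are
# resolved like the built-in entries; `finemath` is simply rebound to a fresh
# generate_constants(...) result before each registration.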
-add_dataset_config("tyoc213/split-tulu-3-sft-olmo-2-mixture", finemath) +add_dataset_config(f"{HF_TARGET}/split-tulu-3-sft-olmo-2-mixture", finemath) finemath = generate_constants(2163, 4) -add_dataset_config("tyoc213/split-NuminaMath-CoT", finemath) +add_dataset_config(f"{HF_TARGET}/split-NuminaMath-CoT", finemath) def parse_args() -> Namespace: diff --git a/scripts/data_prep/download_repo.py b/scripts/data_prep/download_repo.py index ffa5c17..f4e447c 100644 --- a/scripts/data_prep/download_repo.py +++ b/scripts/data_prep/download_repo.py @@ -14,7 +14,7 @@ def main(args): }, "finemath" :{ "target": f"{args.repo}/split-finemath", - } + }, } for ds in args.dataset: diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index c789461..af7aa41 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -121,7 +121,7 @@ def upload_token_folder(local_path, target_repo): folder_path=local_path, repo_id=target_repo, repo_type="dataset", - path_in_repo=str(local_path.relative_to(".")) + path_in_repo=str(local_path.relative_to(".")), ) def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): diff --git a/scripts/data_prep/tokenize_split.py b/scripts/data_prep/tokenize_split.py index 71b4842..e02d10c 100644 --- a/scripts/data_prep/tokenize_split.py +++ b/scripts/data_prep/tokenize_split.py @@ -24,7 +24,7 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f return ds_const _finemath = generate_constants(2163, 4) -HF_TARGET = "tyoc213" # "LocalResearchGroup" +HF_TARGET = "LocalResearchGroup" add_dataset_config(f"{HF_TARGET}/split-finemath", _finemath) _tulu = generate_constants(2163, 4) add_dataset_config(f"{HF_TARGET}/split-tulu-3-sft-olmo-2-mixture", _tulu) @@ -32,25 +32,3 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f add_dataset_config(f"{HF_TARGET}/split-NuminaMath-CoT", _numina) _pythonedu = generate_constants(2163, 4) add_dataset_config(f"{HF_TARGET}/split-avelina-python-edu", _pythonedu) - - -# def do_xxx(): -# args = Namespace(dataset='tyoc213/split-finemath', data_subset='10k', splits=['train', 'test'], out_root='finemath-10k', compression=None, concat_tokens=None, tokenizer='HuggingFaceTB/SmolLM2-135M', tokenizer_kwargs=None, bos_text=None, eos_text='<|endoftext|>', no_wrap=False, num_workers=None) -# convert_dataset_hf_from_args( -# dataset=args.dataset, -# data_subset=args.data_subset, -# splits=args.splits, -# out_root=args.out_root, -# compression=args.compression, -# concat_tokens=args.concat_tokens, -# tokenizer=args.tokenizer, -# tokenizer_kwargs=args.tokenizer_kwargs, -# bos_text=args.bos_text, -# eos_text=args.eos_text, -# no_wrap=args.no_wrap, -# num_workers=args.num_workers, -# ) - - -# if __name__ == "__main__": -# do_xxx() \ No newline at end of file diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 950af6a..b3a1dda 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -423,11 +423,11 @@ def pull_hf_to_folder(): print(f"Working directory: {os.getcwd()}") # Step 1: pull all tokens - print(f"Downloading repos to {DATASETS_VOLUME_MOUNT_PATH}/snapshot...") + print(f"Downloading repos to {DATASETS_VOLUME_MOUNT_PATH}/") data_prep_cmd = [ PYTHON_PATH, # Use the correct Python interpreter "data_prep/download_repo.py", - "--out", f"{DATASETS_VOLUME_MOUNT_PATH}/snapshot", + "--out", f"{DATASETS_VOLUME_MOUNT_PATH}/", ] result = subprocess.run(data_prep_cmd, 
capture_output=True, text=True) print(result.stdout) @@ -436,6 +436,27 @@ def pull_hf_to_folder(): DATASETS_VOLUME.commit() +@app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], + max_containers=1) +def process_datasets(): + import subprocess + import os + + # Change to llm-foundry/scripts directory at the start + os.chdir("/llm-foundry/scripts") + print(f"Working directory: {os.getcwd()}") + + # Step 1: pull all tokens + print(f"Processing datasets...") + data_prep_cmd = [ + PYTHON_PATH, # Use the correct Python interpreter + "data_prep/convert_dataset_hf.py", + ] + result = subprocess.run(data_prep_cmd, capture_output=True, text=True) + print(result.stdout) + if result.stderr: + print("Process dataset errors:", result.stderr) + @app.local_entrypoint() def main(): From 947c6ca561b178bbc0c298e679fb32e96f1e9813 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Mon, 7 Apr 2025 23:35:44 -0600 Subject: [PATCH 25/50] fix glaive -1M option added some extra logs --- scripts/data_prep/split_hf_datasets.py | 16 ++++++++++++---- scripts/modal/modal_script.py | 8 ++++++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index af7aa41..9906469 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -115,14 +115,15 @@ def process_numina(dataset): return dataset def upload_token_folder(local_path, target_repo): - print(f"upload_token_folder({str(local_path.relative_to("."))=}, {target_repo=})") + print(f"upload_token_folder({str(local_path.relative_to("."))}, {target_repo})") api = HfApi() - api.upload_folder( + r = api.upload_folder( folder_path=local_path, repo_id=target_repo, repo_type="dataset", path_in_repo=str(local_path.relative_to(".")), ) + print(f"token uploaded result: {r}") def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): # import configurations to tokenize new dataset splits @@ -175,12 +176,13 @@ def create_upload(args, datasets): # upload all tokenized folders to corresponding repo/folder for s in args.source: d = datasets[s] + print(f"Uploading {d['ablations']} from {d} to {d['target']} from {Path('.').absolute()}") for ablation in d["ablations"]: target_repo = d["target"] local_path = Path(".") / f"tokenized/{s}/{ablation}" print(f"\nUploading {ablation} to {target_repo} from {str(local_path)}\n") upload_token_folder(local_path, target_repo) - + print("upload finished.") def upload_splits(args, datas): for arg in args.source: @@ -220,7 +222,7 @@ def main(args): "glaive": { "src": "glaiveai/glaive-code-assistant-v3", "target": f"{args.target_repo}/split-glaive-code-assistant-v3", - "ablations": ("full", "1M", "100k", "10k", "1k"), + "ablations": ("full", "100k", "10k", "1k"), "preproc":"preproc:pre_glaive", }, "avelinapythonedu": { @@ -230,11 +232,17 @@ def main(args): }, } if args.split: + print(f"spliting: {args.source}") d = upload_splits(args, datasets) + print(f"spliting: {args.source} finished.") if args.tokenize: + print(f"tokenizing: {args.source}") create_pretraining_tokens(args, datasets) + print(f"tokenizing: {args.source} finished.") if args.upload: + print(f"uploading tokens: {args.source}") create_upload(args, datasets) + print(f"uploading tokens: {args.source} finished.") def parse_args() -> Namespace: diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index b3a1dda..a789d10 100644 --- 
a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -446,11 +446,15 @@ def process_datasets(): os.chdir("/llm-foundry/scripts") print(f"Working directory: {os.getcwd()}") - # Step 1: pull all tokens + # process all datasets: tulu, numina, finemath, glaive, avelinapythonedu + # 1. pull original, split and upload splits (`--no-split` to skip) + # 2. tokenize dataset(s) (`--no-tokenize` to skip) + # 3. upload (tokenized) folders (`--no-upload` to skip) + # `--source` can be 1 or the allowed list (dont pass to process all registered datasets) print(f"Processing datasets...") data_prep_cmd = [ PYTHON_PATH, # Use the correct Python interpreter - "data_prep/convert_dataset_hf.py", + "data_prep/split_hf_datasets.py", ] result = subprocess.run(data_prep_cmd, capture_output=True, text=True) print(result.stdout) From b4270d51ab5ad4d0f01da3710adcef1eb98fc113 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Wed, 9 Apr 2025 17:16:28 -0600 Subject: [PATCH 26/50] revert back to `concurrency_limit` for modal < 0.73.76 --- scripts/modal/modal_script.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index a789d10..6e74175 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -35,7 +35,7 @@ image = image.add_local_file(TRAIN_YAML, f"/llm-foundry/scripts/train/yamls/finetune/{TRAIN_YAML}") @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], - max_containers=1) + concurrency_limit=1) def get_stats(): import subprocess @@ -58,7 +58,7 @@ def get_stats(): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={DATASETS_VOLUME_MOUNT_PATH: DATASETS_VOLUME}, - max_containers=1) + concurrency_limit=1) def convert_c4_small_dataset(): import subprocess import os @@ -127,7 +127,7 @@ def convert_finetuning_dataset(): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, - max_containers=1) + concurrency_limit=1) def view_model_checkpoints(save_folder: str=None): import os print("\nModel checkpoint files and sizes:") @@ -214,7 +214,7 @@ def run_aim_server(run_folder: str): @app.function(gpu=TRAINING_GPU, image=image, timeout=12*3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME, DATASETS_VOLUME_MOUNT_PATH: DATASETS_VOLUME}, - max_containers=1) + concurrency_limit=1) def train_with_aim(run_ts: str, yaml_path: str = "train/yamls/pretrain/smollm2-135m.yaml"): import subprocess, time @@ -239,8 +239,13 @@ def train_with_aim(run_ts: str, yaml_path: str = "train/yamls/pretrain/smollm2-1 @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, +<<<<<<< HEAD max_containers=1) def convert_model_to_hf(checkpoint_path: str, yaml_path: str = "", upload_to_hf: bool = False, is_peft: bool = IS_PEFT): +======= + concurrency_limit=1) +def convert_model_to_hf(checkpoint_path: str, upload_to_hf: bool = False): +>>>>>>> 4234231 (revert back to `concurrency_limit` for modal < 0.73.76) """Convert a model checkpoint to a HuggingFace format.""" import subprocess, os from pathlib import Path @@ -276,7 +281,7 @@ def convert_model_to_hf(checkpoint_path: str, yaml_path: str = "", upload_to_hf: 
@app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, - max_containers=1) + concurrency_limit=1) def evaluate_model(checkpoint_path: str): import subprocess, os from pathlib import Path @@ -307,7 +312,7 @@ def evaluate_model(checkpoint_path: str): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, - max_containers=1) + concurrency_limit=1) def generate_responses(checkpoint_path: str, prompts: list[str]|str|None=None): import subprocess, os from pathlib import Path @@ -343,7 +348,7 @@ def generate_responses(checkpoint_path: str, prompts: list[str]|str|None=None): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, - max_containers=1) + concurrency_limit=1) def push_folder_to_hf(folder_path: str, repo_id: str | None = None, repo_type: str = "model", private: bool = True): """Upload model checkpoint to HuggingFace Hub.""" from huggingface_hub import HfApi @@ -413,7 +418,7 @@ def process_datasets(): @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={DATASETS_VOLUME_MOUNT_PATH: DATASETS_VOLUME}, - max_containers=1) + concurrency_limit=1) def pull_hf_to_folder(): import subprocess import os @@ -437,7 +442,7 @@ def pull_hf_to_folder(): DATASETS_VOLUME.commit() @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], - max_containers=1) + concurrency_limit=1) def process_datasets(): import subprocess import os From f608b4d4fe1e4b022d5a49ad7cba3c5a2953595c Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:11:09 -0600 Subject: [PATCH 27/50] added missings glaive, avelinapythonedu configs --- scripts/data_prep/download_repo.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/data_prep/download_repo.py b/scripts/data_prep/download_repo.py index f4e447c..e1c1e65 100644 --- a/scripts/data_prep/download_repo.py +++ b/scripts/data_prep/download_repo.py @@ -15,6 +15,12 @@ def main(args): "finemath" :{ "target": f"{args.repo}/split-finemath", }, + "glaive" : { + "target": f"{args.repo}/split-glaive-code-assistant-v3", + }, + "avelinapythonedu": { + "target": f"{args.repo}/split-avelina-python-edu", + }, } for ds in args.dataset: From 98705be08ca8b6c6498e5fe45ccd7b62a76aeccd Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:55:42 -0600 Subject: [PATCH 28/50] missing cmdn line options in previous commit --- scripts/data_prep/download_repo.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/data_prep/download_repo.py b/scripts/data_prep/download_repo.py index e1c1e65..5242a1b 100644 --- a/scripts/data_prep/download_repo.py +++ b/scripts/data_prep/download_repo.py @@ -37,13 +37,13 @@ def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= - 'Downloads tokenized versions of train/test 1M, 100k, 10k, 1k', + "Downloads tokenized versions of train/test 1M, 100k, 10k, 1k", ) parser.add_argument( - '--dataset', - nargs='+', - choices=['tulu', 'numina', 'finemath'], - default=['tulu', 'numina', 'finemath'], + "--dataset", + nargs="+", + choices=["tulu", "numina", "finemath", "glaive", 
"avelinapythonedu"], + default=["tulu", "numina", "finemath", "glaive", "avelinapythonedu"], ) parser.add_argument( From bfea302c42183397d05c11d2c59827563500ab51 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 10 Apr 2025 00:55:12 -0600 Subject: [PATCH 29/50] adding concat tokens for pretran datasets that indicates use the tokenizer; refactored dataset_constants_splits_config --- scripts/data_prep/convert_dataset_hf.py | 8 ------- ...t.py => dataset_constants_split_config.py} | 24 ++++++++++--------- scripts/data_prep/split_hf_datasets.py | 18 ++++++++------ 3 files changed, 24 insertions(+), 26 deletions(-) rename scripts/data_prep/{tokenize_split.py => dataset_constants_split_config.py} (51%) diff --git a/scripts/data_prep/convert_dataset_hf.py b/scripts/data_prep/convert_dataset_hf.py index 13235ce..89f4a88 100644 --- a/scripts/data_prep/convert_dataset_hf.py +++ b/scripts/data_prep/convert_dataset_hf.py @@ -28,14 +28,6 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f ) return ds_const -HF_TARGET = "LocalResearchGroup" -finemath = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-finemath", finemath) -finemath = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-tulu-3-sft-olmo-2-mixture", finemath) -finemath = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-NuminaMath-CoT", finemath) - def parse_args() -> Namespace: """Parse commandline arguments.""" diff --git a/scripts/data_prep/tokenize_split.py b/scripts/data_prep/dataset_constants_split_config.py similarity index 51% rename from scripts/data_prep/tokenize_split.py rename to scripts/data_prep/dataset_constants_split_config.py index e02d10c..40262d0 100644 --- a/scripts/data_prep/tokenize_split.py +++ b/scripts/data_prep/dataset_constants_split_config.py @@ -1,5 +1,4 @@ -from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS -from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS +from llmfoundry.command_utils import DatasetConstants, DataSplitConstants, add_dataset_config def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("full", 1, 10, 100, 1000)): ds_const = DatasetConstants( @@ -23,12 +22,15 @@ def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("f ) return ds_const -_finemath = generate_constants(2163, 4) -HF_TARGET = "LocalResearchGroup" -add_dataset_config(f"{HF_TARGET}/split-finemath", _finemath) -_tulu = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-tulu-3-sft-olmo-2-mixture", _tulu) -_numina = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-NuminaMath-CoT", _numina) -_pythonedu = generate_constants(2163, 4) -add_dataset_config(f"{HF_TARGET}/split-avelina-python-edu", _pythonedu) + +def register_new_datasets(target = "LocalResearchGroup"): + _finemath = generate_constants(12163, 4) + add_dataset_config(f"{target}/split-finemath", _finemath) + _tulu = generate_constants(12163, 4) + add_dataset_config(f"{target}/split-tulu-3-sft-olmo-2-mixture", _tulu) + _numina = generate_constants(12163, 4) + add_dataset_config(f"{target}/split-NuminaMath-CoT", _numina) + _pythonedu = generate_constants(12163, 4) + add_dataset_config(f"{target}/split-avelina-python-edu", _pythonedu) + _glaive = generate_constants(12163, 4) + 
add_dataset_config(f"{target}/split-glaive-code-assistant-v3", _glaive) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 9906469..4752b9a 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -7,6 +7,10 @@ from convert_finetuning_dataset import convert_finetuning_dataset_from_args import os +import dataset_constants_split_config +from llmfoundry.command_utils import convert_dataset_hf_from_args + + def save_to_parquet(combined: DatasetDict, out_ds_path: Path): data_files = {} for split, dataset in combined.items(): @@ -127,9 +131,7 @@ def upload_token_folder(local_path, target_repo): def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): # import configurations to tokenize new dataset splits - import tokenize_split - from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS - + for s in args.source: d = datasets[s] @@ -143,7 +145,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 splits=["train", "test"], out_root=f"tokenized/{s}/{ablation}", compression="zstd", - concat_tokens=None, + concat_tokens=True, tokenizer=tokenizer, tokenizer_kwargs=None, bos_text=None, @@ -166,12 +168,13 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 None, # num_workers "HuggingFaceTB/SmolLM2-135M", # tokenizer None, - 2048, # max_seq_len + 20480, # max_seq_len "none", # target_prompts "last", # target_responses False, # encoder_decoder ) + def create_upload(args, datasets): # upload all tokenized folders to corresponding repo/folder for s in args.source: @@ -231,6 +234,7 @@ def main(args): "ablations": ("full", "1M", "100k", "10k", "1k"), }, } + dataset_constants_split_config.register_new_datasets(args.target_repo) if args.split: print(f"spliting: {args.source}") d = upload_splits(args, datasets) @@ -263,11 +267,11 @@ def parse_args() -> Namespace: default="LocalResearchGroup", help="target repo to upload splits and tokenizations", ) - + parser.add_argument("--split", action=BooleanOptionalAction, default=True, help="split generation") parser.add_argument("--tokenize", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") parser.add_argument("--upload", action=BooleanOptionalAction, default=True, help="upload tokenization folders") - + parsed = parser.parse_args() return parsed From 308ee90d4ffca232bce370739debeb5cc4500c9f Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 10 Apr 2025 22:24:13 -0600 Subject: [PATCH 30/50] quickfix `"` by `'` --- scripts/data_prep/split_hf_datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 4752b9a..e881c92 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -119,7 +119,7 @@ def process_numina(dataset): return dataset def upload_token_folder(local_path, target_repo): - print(f"upload_token_folder({str(local_path.relative_to("."))}, {target_repo})") + print(f"upload_token_folder({str(local_path.relative_to('.'))}, {target_repo})") api = HfApi() r = api.upload_folder( folder_path=local_path, @@ -129,6 +129,7 @@ def upload_token_folder(local_path, target_repo): ) print(f"token uploaded result: {r}") + def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): 
# import configurations to tokenize new dataset splits From 3cb31092cf603200bd4869f35edacde235e4092d Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 11 Apr 2025 22:25:01 -0600 Subject: [PATCH 31/50] fix concat_tokens value --- scripts/data_prep/split_hf_datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index e881c92..490b309 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -146,7 +146,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 splits=["train", "test"], out_root=f"tokenized/{s}/{ablation}", compression="zstd", - concat_tokens=True, + concat_tokens=2048, tokenizer=tokenizer, tokenizer_kwargs=None, bos_text=None, From 9c40d497bd8b236993efd727d35abb9b2606706f Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 24 Apr 2025 01:58:08 -0600 Subject: [PATCH 32/50] addking kind to make clear which datasets are instruct and which pretrain. Remove extra method --- scripts/data_prep/convert_dataset_hf.py | 22 ---------------------- scripts/data_prep/split_hf_datasets.py | 11 +++++++++-- 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/scripts/data_prep/convert_dataset_hf.py b/scripts/data_prep/convert_dataset_hf.py index 89f4a88..16dd91d 100644 --- a/scripts/data_prep/convert_dataset_hf.py +++ b/scripts/data_prep/convert_dataset_hf.py @@ -6,28 +6,6 @@ from llmfoundry.command_utils import convert_dataset_hf_from_args, DatasetConstants, DataSplitConstants, add_dataset_config, CONSTS -def generate_constants(chars_per_sample, chars_per_token, label=None, splits=("full", 1, 10, 100, 1000)): - ds_const = DatasetConstants( - chars_per_sample=chars_per_sample, # Computed over validation set - chars_per_token=chars_per_token, # OpenAI estimate - ) - total_rows = None - # we generate only train and test use --data_subset --out_root - ds_const.splits[f"train"] = DataSplitConstants( - hf_split="train", - folder_split=f"train", - raw_samples=total_rows, - truncated_samples=total_rows, - ) - - ds_const.splits[f"test"] = DataSplitConstants( - hf_split="test", - folder_split=f"test", - raw_samples=total_rows, - truncated_samples=total_rows, - ) - return ds_const - def parse_args() -> Namespace: """Parse commandline arguments.""" diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 490b309..f013795 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -138,7 +138,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 d = datasets[s] folder = d["target"].split("/")[1] for ablation in d["ablations"]: - if s in ["finemath", "avelinapythonedu"]: + if d["kind"] == "pretrain": print("\ngenerating tokens for", s, ablation) convert_dataset_hf_from_args( dataset=d["target"], @@ -154,7 +154,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 no_wrap=False, num_workers=None, ) - else: + elif d["kind"] == "instruct": print(f"\nconvert_finetuning_dataset_from_args") convert_finetuning_dataset_from_args( d["target"], @@ -174,6 +174,8 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 "last", # target_responses False, # encoder_decoder ) + else: + raise RuntimeError(f"Unknow dataset kind: {d['kind']}") def create_upload(args, datasets): @@ -209,6 +211,7 @@ 
def main(args): "after_pull": filter_tulu, "ablations": ("full", "100k", "10k", "1k"), "preproc":"preproc:pre_tulu", + "kind": "instruct", }, "numina": { "src": "AI-MO/NuminaMath-CoT", @@ -216,23 +219,27 @@ def main(args): "after_pull": process_numina, "ablations": ("full", "100k", "10k", "1k"), "preproc":"preproc:pre_numina", + "kind": "instruct", }, "finemath" :{ "src": "HuggingFaceTB/finemath", "ds_name": "finemath-4plus", "target": f"{args.target_repo}/split-finemath", "ablations": ("full", "1M", "100k", "10k", "1k"), + "kind": "pretrain", }, "glaive": { "src": "glaiveai/glaive-code-assistant-v3", "target": f"{args.target_repo}/split-glaive-code-assistant-v3", "ablations": ("full", "100k", "10k", "1k"), "preproc":"preproc:pre_glaive", + "kind": "instruct", }, "avelinapythonedu": { "src": "Avelina/python-edu", "target": f"{args.target_repo}/split-avelina-python-edu", "ablations": ("full", "1M", "100k", "10k", "1k"), + "kind": "pretrain", }, } dataset_constants_split_config.register_new_datasets(args.target_repo) From 3010ece02031e4940c193628643e6d5140f8f577 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 24 Apr 2025 02:59:41 -0600 Subject: [PATCH 33/50] Override ablations internal configuration with `--one-k`` to do only 1k split for any 3 steps (or all): 1. download/process, split and upload original (--split/--no-split) 2. tokenize locally and (--tokenize/--no-tokenize) 3. upload tokenized dataset to target repo (--upload/--no-upload) --- scripts/data_prep/split_hf_datasets.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index f013795..5b19c35 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -137,7 +137,8 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 d = datasets[s] folder = d["target"].split("/")[1] - for ablation in d["ablations"]: + ablations = d["ablations"] if not args.one_k else ("1k",) # override ablation config from cmd line arg + for ablation in ablations: if d["kind"] == "pretrain": print("\ngenerating tokens for", s, ablation) convert_dataset_hf_from_args( @@ -178,12 +179,13 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 raise RuntimeError(f"Unknow dataset kind: {d['kind']}") -def create_upload(args, datasets): +def create_tokenized_upload(args, datasets): # upload all tokenized folders to corresponding repo/folder for s in args.source: d = datasets[s] - print(f"Uploading {d['ablations']} from {d} to {d['target']} from {Path('.').absolute()}") - for ablation in d["ablations"]: + ablations = d["ablations"] if not args.one_k else ("1k",) # override ablation config from cmd line arg + print(f"Uploading {ablations} from {d} to {d['target']} from {Path('.').absolute()}") + for ablation in ablations: target_repo = d["target"] local_path = Path(".") / f"tokenized/{s}/{ablation}" print(f"\nUploading {ablation} to {target_repo} from {str(local_path)}\n") @@ -194,11 +196,12 @@ def upload_splits(args, datas): for arg in args.source: d = datas[arg] ds_name = d.get("ds_name", None) + ablations = d["ablations"] if not args.one_k else ("1k",) # override ablation config from cmd line arg pull_n_push( d["target"], d["src"], ds_name=ds_name, - ablations=d["ablations"], + ablations=ablations, after_pull=d.get("after_pull", None), ) @@ -253,7 +256,7 @@ def main(args): print(f"tokenizing: {args.source} finished.") 
if args.upload: print(f"uploading tokens: {args.source}") - create_upload(args, datasets) + create_tokenized_upload(args, datasets) print(f"uploading tokens: {args.source} finished.") @@ -279,6 +282,7 @@ def parse_args() -> Namespace: parser.add_argument("--split", action=BooleanOptionalAction, default=True, help="split generation") parser.add_argument("--tokenize", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") parser.add_argument("--upload", action=BooleanOptionalAction, default=True, help="upload tokenization folders") + parser.add_argument("--one-k", action=BooleanOptionalAction, default=False, help="only process 1k") parsed = parser.parse_args() return parsed From b4645ec55b9c86e58999959f70bb97c96ca3a303 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 24 Apr 2025 03:23:26 -0600 Subject: [PATCH 34/50] missed 8192 tokens --- scripts/data_prep/preproc/__init__.py | 12 ++++--- scripts/data_prep/preproc/preprocs.py | 48 +++++++++++++++++++------- scripts/data_prep/split_hf_datasets.py | 8 ++--- 3 files changed, 46 insertions(+), 22 deletions(-) diff --git a/scripts/data_prep/preproc/__init__.py b/scripts/data_prep/preproc/__init__.py index db11cbc..d9b6ae4 100644 --- a/scripts/data_prep/preproc/__init__.py +++ b/scripts/data_prep/preproc/__init__.py @@ -1,7 +1,9 @@ -from preproc.preprocs import pre_glaive, pre_tulu, pre_numina, preprocessing_function +from preproc.preprocs import pre_ml_glaive, pre_ml_tulu, pre_ml_numina __all__ = [ - "pre_glaive", - "pre_tulu", - "pre_numina", - "preprocessing_function", + "pre_ml_glaive", + "pre_ml_tulu", + "pre_ml_numina", + # "pre_glaive", + # "pre_tulu", + # "pre_numina", ] diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index 45468c8..d47ad29 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -5,24 +5,46 @@ HF_REPO="LocalResearchGroup" dataset_constructor = DatasetConstructor() -@dataset_constructor.register(f"{HF_REPO}/split-finemath") -def preprocessing_function(inp: dict) -> dict: - """Format the already-split example.""" - return { - 'prompt': inp['inputs'] + ':', - 'response': inp['targets'], - } +# @dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") +# def pre_tulu(inp: dict): +# return {'prompt': inp["prompt"], 'response': inp["response"]} + + +# @dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") +# def pre_numina(inp: dict): +# return {'prompt': inp['problem'], 'response': inp['solution']} + + +# @dataset_constructor.register(f"{HF_REPO}/split-glaive-code-assistant-v3") +# def pre_glaive(inp: dict): +# return {'prompt': inp['question'], 'response': inp['answer']} + + + +def preproc_chatml(inp: dict, k_prompt:str, k_response: str): + """Format dataset into ChatML template.""" + prompt = ( + "<|im_start|>system\n<|im_end|>\n" + f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n" + ) + response = ( + f"<|im_start|>assistant\n{inp[k_response]}<|im_end|>\n" + "<|endoftext|>" + ) + return {"prompt": prompt, "response": response} @dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") -def pre_tulu(inp: dict): - return {'prompt': inp["prompt"], 'response': inp["response"]} +def pre_ml_tulu(inp: dict): + return preproc_chatml(inp, "prompt", "response") @dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") -def pre_numina(inp: dict): - return {'prompt': inp['problem'], 'response': inp['solution']} +def 
pre_ml_numina(inp: dict): + return preproc_chatml(inp, "problem", "solution") @dataset_constructor.register(f"{HF_REPO}/split-glaive-code-assistant-v3") -def pre_glaive(inp: dict): - return {'prompt': inp['question'], 'response': inp['answer']} +def pre_ml_glaive(inp: dict): + return preproc_chatml(inp, "question", "answer") + + diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 5b19c35..116141f 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -147,7 +147,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 splits=["train", "test"], out_root=f"tokenized/{s}/{ablation}", compression="zstd", - concat_tokens=2048, + concat_tokens=8192, tokenizer=tokenizer, tokenizer_kwargs=None, bos_text=None, @@ -213,7 +213,7 @@ def main(args): "target": f"{args.target_repo}/split-tulu-3-sft-olmo-2-mixture", "after_pull": filter_tulu, "ablations": ("full", "100k", "10k", "1k"), - "preproc":"preproc:pre_tulu", + "preproc":"preproc:pre_ml_tulu", "kind": "instruct", }, "numina": { @@ -221,7 +221,7 @@ def main(args): "target": f"{args.target_repo}/split-NuminaMath-CoT", "after_pull": process_numina, "ablations": ("full", "100k", "10k", "1k"), - "preproc":"preproc:pre_numina", + "preproc":"preproc:pre_ml_numina", "kind": "instruct", }, "finemath" :{ @@ -235,7 +235,7 @@ def main(args): "src": "glaiveai/glaive-code-assistant-v3", "target": f"{args.target_repo}/split-glaive-code-assistant-v3", "ablations": ("full", "100k", "10k", "1k"), - "preproc":"preproc:pre_glaive", + "preproc":"preproc:pre_ml_glaive", "kind": "instruct", }, "avelinapythonedu": { From 6cdf2ca289ce34bb27dd705b9891fa53c0d046e8 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 2 May 2025 09:44:35 -0600 Subject: [PATCH 35/50] add constants directly in convert_dataset_hf so that it can be called stand alone from scripts folder --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 6774806..f287f04 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -336,6 +336,10 @@ def convert_dataset_hf( KeyError: If constants are not defined for the split """ try: + if "tulu" not in CONSTS: + import dataset_constants_split_config + your_user = ... + dataset_constants_split_config.register_new_datasets(your_user) dataset_constants = CONSTS[dataset] except KeyError: raise ValueError( From 893298865fc82b3785b54a00730c435cc0aa620f Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 2 May 2025 14:45:41 -0600 Subject: [PATCH 36/50] tokenize each row and padd it up to max_length --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 2 +- llmfoundry/data/data.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index f287f04..99c1ce7 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -338,7 +338,7 @@ def convert_dataset_hf( try: if "tulu" not in CONSTS: import dataset_constants_split_config - your_user = ... 
+ your_user = "tyoc213" dataset_constants_split_config.register_new_datasets(your_user) dataset_constants = CONSTS[dataset] except KeyError: diff --git a/llmfoundry/data/data.py b/llmfoundry/data/data.py index 17b28e1..9af5ae3 100644 --- a/llmfoundry/data/data.py +++ b/llmfoundry/data/data.py @@ -160,7 +160,9 @@ def __iter__(self) -> Iterable[dict[str, NDArray]]: padding=False, ) iids = encoded['input_ids'] - buffer = buffer + self.bos_tokens + iids + self.eos_tokens + buffer = buffer + self.bos_tokens + iids + while len(buffer) <= self.max_length: + buffer += self.eos_tokens while len(buffer) >= self.max_length: concat_sample = buffer[:self.max_length] buffer = buffer[self.max_length:] if self.should_wrap else [] From 1bc9d97f352dea90667eba3e4cc66798de942f7f Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Tue, 27 May 2025 16:02:34 -0600 Subject: [PATCH 37/50] add glaive preprocessing after_pull on original dataset --- scripts/data_prep/split_hf_datasets.py | 37 ++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 116141f..80c0437 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -94,6 +94,30 @@ def pull_n_push( if purge_cache: dataset.cleanup_cache_files() + +def preproc_chatml(inp: dict, k_prompt:str, k_response: str): + """Format dataset into ChatML template.""" + prompt = ( + "<|im_start|>system\n<|im_end|>\n" + f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n" + ) + response = ( + f"<|im_start|>assistant\n{inp[k_response]}<|im_end|>\n" + "<|endoftext|>" + ) + return {"prompt": prompt, "response": response} + +def pre_ml_tulu(inp: dict): + return preproc_chatml(inp, "prompt", "response") + + +def pre_ml_numina(inp: dict): + return preproc_chatml(inp, "problem", "solution") + + +def pre_ml_glaive(inp: dict): + return preproc_chatml(inp, "question", "answer") + def filter_tulu(dataset): print(f"Original dataset rows {len(dataset)}") dataset = dataset.filter(lambda r: r["source"] is not None and "aya" not in r["source"] and len(r["messages"]) == 2) @@ -118,6 +142,18 @@ def process_numina(dataset): print("new numina features", dataset.features) return dataset +def process_glaive(dataset): + print("glaive", dataset.features) + + def extract_qa(messages): + return pre_ml_glaive(messages) + + dataset = dataset.map(lambda example: extract_qa(example)) + print("glaive new features:", dataset.features) + + return dataset + + def upload_token_folder(local_path, target_repo): print(f"upload_token_folder({str(local_path.relative_to('.'))}, {target_repo})") api = HfApi() @@ -233,6 +269,7 @@ def main(args): }, "glaive": { "src": "glaiveai/glaive-code-assistant-v3", + "after_pull": process_glaive, "target": f"{args.target_repo}/split-glaive-code-assistant-v3", "ablations": ("full", "100k", "10k", "1k"), "preproc":"preproc:pre_ml_glaive", From 5ae1dd5969573c6eedf389c2ed30be92dc0a71b3 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Wed, 28 May 2025 21:53:05 -0600 Subject: [PATCH 38/50] use tokenizer if tokenizer is specified without concat tokens --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 99c1ce7..35b04b3 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ 
b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -355,6 +355,8 @@ def convert_dataset_hf( else: mode = ConcatMode.NO_CONCAT built_tokenizer = None + if tokenizer: + build_tokenizer = build_tokenizer(tokenizer, {}) columns = {'text': 'str'} for split_name in splits: From 46eb8a3d785e9f56113b34a75de68326d4681343 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 30 May 2025 09:11:31 -0600 Subject: [PATCH 39/50] pretrain set to use tokenizer with concat tokens = None --- scripts/data_prep/split_hf_datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 80c0437..a785efd 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -183,7 +183,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 splits=["train", "test"], out_root=f"tokenized/{s}/{ablation}", compression="zstd", - concat_tokens=8192, + concat_tokens=None, tokenizer=tokenizer, tokenizer_kwargs=None, bos_text=None, From 21b52a483341832c70b45350e541ca555bd09431 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 30 May 2025 19:21:57 -0600 Subject: [PATCH 40/50] adding system prompt chat template --- scripts/data_prep/preproc/preprocs.py | 2 +- scripts/data_prep/split_hf_datasets.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index d47ad29..eef8a46 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -24,7 +24,7 @@ def preproc_chatml(inp: dict, k_prompt:str, k_response: str): """Format dataset into ChatML template.""" prompt = ( - "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Local Research Group<|im_end|>\n" f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n" ) response = ( diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index a785efd..40aabed 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -98,7 +98,7 @@ def pull_n_push( def preproc_chatml(inp: dict, k_prompt:str, k_response: str): """Format dataset into ChatML template.""" prompt = ( - "<|im_start|>system\n<|im_end|>\n" + "<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Local Research Group<|im_end|>\n" f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n" ) response = ( From 798e8385dab43d02b83ed4cc56dd36c60a9cd2d6 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 31 May 2025 00:42:21 -0600 Subject: [PATCH 41/50] numina and tulu preproc after_pull --- scripts/data_prep/split_hf_datasets.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 40aabed..4b22f58 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -132,6 +132,7 @@ def extract_qa(messages): dataset = dataset.map(lambda example: extract_qa(example["messages"])) dataset = dataset.remove_columns(["messages"]) print("new tulu features: ", dataset.features) + dataset = dataset.map(lambda example: pre_ml_tulu(example)) print(f" current rows {len(dataset)}") return dataset @@ -139,6 +140,7 @@ def process_numina(dataset): print("numina", dataset.features) # remove 
column that on batch of 512 only has 2 rows which breaks pytorch collate! dataset = dataset.remove_columns("messages") + dataset = dataset.map(lambda example: pre_ml_numina(example)) print("new numina features", dataset.features) return dataset From 7a12c49bc5972ce20df0de59f0d48236af29bdd7 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Thu, 5 Jun 2025 15:27:40 -0600 Subject: [PATCH 42/50] `built_tokenizer` when tokenizer exist for pretraining dataset --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 35b04b3..ffb3bce 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -356,7 +356,7 @@ def convert_dataset_hf( mode = ConcatMode.NO_CONCAT built_tokenizer = None if tokenizer: - build_tokenizer = build_tokenizer(tokenizer, {}) + built_tokenizer = build_tokenizer(tokenizer, {}) columns = {'text': 'str'} for split_name in splits: From bbbeddb2249bf8f7b5a0edbf5856545ae3891e69 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Fri, 6 Jun 2025 19:47:23 -0600 Subject: [PATCH 43/50] set correct max_seq_len for instruct and pretrain datasets --- scripts/data_prep/preproc/preprocs.py | 34 ++++---------------------- scripts/data_prep/split_hf_datasets.py | 7 +++--- 2 files changed, 8 insertions(+), 33 deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index eef8a46..ebe1ec6 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -2,48 +2,24 @@ DatasetConstructor, ) -HF_REPO="LocalResearchGroup" dataset_constructor = DatasetConstructor() -# @dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") -# def pre_tulu(inp: dict): -# return {'prompt': inp["prompt"], 'response': inp["response"]} - - -# @dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") -# def pre_numina(inp: dict): -# return {'prompt': inp['problem'], 'response': inp['solution']} - - -# @dataset_constructor.register(f"{HF_REPO}/split-glaive-code-assistant-v3") -# def pre_glaive(inp: dict): -# return {'prompt': inp['question'], 'response': inp['answer']} - - def preproc_chatml(inp: dict, k_prompt:str, k_response: str): """Format dataset into ChatML template.""" - prompt = ( - "<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Local Research Group<|im_end|>\n" - f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n" - ) - response = ( - f"<|im_start|>assistant\n{inp[k_response]}<|im_end|>\n" - "<|endoftext|>" - ) - return {"prompt": prompt, "response": response} - -@dataset_constructor.register(f"{HF_REPO}/split-tulu-3-sft-olmo-2-mixture") + return {"prompt": inp[k_prompt], "response": inp[k_response]} + +@dataset_constructor.register(f"LocalResearchGroup/split-tulu-3-sft-olmo-2-mixture") def pre_ml_tulu(inp: dict): return preproc_chatml(inp, "prompt", "response") -@dataset_constructor.register(f"{HF_REPO}/split-NuminaMath-CoT") +@dataset_constructor.register(f"LocalResearchGroup/split-NuminaMath-CoT") def pre_ml_numina(inp: dict): return preproc_chatml(inp, "problem", "solution") -@dataset_constructor.register(f"{HF_REPO}/split-glaive-code-assistant-v3") +@dataset_constructor.register(f"LocalResearchGroup/split-glaive-code-assistant-v3") def 
pre_ml_glaive(inp: dict): return preproc_chatml(inp, "question", "answer") diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 4b22f58..04f96af 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -170,9 +170,8 @@ def upload_token_folder(local_path, target_repo): def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-135M"): # import configurations to tokenize new dataset splits - + max_seq_len = 8192 for s in args.source: - d = datasets[s] folder = d["target"].split("/")[1] ablations = d["ablations"] if not args.one_k else ("1k",) # override ablation config from cmd line arg @@ -187,7 +186,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 compression="zstd", concat_tokens=None, tokenizer=tokenizer, - tokenizer_kwargs=None, + tokenizer_kwargs=f'{{"model_max_length": {max_seq_len} }}', bos_text=None, eos_text="<|endoftext|>", no_wrap=False, @@ -208,7 +207,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 None, # num_workers "HuggingFaceTB/SmolLM2-135M", # tokenizer None, - 20480, # max_seq_len + max_seq_len, # max_seq_len "none", # target_prompts "last", # target_responses False, # encoder_decoder From a1fed853c785a0817e57032ca1743afcae2ff306 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 7 Jun 2025 00:28:00 -0600 Subject: [PATCH 44/50] preprocs methods need to return the colums that have the template applied --- scripts/data_prep/preproc/preprocs.py | 11 +++-------- scripts/data_prep/split_hf_datasets.py | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/scripts/data_prep/preproc/preprocs.py b/scripts/data_prep/preproc/preprocs.py index ebe1ec6..b3b69b9 100644 --- a/scripts/data_prep/preproc/preprocs.py +++ b/scripts/data_prep/preproc/preprocs.py @@ -4,23 +4,18 @@ dataset_constructor = DatasetConstructor() - -def preproc_chatml(inp: dict, k_prompt:str, k_response: str): - """Format dataset into ChatML template.""" - return {"prompt": inp[k_prompt], "response": inp[k_response]} - @dataset_constructor.register(f"LocalResearchGroup/split-tulu-3-sft-olmo-2-mixture") def pre_ml_tulu(inp: dict): - return preproc_chatml(inp, "prompt", "response") + return {"prompt": inp["prompt"], "response": inp["response"]} @dataset_constructor.register(f"LocalResearchGroup/split-NuminaMath-CoT") def pre_ml_numina(inp: dict): - return preproc_chatml(inp, "problem", "solution") + return {"prompt": inp["prompt"], "response": inp["response"]} @dataset_constructor.register(f"LocalResearchGroup/split-glaive-code-assistant-v3") def pre_ml_glaive(inp: dict): - return preproc_chatml(inp, "question", "answer") + return {"prompt": inp["prompt"], "response": inp["response"]} diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 04f96af..0e2c806 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -205,7 +205,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 None, "zstd", None, # num_workers - "HuggingFaceTB/SmolLM2-135M", # tokenizer + tokenizer, # tokenizer None, max_seq_len, # max_seq_len "none", # target_prompts @@ -261,13 +261,6 @@ def main(args): "preproc":"preproc:pre_ml_numina", "kind": "instruct", }, - "finemath" :{ - "src": "HuggingFaceTB/finemath", - "ds_name": "finemath-4plus", - "target": 
f"{args.target_repo}/split-finemath", - "ablations": ("full", "1M", "100k", "10k", "1k"), - "kind": "pretrain", - }, "glaive": { "src": "glaiveai/glaive-code-assistant-v3", "after_pull": process_glaive, @@ -276,6 +269,13 @@ def main(args): "preproc":"preproc:pre_ml_glaive", "kind": "instruct", }, + "finemath" :{ + "src": "HuggingFaceTB/finemath", + "ds_name": "finemath-4plus", + "target": f"{args.target_repo}/split-finemath", + "ablations": ("full", "1M", "100k", "10k", "1k"), + "kind": "pretrain", + }, "avelinapythonedu": { "src": "Avelina/python-edu", "target": f"{args.target_repo}/split-avelina-python-edu", @@ -307,8 +307,8 @@ def parse_args() -> Namespace: parser.add_argument( "--source", nargs="+", - choices=["tulu", "numina", "finemath", "glaive", "avelinapythonedu",], - default=["tulu", "numina", "finemath", "glaive", "avelinapythonedu"], + choices=["tulu", "numina", "glaive", "finemath", "avelinapythonedu",], + default=["tulu", "numina", "glaive", "finemath", "avelinapythonedu"], ) parser.add_argument( From feb9513f41287658ce34445e527b85b16227849c Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Sat, 7 Jun 2025 19:11:58 -0600 Subject: [PATCH 45/50] `concat_tokens=max_seq_len` for pretrain datasets --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 2 +- scripts/data_prep/split_hf_datasets.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index ffb3bce..10fba68 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -356,7 +356,7 @@ def convert_dataset_hf( mode = ConcatMode.NO_CONCAT built_tokenizer = None if tokenizer: - built_tokenizer = build_tokenizer(tokenizer, {}) + built_tokenizer = build_tokenizer(tokenizer, tokenizer_kwargs) columns = {'text': 'str'} for split_name in splits: diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 0e2c806..6915b57 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -184,7 +184,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 splits=["train", "test"], out_root=f"tokenized/{s}/{ablation}", compression="zstd", - concat_tokens=None, + concat_tokens=max_seq_len, tokenizer=tokenizer, tokenizer_kwargs=f'{{"model_max_length": {max_seq_len} }}', bos_text=None, From 33bda53c1d00ed84986328b97be34c16e8af6344 Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Mon, 9 Jun 2025 14:13:42 -0600 Subject: [PATCH 46/50] remove dead code --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index 10fba68..e95a1c9 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -336,10 +336,6 @@ def convert_dataset_hf( KeyError: If constants are not defined for the split """ try: - if "tulu" not in CONSTS: - import dataset_constants_split_config - your_user = "tyoc213" - dataset_constants_split_config.register_new_datasets(your_user) dataset_constants = CONSTS[dataset] except KeyError: raise ValueError( From 041433bbfcd9313280f1b224eed91e36923f6f54 Mon Sep 17 00:00:00 2001 From: daol 
<506234+tyoc213@users.noreply.github.com> Date: Wed, 11 Jun 2025 23:55:03 -0600 Subject: [PATCH 47/50] extend timeout to process datasets, fix instruct tokenizer, fix padding that sets a row per sample in concat tokens path --- llmfoundry/data/data.py | 2 +- scripts/data_prep/split_hf_datasets.py | 5 +++-- scripts/modal/modal_script.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/llmfoundry/data/data.py b/llmfoundry/data/data.py index 9af5ae3..4804d4e 100644 --- a/llmfoundry/data/data.py +++ b/llmfoundry/data/data.py @@ -161,7 +161,7 @@ def __iter__(self) -> Iterable[dict[str, NDArray]]: ) iids = encoded['input_ids'] buffer = buffer + self.bos_tokens + iids - while len(buffer) <= self.max_length: + while len(buffer) < self.max_length: buffer += self.eos_tokens while len(buffer) >= self.max_length: concat_sample = buffer[:self.max_length] diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 6915b57..b208329 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -177,7 +177,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 ablations = d["ablations"] if not args.one_k else ("1k",) # override ablation config from cmd line arg for ablation in ablations: if d["kind"] == "pretrain": - print("\ngenerating tokens for", s, ablation) + print("\nconvert_dataset_hf_from_args for", s, ablation) convert_dataset_hf_from_args( dataset=d["target"], data_subset=ablation, @@ -193,7 +193,8 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 num_workers=None, ) elif d["kind"] == "instruct": - print(f"\nconvert_finetuning_dataset_from_args") + print(f"\nconvert_finetuning_dataset_from_args for", s, ablation) + tokenizer="HuggingFaceTB/SmolLM2-135M-instruct" convert_finetuning_dataset_from_args( d["target"], f"{ablation}", # data_subset diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index 6e74175..f34553d 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -441,7 +441,7 @@ def pull_hf_to_folder(): DATASETS_VOLUME.commit() -@app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], +@app.function(gpu=TRAINING_GPU, image=image, timeout=4*3600, secrets=[Secret.from_name("LRG")], concurrency_limit=1) def process_datasets(): import subprocess From b3e86e6cbbf8603cd40438b9f8ed4eb826ebce6e Mon Sep 17 00:00:00 2001 From: daol <506234+tyoc213@users.noreply.github.com> Date: Mon, 16 Jun 2025 20:59:56 -0600 Subject: [PATCH 48/50] `no_wrap=True` to trim samples for pretrain data --- scripts/data_prep/split_hf_datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index b208329..6434a42 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -189,7 +189,7 @@ def create_pretraining_tokens(args, datasets, tokenizer="HuggingFaceTB/SmolLM2-1 tokenizer_kwargs=f'{{"model_max_length": {max_seq_len} }}', bos_text=None, eos_text="<|endoftext|>", - no_wrap=False, + no_wrap=True, num_workers=None, ) elif d["kind"] == "instruct": From d768c4f6651417b40ba94245748c83d8adaa288a Mon Sep 17 00:00:00 2001 From: tyoc213 <506234+tyoc213@users.noreply.github.com> Date: Tue, 26 Aug 2025 16:45:41 -0600 Subject: [PATCH 49/50] force enter loop if any token present and break; make batchsize=1 so that no errors are show because different 
sizes in batch (as not padded up to a length) --- llmfoundry/command_utils/data_prep/convert_dataset_hf.py | 2 +- llmfoundry/data/data.py | 7 +++---- scripts/data_prep/split_hf_datasets.py | 8 ++++---- scripts/modal/modal_script.py | 5 ----- 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py index e95a1c9..9e836a9 100644 --- a/llmfoundry/command_utils/data_prep/convert_dataset_hf.py +++ b/llmfoundry/command_utils/data_prep/convert_dataset_hf.py @@ -382,7 +382,7 @@ def convert_dataset_hf( ) loader = build_dataloader( dataset=hf_dataset, - batch_size=512, + batch_size=1, num_workers=num_workers, ) samples = generate_samples( diff --git a/llmfoundry/data/data.py b/llmfoundry/data/data.py index 4804d4e..e792a66 100644 --- a/llmfoundry/data/data.py +++ b/llmfoundry/data/data.py @@ -160,16 +160,15 @@ def __iter__(self) -> Iterable[dict[str, NDArray]]: padding=False, ) iids = encoded['input_ids'] - buffer = buffer + self.bos_tokens + iids - while len(buffer) < self.max_length: - buffer += self.eos_tokens - while len(buffer) >= self.max_length: + buffer = buffer + self.bos_tokens + iids + self.eos_tokens + while len(buffer) >= self.max_length or len(buffer) > 0: concat_sample = buffer[:self.max_length] buffer = buffer[self.max_length:] if self.should_wrap else [] yield { # convert to ndarray to store in MDS format 'tokens': np.asarray(concat_sample, dtype=np.int32), } + break def stream_remote_local_validate( diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index 6434a42..b2d0a9f 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -289,11 +289,11 @@ def main(args): print(f"spliting: {args.source}") d = upload_splits(args, datasets) print(f"spliting: {args.source} finished.") - if args.tokenize: + if args.tokenize_local: print(f"tokenizing: {args.source}") create_pretraining_tokens(args, datasets) print(f"tokenizing: {args.source} finished.") - if args.upload: + if args.upload_tokens: print(f"uploading tokens: {args.source}") create_tokenized_upload(args, datasets) print(f"uploading tokens: {args.source} finished.") @@ -319,8 +319,8 @@ def parse_args() -> Namespace: ) parser.add_argument("--split", action=BooleanOptionalAction, default=True, help="split generation") - parser.add_argument("--tokenize", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") - parser.add_argument("--upload", action=BooleanOptionalAction, default=True, help="upload tokenization folders") + parser.add_argument("--tokenize-local", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") + parser.add_argument("--upload-tokens", action=BooleanOptionalAction, default=True, help="upload tokenization folders") parser.add_argument("--one-k", action=BooleanOptionalAction, default=False, help="only process 1k") parsed = parser.parse_args() diff --git a/scripts/modal/modal_script.py b/scripts/modal/modal_script.py index f34553d..88affca 100644 --- a/scripts/modal/modal_script.py +++ b/scripts/modal/modal_script.py @@ -239,13 +239,8 @@ def train_with_aim(run_ts: str, yaml_path: str = "train/yamls/pretrain/smollm2-1 @app.function(gpu=TRAINING_GPU, image=image, timeout=3600, secrets=[Secret.from_name("LRG")], volumes={MODEL_CHECKPOINT_VOLUME_MOUNT_PATH: MODEL_CHECKPOINT_VOLUME}, -<<<<<<< HEAD max_containers=1) def convert_model_to_hf(checkpoint_path: str, 
yaml_path: str = "", upload_to_hf: bool = False, is_peft: bool = IS_PEFT): -======= - concurrency_limit=1) -def convert_model_to_hf(checkpoint_path: str, upload_to_hf: bool = False): ->>>>>>> 4234231 (revert back to `concurrency_limit` for modal < 0.73.76) """Convert a model checkpoint to a HuggingFace format.""" import subprocess, os from pathlib import Path From 4dccc9d046d8bb128b35b16e4536dd612481dc1f Mon Sep 17 00:00:00 2001 From: tyoc213 <506234+tyoc213@users.noreply.github.com> Date: Sat, 30 Aug 2025 15:47:23 -0600 Subject: [PATCH 50/50] add little more info on how to use --- scripts/data_prep/split_hf_datasets.py | 53 ++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/scripts/data_prep/split_hf_datasets.py b/scripts/data_prep/split_hf_datasets.py index b2d0a9f..f665488 100644 --- a/scripts/data_prep/split_hf_datasets.py +++ b/scripts/data_prep/split_hf_datasets.py @@ -302,26 +302,63 @@ def main(args): def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( - description= - "Split to train/test 1M, 100k, 10k, 1k and tokenize", + description="""Tool to help build splits, tokenize and upload tokens. + + 1. -split Split `source` dataset to train/test 1M, 100k, 10k, 1k and upload it to `target_repo` (default LRG@hf) + 2. ---tokenize-local the splits locally to tokenized folder + 3. --upload-tokens upload local tokens to target repo + + + python data_prep/split_hf_datasets.py --source avelinapythonedu --split --no-tokenize-local --no-upload-tokens + python data_prep/split_hf_datasets.py --source avelinapythonedu --no-split --tokenize-local --no-upload-tokens + python data_prep/split_hf_datasets.py --source avelinapythonedu --no-split --no-tokenize-local --upload-tokens + + add `--one-k` to target only 1k rows split + """, ) parser.add_argument( "--source", nargs="+", - choices=["tulu", "numina", "glaive", "finemath", "avelinapythonedu",], + choices=[ + "tulu", + "numina", + "glaive", + "finemath", + "avelinapythonedu", + ], default=["tulu", "numina", "glaive", "finemath", "avelinapythonedu"], ) parser.add_argument( "--target_repo", default="LocalResearchGroup", - help="target repo to upload splits and tokenizations", + help="target repo to upload splits and tokenizations default is `LocalResearchGroup`", ) - parser.add_argument("--split", action=BooleanOptionalAction, default=True, help="split generation") - parser.add_argument("--tokenize-local", action=BooleanOptionalAction, default=True, help="generate tokenization for splits") - parser.add_argument("--upload-tokens", action=BooleanOptionalAction, default=True, help="upload tokenization folders") - parser.add_argument("--one-k", action=BooleanOptionalAction, default=False, help="only process 1k") + parser.add_argument( + "--split", + action=BooleanOptionalAction, + default=True, + help="Make splits out of source datasets", + ) + parser.add_argument( + "--tokenize-local", + action=BooleanOptionalAction, + default=True, + help="generate local tokenization for splits", + ) + parser.add_argument( + "--upload-tokens", + action=BooleanOptionalAction, + default=True, + help="upload local tokenization to target repo", + ) + parser.add_argument( + "--one-k", + action=BooleanOptionalAction, + default=False, + help="for testing/checks only process 1k split", + ) parsed = parser.parse_args() return parsed
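
A note on the pretraining tokenization path: patches 36, 47 and 49 iterate on `ConcatTokensDataset.__iter__` in `llmfoundry/data/data.py`. With `no_wrap=True` (patch 48) and the `break` added in patch 49, the net behavior is one sample per source row: BOS tokens, the row's tokens and EOS tokens, truncated to `max_length`, with nothing carried over into the next row. Below is a minimal standalone sketch of that per-row behavior under those assumptions; it is not the llm-foundry class itself, the tokenizer name matches the one used throughout the scripts, and the example text is invented.

# Sketch only: emit one token sample per input row, wrapped with BOS/EOS and
# truncated to max_length, mirroring the no_wrap=True behavior the final
# patches converge on (no cross-row packing; tokens past max_length are dropped).
import numpy as np
from transformers import AutoTokenizer

def per_row_samples(texts, tokenizer, max_length, bos_tokens, eos_tokens):
    for text in texts:
        ids = tokenizer(text, truncation=False, padding=False)['input_ids']
        buffer = bos_tokens + ids + eos_tokens
        # One (possibly shorter than max_length) sample per row; nothing carries over.
        yield {'tokens': np.asarray(buffer[:max_length], dtype=np.int32)}

if __name__ == '__main__':
    tok = AutoTokenizer.from_pretrained('HuggingFaceTB/SmolLM2-135M')
    eos = tok('<|endoftext|>', add_special_tokens=False)['input_ids']
    for sample in per_row_samples(['2 + 2 = 4 because ...'], tok,
                                  max_length=8192, bos_tokens=[], eos_tokens=eos):
        print(sample['tokens'].shape)

Because samples are no longer packed or padded to a fixed length, patch 49 also drops the dataloader batch size in `convert_dataset_hf` to 1; with a larger batch the default collate fails on rows of different lengths, which is the symptom described in that commit message.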
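On the instruct path: patches 37 to 44 move the ChatML templating out of the registered llm-foundry preproc hooks and into the `after_pull` callbacks, so the uploaded split repos already carry templated `prompt`/`response` columns and the registered preprocs become simple pass-throughs. Patch 47 then points the instruct branch at the `HuggingFaceTB/SmolLM2-135M-instruct` tokenizer, presumably so the ChatML markers are handled as known special tokens. The sketch below shows that `after_pull`-style mapping with `datasets.Dataset.map`; the template text matches the one added in patch 40, while the sample row is invented for illustration.

# Sketch of the after_pull-style ChatML templating (a process_glaive equivalent);
# the question/answer pair is made up, the template matches the one in the scripts.
from datasets import Dataset

SYSTEM = ("<|im_start|>system\nYou are a helpful AI assistant named SmolLM, "
          "trained by Local Research Group<|im_end|>\n")

def preproc_chatml(inp: dict, k_prompt: str, k_response: str) -> dict:
    prompt = SYSTEM + f"<|im_start|>user\n{inp[k_prompt]}\n<|im_end|>\n"
    response = f"<|im_start|>assistant\n{inp[k_response]}<|im_end|>\n<|endoftext|>"
    return {"prompt": prompt, "response": response}

raw = Dataset.from_dict({
    "question": ["How do I reverse a list in Python?"],
    "answer": ["Use reversed(xs) or xs[::-1]."],
})

# Equivalent of the after_pull step for glaive: template question/answer into ChatML.
templated = raw.map(lambda ex: preproc_chatml(ex, "question", "answer"))
print(templated[0]["prompt"])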