Commit 9a4b4dde authored by Andri Joos

split array columns into multiple columns

parent b9fbb709
@@ -54,6 +54,18 @@ def _cast_columns(df: pd.DataFrame, column_type_mapping: Dict[str | int, str]) -
    return df

def _split_array_column(df: pd.DataFrame) -> pd.DataFrame:
    array_columns = [col for col in df.columns if isinstance(df[col].values[0], np.ndarray)]  # Data is consistent in each row

    for column in array_columns:
        array_dtype = df[column].iloc[0].dtype  # First row must have a value
        stacked_arrays = np.stack(df[column].values, dtype=array_dtype)  # is faster than df[column].apply(lambda vec: pd.Series(vec, dtype=array_dtype))
        expanded_columns = pd.DataFrame(stacked_arrays, index=df.index)
        expanded_columns.columns = [f'{column}_{i}' for i in range(expanded_columns.shape[1])]
        df = df.join(expanded_columns).drop(columns=[column])

    return df

def _drop_non_shared_columns(df: pd.DataFrame, shared_columns: Set[str]) -> pd.DataFrame:
    columns_to_drop = [column for column in df.columns if str(column) not in shared_columns]
    df = df.drop(columns=columns_to_drop)
@@ -87,6 +99,9 @@ def _transform_parquet_file(
    # Parse columns
    df = _cast_columns(df, column_name_type_mapping)

    # Split arrays
    df = _split_array_column(df)

    print(f'Saving {filename}')
    df.to_parquet(out_dir / filename)
    # df.to_csv(out_dir / f'{file.stem}.csv')
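
For context, a minimal, self-contained sketch of what the new _split_array_column helper does, run on a made-up DataFrame. The column names ('id', 'vec') and values below are hypothetical and only illustrate the behavior; the helper body mirrors the diff above.

import numpy as np
import pandas as pd

def _split_array_column(df: pd.DataFrame) -> pd.DataFrame:
    # Columns whose first value is an ndarray; data is assumed consistent per column
    array_columns = [col for col in df.columns if isinstance(df[col].values[0], np.ndarray)]

    for column in array_columns:
        array_dtype = df[column].iloc[0].dtype  # first row must have a value
        # stacking the raw values at once is faster than a per-row apply(pd.Series)
        stacked_arrays = np.stack(df[column].values, dtype=array_dtype)
        expanded_columns = pd.DataFrame(stacked_arrays, index=df.index)
        expanded_columns.columns = [f'{column}_{i}' for i in range(expanded_columns.shape[1])]
        df = df.join(expanded_columns).drop(columns=[column])

    return df

# Hypothetical usage: a column of fixed-length vectors is expanded into one
# scalar column per element (vec_0, vec_1, vec_2), and the original column is dropped.
df = pd.DataFrame({
    'id': [1, 2],
    'vec': [np.array([0.1, 0.2, 0.3]), np.array([0.4, 0.5, 0.6])],
})
print(_split_array_column(df))
#    id  vec_0  vec_1  vec_2
# 0   1    0.1    0.2    0.3
# 1   2    0.4    0.5    0.6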