data processing, don't worry it's all clean

2024-03-13 14:13:51 +01:00
parent 66c6e6c931
commit 87e1af2ea2
6 changed files with 477 additions and 145 deletions


@@ -1,3 +1,5 @@
+import os
+import numpy as np
 import pandas as pd
 from utils.df_utils import *
@@ -39,7 +41,7 @@ def get_raw_measure_with_metadata(file: pd.Series) -> tuple[pd.DataFrame, list[p
     return metadata, sliced_experiments


-def load_data() -> tuple[pd.DataFrame, list[pd.DataFrame]]:
+def load_raw_data() -> tuple[pd.DataFrame, list[pd.DataFrame]]:
     """
     Load all the available data, slice it into individual measures and return the corresponding metadata.
     :return: dataframe containing the metadata, with one row per sliced measure
@@ -52,3 +54,18 @@ def load_data() -> tuple[pd.DataFrame, list[pd.DataFrame]]:
         metadata = pd.concat([metadata, temp_metadata])
         sliced_experiments.extend(temp_sliced_experiments)
     return metadata, sliced_experiments
+
+
+def data_to_single_df(data: list[pd.DataFrame]) -> pd.DataFrame:
+    """
+    Converts a list of dataframes into a single dataframe with one (long) row per measure. Loses a lot of info, so use with care!
+    :param data: list of dataframes, all of the same length
+    :return: dataframe whose rows concatenate each measure's index values and its '#Intensity' values
+    """
+    return pd.DataFrame(map(lambda x: np.append(x.index.to_numpy(), x["#Intensity"].to_numpy()), data))
+
+
+def load_data(name: str, path: str = "data") -> tuple[pd.DataFrame, pd.DataFrame]:
+    """Load the metadata.csv and experiments.csv of a dataset stored under <path>/<name>."""
+    metadata = pd.read_csv(os.path.join(path, name, "metadata.csv"))
+    experiments = pd.read_csv(os.path.join(path, name, "experiments.csv"))
+    return metadata, experiments
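
For context, here is a minimal sketch of how these helpers could be chained, assuming the functions above live in a module importable as data_processing (the actual file path is not shown in this diff) and that the data/<name>/ folders are simply the two dataframes written out with to_csv; both the module name and the save step are assumptions, not part of this commit.

import os

# hypothetical module name; the diff does not show the actual file path
from data_processing import load_raw_data, data_to_single_df, load_data


def build_dataset(name: str, path: str = "data") -> None:
    # Slice every raw file into individual measures and collect their metadata
    metadata, sliced_experiments = load_raw_data()
    # Flatten the list of per-measure dataframes into one wide table
    experiments = data_to_single_df(sliced_experiments)
    # Persist both tables where load_data(name) expects to find them
    target = os.path.join(path, name)
    os.makedirs(target, exist_ok=True)
    metadata.to_csv(os.path.join(target, "metadata.csv"), index=False)
    experiments.to_csv(os.path.join(target, "experiments.csv"), index=False)


# Later runs can then skip the slow slicing step and reload the processed CSVs:
metadata, experiments = load_data("example_run")  # "example_run" is a hypothetical dataset name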