diff --git a/efaar_benchmarking/core.py b/efaar_benchmarking/core.py index 81642e4..cbcf9a9 100644 --- a/efaar_benchmarking/core.py +++ b/efaar_benchmarking/core.py @@ -1,3 +1,6 @@ +from importlib.metadata import version + + def get_version() -> str: """Returns a string representation of the version of efaar_benchmarking currently in use @@ -7,15 +10,6 @@ def get_version() -> str: the version number installed of this package """ try: - from importlib.metadata import version # type: ignore - return version("efaar_benchmarking") - except ImportError: - try: - import pkg_resources - - return pkg_resources.get_distribution("efaar_benchmarking").version - except pkg_resources.DistributionNotFound: - return "set_version_placeholder" except ModuleNotFoundError: return "set_version_placeholder" diff --git a/notebooks/map_building_benchmarking.ipynb b/notebooks/map_building_benchmarking.ipynb index b10bcc1..20da315 100644 --- a/notebooks/map_building_benchmarking.ipynb +++ b/notebooks/map_building_benchmarking.ipynb @@ -21,9 +21,11 @@ "import pickle\n", "\n", "pc_count = 128\n", - "save_results = False # Results already uploaded to the notebooks/data folder in the repo. If True, will replace these files.\n", + "save_results = (\n", + " False # Results already uploaded to the notebooks/data folder in the repo. If True, will replace these files.\n", + ")\n", "pert_signal_pval_cutoff = 0.05\n", - "recall_thr_pairs = [(.05, .95)]" + "recall_thr_pairs = [(0.05, 0.95)]" ] }, { @@ -53,17 +55,43 @@ "# Run EFAAR pipelines\n", "all_embeddings_pre_agg = {}\n", "print(\"Running for embedding size\", pc_count)\n", - "all_embeddings_pre_agg[f\"scVI{pc_count}\"] = embed_by_scvi_anndata(adata_raw, batch_col=gem_group_colname, n_latent=pc_count, n_hidden=pc_count*2)\n", + "all_embeddings_pre_agg[f\"scVI{pc_count}\"] = embed_by_scvi_anndata(\n", + " adata_raw, batch_col=gem_group_colname, n_latent=pc_count, n_hidden=pc_count * 2\n", + ")\n", "print(\"embed_by_scvi_anndata completed\")\n", - "all_embeddings_pre_agg[f\"scVI{pc_count}-CS\"] = centerscale_on_controls(all_embeddings_pre_agg[f\"scVI{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=gem_group_colname)\n", + "all_embeddings_pre_agg[f\"scVI{pc_count}-CS\"] = centerscale_on_controls(\n", + " all_embeddings_pre_agg[f\"scVI{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=gem_group_colname,\n", + ")\n", "print(\"centerscale completed\")\n", - "all_embeddings_pre_agg[f\"scVI{pc_count}-TVN\"] = tvn_on_controls(all_embeddings_pre_agg[f\"scVI{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=gem_group_colname)\n", + "all_embeddings_pre_agg[f\"scVI{pc_count}-TVN\"] = tvn_on_controls(\n", + " all_embeddings_pre_agg[f\"scVI{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=gem_group_colname,\n", + ")\n", "print(\"tvn completed\")\n", "all_embeddings_pre_agg[f\"PCA{pc_count}\"] = embed_by_pca_anndata(adata_raw, gem_group_colname, pc_count)\n", "print(\"embed_by_pca_anndata completed\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=gem_group_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " 
control_key=control_key,\n", + " batch_col=gem_group_colname,\n", + ")\n", "print(\"centerscale completed\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=gem_group_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=gem_group_colname,\n", + ")\n", "print(\"tvn completed\")\n", "\n", "# Run biological relationship benchmarks\n", @@ -77,9 +105,9 @@ "\n", "# Save results\n", "if save_results:\n", - " with open(f'data/{dataset}_map_cache.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_map_cache.pkl\", \"wb\") as f:\n", " pickle.dump(map_data, f) # storing the PCA-TVN map data for downstream analysis\n", - " with open(f'data/{dataset}_metadata.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_metadata.pkl\", \"wb\") as f:\n", " pickle.dump(metadata, f) # storing the metadata for downstream analysis" ] }, @@ -109,7 +137,12 @@ "features, metadata = filter_cell_profiler_features(features, metadata)\n", "\n", "expression_data_folder = \"../efaar_benchmarking/expression_data\"\n", - "expr = pd.read_csv(f\"{expression_data_folder}/U2OS_expression.csv\", index_col=0).groupby(\"gene\").zfpkm.agg(\"median\").reset_index()\n", + "expr = (\n", + " pd.read_csv(f\"{expression_data_folder}/U2OS_expression.csv\", index_col=0)\n", + " .groupby(\"gene\")\n", + " .zfpkm.agg(\"median\")\n", + " .reset_index()\n", + ")\n", "unexpr_genes = list(expr.loc[expr.zfpkm < -3, \"gene\"])\n", "expr_genes = list(expr.loc[expr.zfpkm >= -3, \"gene\"])\n", "expr_ind = metadata[pert_colname].isin(expr_genes + [control_key])\n", @@ -117,18 +150,41 @@ "# Run EFAAR pipelines\n", "all_embeddings_pre_agg = {}\n", "print(\"Computing PCA embedding for\", pc_count, \"dimensions...\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}\"] = embed_by_pca(features.values, metadata, variance_or_ncomp=pc_count, batch_col=plate_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}\"] = embed_by_pca(\n", + " features.values, metadata, variance_or_ncomp=pc_count, batch_col=plate_colname\n", + ")\n", "print(\"Computing centerscale...\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=run_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=run_colname,\n", + ")\n", "print(\"Computing TVN...\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=run_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=run_colname,\n", + ")\n", "\n", "# Run perturbation signal benchmarks\n", "for k, emb in all_embeddings_pre_agg.items():\n", - " cons_res = pert_signal_consistency_benchmark(emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, keys_to_drop=all_controls)\n", + " cons_res = 
pert_signal_consistency_benchmark(\n", + " emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, keys_to_drop=all_controls\n", + " )\n", " print(k, round(sum(cons_res.pval <= pert_signal_pval_cutoff) / sum(~pd.isna(cons_res.pval)) * 100, 1))\n", "\n", - " magn_res = pert_signal_distance_benchmark(emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, control_key=control_key, keys_to_drop=[x for x in all_controls if x!=control_key])\n", + " magn_res = pert_signal_distance_benchmark(\n", + " emb,\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " neg_ctrl_perts=unexpr_genes,\n", + " control_key=control_key,\n", + " keys_to_drop=[x for x in all_controls if x != control_key],\n", + " )\n", " print(k, round(sum(magn_res.pval <= pert_signal_pval_cutoff) / sum(~pd.isna(magn_res.pval)) * 100, 1))\n", "\n", "# Run biological relationship benchmarks\n", @@ -137,14 +193,14 @@ " print(\"Aggregating...\")\n", " map_data = aggregate(emb[expr_ind], metadata[expr_ind], pert_col=pert_colname, keys_to_remove=all_controls)\n", " print(\"Computing recall...\")\n", - " metrics = known_relationship_benchmark(map_data, recall_thr_pairs=[(.05, .95)], pert_col=pert_colname)\n", + " metrics = known_relationship_benchmark(map_data, recall_thr_pairs=[(0.05, 0.95)], pert_col=pert_colname)\n", " print(metrics[list(metrics.columns)[::-1]])\n", "\n", "# Save results\n", "if save_results:\n", - " with open(f'data/{dataset}_map_cache.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_map_cache.pkl\", \"wb\") as f:\n", " pickle.dump(map_data, f) # storing the PCA-TVN map data for downstream analysis\n", - " with open(f'data/{dataset}_metadata.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_metadata.pkl\", \"wb\") as f:\n", " pickle.dump(metadata, f) # storing the metadata for downstream analysis" ] }, @@ -172,7 +228,9 @@ "print(\"Perturbation dataset loaded\")\n", "\n", "expression_data_folder = \"../efaar_benchmarking/expression_data\"\n", - "expr = pd.read_csv(f\"{expression_data_folder}/HeLa_expression.csv\") # note that we assume the HeLa expression data was used for PERISCOPE which is the default option in load_periscope()\n", + "expr = pd.read_csv(\n", + " f\"{expression_data_folder}/HeLa_expression.csv\"\n", + ") # note that we assume the HeLa expression data was used for PERISCOPE which is the default option in load_periscope()\n", "expr.columns = [\"gene\", \"tpm\"]\n", "expr.gene = expr.gene.apply(lambda x: x.split(\" \")[0])\n", "unexpr_genes = list(expr.loc[expr.tpm == 0, \"gene\"])\n", @@ -182,18 +240,41 @@ "# Run EFAAR pipelines\n", "all_embeddings_pre_agg = {}\n", "print(\"Computing PCA embedding for\", pc_count, \"dimensions...\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}\"] = embed_by_pca(features.values, metadata, variance_or_ncomp=pc_count, batch_col=plate_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}\"] = embed_by_pca(\n", + " features.values, metadata, variance_or_ncomp=pc_count, batch_col=plate_colname\n", + ")\n", "print(\"Computing centerscale...\")\n", - "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=plate_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-CS\"] = centerscale_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=plate_colname,\n", + ")\n", "print(\"Computing TVN...\")\n", - 
"all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(all_embeddings_pre_agg[f\"PCA{pc_count}\"], metadata, pert_col=pert_colname, control_key=control_key, batch_col=plate_colname)\n", + "all_embeddings_pre_agg[f\"PCA{pc_count}-TVN\"] = tvn_on_controls(\n", + " all_embeddings_pre_agg[f\"PCA{pc_count}\"],\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " control_key=control_key,\n", + " batch_col=plate_colname,\n", + ")\n", "\n", "# Run perturbation signal benchmarks\n", "for k, emb in all_embeddings_pre_agg.items():\n", - " cons_res = pert_signal_consistency_benchmark(emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, keys_to_drop=all_controls)\n", + " cons_res = pert_signal_consistency_benchmark(\n", + " emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, keys_to_drop=all_controls\n", + " )\n", " print(k, round(sum(cons_res.pval <= pert_signal_pval_cutoff) / sum(~pd.isna(cons_res.pval)) * 100, 1))\n", "\n", - " magn_res = pert_signal_distance_benchmark(emb, metadata, pert_col=pert_colname, neg_ctrl_perts=unexpr_genes, control_key=control_key, keys_to_drop=[x for x in all_controls if x!=control_key])\n", + " magn_res = pert_signal_distance_benchmark(\n", + " emb,\n", + " metadata,\n", + " pert_col=pert_colname,\n", + " neg_ctrl_perts=unexpr_genes,\n", + " control_key=control_key,\n", + " keys_to_drop=[x for x in all_controls if x != control_key],\n", + " )\n", " print(k, round(sum(magn_res.pval <= pert_signal_pval_cutoff) / sum(~pd.isna(magn_res.pval)) * 100, 1))\n", "\n", "# Run biological relationship benchmarks\n", @@ -202,14 +283,14 @@ " print(\"Aggregating...\")\n", " map_data = aggregate(emb[expr_ind], metadata[expr_ind], pert_col=pert_colname, keys_to_remove=all_controls)\n", " print(\"Computing recall...\")\n", - " metrics = known_relationship_benchmark(map_data, recall_thr_pairs=[(.05, .95)], pert_col=pert_colname)\n", + " metrics = known_relationship_benchmark(map_data, recall_thr_pairs=[(0.05, 0.95)], pert_col=pert_colname)\n", " print(metrics[list(metrics.columns)[::-1]])\n", "\n", "# Save results\n", "if save_results:\n", - " with open(f'data/{dataset}_map_cache.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_map_cache.pkl\", \"wb\") as f:\n", " pickle.dump(map_data, f) # storing the PCA-TVN map data for downstream analysis\n", - " with open(f'data/{dataset}_metadata.pkl', 'wb') as f:\n", + " with open(f\"data/{dataset}_metadata.pkl\", \"wb\") as f:\n", " pickle.dump(metadata, f) # storing the metadata for downstream analysis" ] } diff --git a/notebooks/map_evaluation_comparison.ipynb b/notebooks/map_evaluation_comparison.ipynb index 689fb7a..1691ccc 100644 --- a/notebooks/map_evaluation_comparison.ipynb +++ b/notebooks/map_evaluation_comparison.ipynb @@ -37,43 +37,40 @@ "import pickle\n", "import seaborn as sns\n", "from sklearn.metrics.pairwise import cosine_similarity\n", - "import pandas as pd\n", - "import numpy as np\n", - "from tqdm import tqdm\n", - "from itertools import combinations\n", - "from sklearn.cluster import AgglomerativeClustering\n", "import numpy as np\n", "\n", - "res_folder = 'data'\n", - "expression_data_folder = '../efaar_benchmarking/expression_data'\n", + "res_folder = \"data\"\n", + "expression_data_folder = \"../efaar_benchmarking/expression_data\"\n", "\n", "pert_label_cols = {\n", - " 'GWPS': 'gene',\n", - " 'cpg0016': 'Metadata_Symbol',\n", - " 'cpg0021': 'Metadata_Foci_Barcode_MatchedTo_GeneCode',\n", + " \"GWPS\": \"gene\",\n", + " \"cpg0016\": \"Metadata_Symbol\",\n", + " \"cpg0021\": 
\"Metadata_Foci_Barcode_MatchedTo_GeneCode\",\n", "}\n", "\n", "control_cols = {\n", - " 'GWPS': ['non-targeting'],\n", - " 'cpg0016': ['non-targeting', 'no-guide'],\n", - " 'cpg0021': ['nontargeting', 'negCtrl'],\n", + " \"GWPS\": [\"non-targeting\"],\n", + " \"cpg0016\": [\"non-targeting\", \"no-guide\"],\n", + " \"cpg0021\": [\"nontargeting\", \"negCtrl\"],\n", "}\n", "\n", + "\n", "def sig_level(x):\n", " if np.isnan(x):\n", - " return '-'\n", + " return \"-\"\n", " elif x < 1e-5:\n", - " return '*****'\n", + " return \"*****\"\n", " elif x < 1e-4:\n", - " return '****'\n", + " return \"****\"\n", " elif x < 1e-3:\n", - " return '***'\n", + " return \"***\"\n", " elif x < 1e-2:\n", - " return '**'\n", + " return \"**\"\n", " elif x < 0.05:\n", - " return '*'\n", + " return \"*\"\n", " else:\n", - " return 'ns'\n", + " return \"ns\"\n", + "\n", "\n", "def plot_clustermap(maps, dataset, genes, save_path=None):\n", " pheno_data = maps[dataset]\n", @@ -86,10 +83,19 @@ " cosi = pd.DataFrame(cosi, index=feat_genes.index, columns=feat_genes.index)\n", " sns.set_theme(font_scale=1)\n", " v = np.triu(cosi[genes].loc[genes])\n", - " sns.clustermap(pd.DataFrame(v, columns=genes, index=genes), cmap='RdBu_r', vmin=-1, vmax=1, figsize=(5,5),\n", - " row_cluster=False, col_cluster=False, cbar_pos=(.1, .1, .005, .18))\n", + " sns.clustermap(\n", + " pd.DataFrame(v, columns=genes, index=genes),\n", + " cmap=\"RdBu_r\",\n", + " vmin=-1,\n", + " vmax=1,\n", + " figsize=(5, 5),\n", + " row_cluster=False,\n", + " col_cluster=False,\n", + " cbar_pos=(0.1, 0.1, 0.005, 0.18),\n", + " )\n", " plt.savefig(save_path)\n", "\n", + "\n", "def plot_asym_clustermap(maps, datasets, genes, red_genes=[], save_path=None, cluster=False):\n", " genes2plot = genes\n", " for i, dataset in enumerate(datasets):\n", @@ -107,39 +113,62 @@ " sns.set_theme(font_scale=2)\n", " if i == 0:\n", " if cluster:\n", - " cg = sns.clustermap(cosi, cmap='RdBu_r', vmin=-1, vmax=1, yticklabels=True) # yticklabels=True because we use that info later for ordering\n", + " cg = sns.clustermap(\n", + " cosi, cmap=\"RdBu_r\", vmin=-1, vmax=1, yticklabels=True\n", + " ) # yticklabels=True because we use that info later for ordering\n", " ordering = list(cosi.index[cg.dendrogram_row.reordered_ind])\n", " else:\n", - " cg = sns.clustermap(cosi, cmap='RdBu_r', vmin=-1, vmax=1, figsize=(30,30), row_cluster=False, col_cluster=False, cbar_pos=(.1, .1, .005, .18))\n", + " cg = sns.clustermap(\n", + " cosi,\n", + " cmap=\"RdBu_r\",\n", + " vmin=-1,\n", + " vmax=1,\n", + " figsize=(30, 30),\n", + " row_cluster=False,\n", + " col_cluster=False,\n", + " cbar_pos=(0.1, 0.1, 0.005, 0.18),\n", + " )\n", " ordering = list(cosi.index)\n", " plt.clf()\n", " u = np.tril(cosi[ordering].loc[ordering], k=-1)\n", " else:\n", " tmp = cosi[ordering].loc[ordering]\n", " v = np.triu(tmp)\n", - " lbls = list(tmp.index) # needed when cluster=False and each gene in `genes` can be in multiple clusters\n", - " cg = sns.clustermap(pd.DataFrame(u+v, columns=lbls, index=lbls), cmap='RdBu_r', vmin=-1, vmax=1, figsize=(30,30), row_cluster=False, col_cluster=False, cbar_pos=(.1, .1, .005, .18))\n", + " lbls = list(tmp.index) # needed when cluster=False and each gene in `genes` can be in multiple clusters\n", + " cg = sns.clustermap(\n", + " pd.DataFrame(u + v, columns=lbls, index=lbls),\n", + " cmap=\"RdBu_r\",\n", + " vmin=-1,\n", + " vmax=1,\n", + " figsize=(30, 30),\n", + " row_cluster=False,\n", + " col_cluster=False,\n", + " cbar_pos=(0.1, 0.1, 0.005, 0.18),\n", + " )\n", " if 
len(red_genes) > 0:\n", " for l in cg.ax_heatmap.yaxis.get_ticklabels():\n", " if l.get_text() in red_genes:\n", - " l.set_color('red')\n", + " l.set_color(\"red\")\n", " cg.cax.set_visible(False)\n", " if save_path is not None:\n", - " plt.savefig(save_path, bbox_inches='tight', pad_inches=0.01)\n", + " plt.savefig(save_path, bbox_inches=\"tight\", pad_inches=0.01)\n", " plt.show()\n", "\n", + "\n", "def plot_heatmaps(all_pvals, datasets_to_plot, overlap_thr=0):\n", " df_plotting = all_pvals.copy()\n", - " df_plotting = df_plotting.dropna(subset=[f'genes_{datasets_to_plot[0]}', f'genes_{datasets_to_plot[1]}'])\n", - " df_plotting['overlap_plotting'] = df_plotting.apply(\n", - " lambda row: sorted(set(row[f'genes_{datasets_to_plot[0]}']).intersection(row[f'genes_{datasets_to_plot[1]}'])), axis=1)\n", + " df_plotting = df_plotting.dropna(subset=[f\"genes_{datasets_to_plot[0]}\", f\"genes_{datasets_to_plot[1]}\"])\n", + " df_plotting[\"overlap_plotting\"] = df_plotting.apply(\n", + " lambda row: sorted(set(row[f\"genes_{datasets_to_plot[0]}\"]).intersection(row[f\"genes_{datasets_to_plot[1]}\"])),\n", + " axis=1,\n", + " )\n", " clusters = {}\n", - " for c,genes in df_plotting[['cluster', 'overlap_plotting']].itertuples(index=False):\n", + " for c, genes in df_plotting[[\"cluster\", \"overlap_plotting\"]].itertuples(index=False):\n", " clusters[c] = genes\n", " clusters = dict(sorted(clusters.items(), key=lambda item: len(item[1])))\n", " clus_to_save = {}\n", " for i, (c, clus) in enumerate(clusters.items()):\n", - " if i==0:\n", + " if i == 0:\n", " clus_to_save[c] = clus\n", " else:\n", " for _, clus2 in clus_to_save.items():\n", @@ -171,10 +200,10 @@ "metadatas = {}\n", "maps = {}\n", "\n", - "for dataset in ['GWPS', 'cpg0016', 'cpg0021']:\n", - " with open(f'data/{dataset}_map_cache.pkl', 'rb') as f:\n", + "for dataset in [\"GWPS\", \"cpg0016\", \"cpg0021\"]:\n", + " with open(f\"data/{dataset}_map_cache.pkl\", \"rb\") as f:\n", " maps[dataset] = pickle.load(f)\n", - " with open(f'{res_folder}/{dataset}_metadata.pkl', 'rb') as f:\n", + " with open(f\"{res_folder}/{dataset}_metadata.pkl\", \"rb\") as f:\n", " metadatas[dataset] = pickle.load(f)" ] }, @@ -211,31 +240,40 @@ } ], "source": [ - "gwps_all_genes = set(metadatas['GWPS'][pert_label_cols['GWPS']])\n", - "print('GWPS gene count is', len(gwps_all_genes))\n", - "print('GWPS expressed gene count is', len(gwps_all_genes)) ## all genes are expressed in GWPS\n", - "print('GWPS sample size is', len(metadatas['GWPS']))\n", - "print('GWPS sample size excluding controls is', len(metadatas['GWPS'][~metadatas['GWPS'][pert_label_cols['GWPS']].isin(control_cols['GWPS'])]))\n", + "gwps_all_genes = set(metadatas[\"GWPS\"][pert_label_cols[\"GWPS\"]])\n", + "print(\"GWPS gene count is\", len(gwps_all_genes))\n", + "print(\"GWPS expressed gene count is\", len(gwps_all_genes)) ## all genes are expressed in GWPS\n", + "print(\"GWPS sample size is\", len(metadatas[\"GWPS\"]))\n", + "print(\n", + " \"GWPS sample size excluding controls is\",\n", + " len(metadatas[\"GWPS\"][~metadatas[\"GWPS\"][pert_label_cols[\"GWPS\"]].isin(control_cols[\"GWPS\"])]),\n", + ")\n", "\n", - "cpg16_all_genes = set(metadatas['cpg0016'][pert_label_cols['cpg0016']])\n", - "print('cpg0016', len(cpg16_all_genes))\n", - "expr = pd.read_csv(f'{expression_data_folder}/U2OS_expression.csv', index_col=0)\n", - "expr = expr.groupby('gene').zfpkm.agg('median').reset_index()\n", + "cpg16_all_genes = set(metadatas[\"cpg0016\"][pert_label_cols[\"cpg0016\"]])\n", + "print(\"cpg0016\", 
len(cpg16_all_genes))\n", + "expr = pd.read_csv(f\"{expression_data_folder}/U2OS_expression.csv\", index_col=0)\n", + "expr = expr.groupby(\"gene\").zfpkm.agg(\"median\").reset_index()\n", "jump_exp_genes = cpg16_all_genes.intersection(expr[expr.zfpkm >= -3].gene)\n", - "print('cpg0016 gene count is', len(cpg16_all_genes))\n", - "print('cpg0016 expressed gene count is', len(jump_exp_genes))\n", - "print('cpg0016 sample size is', len(metadatas['cpg0016']))\n", - "print('cpg0016 sample size excluding controls is', len(metadatas['cpg0016'][~metadatas['cpg0016'][pert_label_cols['cpg0016']].isin(control_cols['cpg0016'])]))\n", + "print(\"cpg0016 gene count is\", len(cpg16_all_genes))\n", + "print(\"cpg0016 expressed gene count is\", len(jump_exp_genes))\n", + "print(\"cpg0016 sample size is\", len(metadatas[\"cpg0016\"]))\n", + "print(\n", + " \"cpg0016 sample size excluding controls is\",\n", + " len(metadatas[\"cpg0016\"][~metadatas[\"cpg0016\"][pert_label_cols[\"cpg0016\"]].isin(control_cols[\"cpg0016\"])]),\n", + ")\n", "\n", - "cpg21_all_genes = set(metadatas['cpg0021'][pert_label_cols['cpg0021']])\n", - "expr = pd.read_csv(f'{expression_data_folder}/HeLa_expression.csv')\n", - "expr.columns = ['gene', 'tpm']\n", - "expr.gene = expr.gene.apply(lambda x: x.split(' ')[0])\n", + "cpg21_all_genes = set(metadatas[\"cpg0021\"][pert_label_cols[\"cpg0021\"]])\n", + "expr = pd.read_csv(f\"{expression_data_folder}/HeLa_expression.csv\")\n", + "expr.columns = [\"gene\", \"tpm\"]\n", + "expr.gene = expr.gene.apply(lambda x: x.split(\" \")[0])\n", "periscope_exp_genes = cpg21_all_genes.intersection(expr[expr.tpm != 0].gene)\n", - "print('cpg0021 gene count is', len(cpg21_all_genes))\n", - "print('cpg0021 expressed gene count is', len(periscope_exp_genes))\n", - "print('cpg0021 sample size is', len(metadatas['cpg0021']))\n", - "print('cpg0021 sample size excluding controls is', len(metadatas['cpg0021'][~metadatas['cpg0021'][pert_label_cols['cpg0021']].isin(control_cols['cpg0021'])]))" + "print(\"cpg0021 gene count is\", len(cpg21_all_genes))\n", + "print(\"cpg0021 expressed gene count is\", len(periscope_exp_genes))\n", + "print(\"cpg0021 sample size is\", len(metadatas[\"cpg0021\"]))\n", + "print(\n", + " \"cpg0021 sample size excluding controls is\",\n", + " len(metadatas[\"cpg0021\"][~metadatas[\"cpg0021\"][pert_label_cols[\"cpg0021\"]].isin(control_cols[\"cpg0021\"])]),\n", + ")" ] }, { @@ -273,15 +311,15 @@ "source": [ "benchmark_sources = {}\n", "for src in BENCHMARK_SOURCES:\n", - " res = pd.read_csv(f'../efaar_benchmarking/benchmark_annotations/{src}.txt')\n", + " res = pd.read_csv(f\"../efaar_benchmarking/benchmark_annotations/{src}.txt\")\n", " res = res[res.entity1 != res.entity2]\n", - " res['sorted_entities'] = res.apply(lambda row: tuple(sorted([row['entity1'], row['entity2']])), axis=1)\n", + " res[\"sorted_entities\"] = res.apply(lambda row: tuple(sorted([row[\"entity1\"], row[\"entity2\"]])), axis=1)\n", " benchmark_sources[src] = set(res.sorted_entities)\n", "\n", "upset_data = from_contents(benchmark_sources)\n", - "upset = UpSet(upset_data, sort_by='cardinality', sort_categories_by='-input')\n", + "upset = UpSet(upset_data, sort_by=\"cardinality\", sort_categories_by=\"-input\")\n", "upset.plot()\n", - "plt.yscale('log')\n", + "plt.yscale(\"log\")\n", "plt.show()\n", "\n", "# Count the union of value sets\n", @@ -300,9 +338,9 @@ "# Count the number of elements that are included in all sources\n", "all_sources_count = sum(all(element in source for source in benchmark_sources.values()) 
for element in union_set)\n", "\n", - "print('Union count:', union_count)\n", - "print('At least two count:', at_least_two_count)\n", - "print('All sources count:', all_sources_count)" + "print(\"Union count:\", union_count)\n", + "print(\"At least two count:\", at_least_two_count)\n", + "print(\"All sources count:\", all_sources_count)" ] }, { @@ -341,14 +379,14 @@ "source": [ "features, _ = load_cpg16_crispr()\n", "\n", - "features['Cytoplasm_Number_Object_Number'].hist(bins=100)\n", - "plt.xlabel('Number of cytoplasm objects')\n", - "plt.ylabel('Frequency')\n", + "features[\"Cytoplasm_Number_Object_Number\"].hist(bins=100)\n", + "plt.xlabel(\"Number of cytoplasm objects\")\n", + "plt.ylabel(\"Frequency\")\n", "plt.show()\n", "\n", - "features['Nuclei_Number_Object_Number'].hist(bins=100)\n", - "plt.xlabel('Number of nuclei objects')\n", - "plt.ylabel('Frequency')\n", + "features[\"Nuclei_Number_Object_Number\"].hist(bins=100)\n", + "plt.xlabel(\"Number of nuclei objects\")\n", + "plt.ylabel(\"Frequency\")\n", "plt.show()" ] }, @@ -388,15 +426,15 @@ } ], "source": [ - "datasets = ['GWPS', 'cpg0016', 'cpg0021']\n", + "datasets = [\"GWPS\", \"cpg0016\", \"cpg0021\"]\n", "metdict = {dataset: cluster_benchmark(maps[dataset], pert_label_cols[dataset]) for dataset in datasets}\n", - "thr = .01\n", + "thr = 0.01\n", "sigs = {}\n", "for k, df in metdict.items():\n", " sigs[k] = set(df[df.ks_pval <= thr].cluster)\n", "\n", "plt.figure(figsize=(8, 8))\n", - "venn3([sigs['GWPS'], sigs['cpg0016'], sigs['cpg0021']], set_labels=['GWPS', 'cpg0016', 'cpg0021'])\n", + "venn3([sigs[\"GWPS\"], sigs[\"cpg0016\"], sigs[\"cpg0021\"]], set_labels=[\"GWPS\", \"cpg0016\", \"cpg0021\"])\n", "plt.show()" ] }, @@ -437,13 +475,24 @@ "inter_set = set.intersection(*sigs.values())\n", "all_pvals_inter = None\n", "for m in datasets:\n", - " tmp = metdict[m].loc[metdict[m].cluster.isin(inter_set)].sort_values('ks_pval')[['cluster', 'ks_pval', 'genes']].rename(columns={'ks_pval': f'pval_{m}', 'genes': f'genes_{m}'})\n", + " tmp = (\n", + " metdict[m]\n", + " .loc[metdict[m].cluster.isin(inter_set)]\n", + " .sort_values(\"ks_pval\")[[\"cluster\", \"ks_pval\", \"genes\"]]\n", + " .rename(columns={\"ks_pval\": f\"pval_{m}\", \"genes\": f\"genes_{m}\"})\n", + " )\n", " if all_pvals_inter is None:\n", " all_pvals_inter = tmp\n", " else:\n", - " all_pvals_inter = all_pvals_inter.merge(tmp, on='cluster', how='outer')\n", - "all_pvals_inter = all_pvals_inter.sort_values('cluster')\n", - "repres = all_pvals_inter[[x for x in all_pvals_inter.columns if 'genes' not in x]].copy().set_index('cluster').map(sig_level).rename(columns={f'pval_{x}': x for x in datasets})\n", + " all_pvals_inter = all_pvals_inter.merge(tmp, on=\"cluster\", how=\"outer\")\n", + "all_pvals_inter = all_pvals_inter.sort_values(\"cluster\")\n", + "repres = (\n", + " all_pvals_inter[[x for x in all_pvals_inter.columns if \"genes\" not in x]]\n", + " .copy()\n", + " .set_index(\"cluster\")\n", + " .map(sig_level)\n", + " .rename(columns={f\"pval_{x}\": x for x in datasets})\n", + ")\n", "print(repres)" ] }, @@ -480,7 +529,7 @@ } ], "source": [ - "plot_heatmaps(all_pvals_inter, ['GWPS', 'cpg0016'])" + "plot_heatmaps(all_pvals_inter, [\"GWPS\", \"cpg0016\"])" ] }, { @@ -576,13 +625,24 @@ " diff_set = sigs[z].difference(union_sigs)\n", " all_pvals_exlusive_z = None\n", " for m in datasets:\n", - " tmp = metdict[m].loc[metdict[m].cluster.isin(diff_set)].sort_values('ks_pval')[['cluster', 'ks_pval', 'genes']].rename(columns={'ks_pval': f'pval_{m}', 'genes': 
f'genes_{m}'})\n", + " tmp = (\n", + " metdict[m]\n", + " .loc[metdict[m].cluster.isin(diff_set)]\n", + " .sort_values(\"ks_pval\")[[\"cluster\", \"ks_pval\", \"genes\"]]\n", + " .rename(columns={\"ks_pval\": f\"pval_{m}\", \"genes\": f\"genes_{m}\"})\n", + " )\n", " if all_pvals_exlusive_z is None:\n", " all_pvals_exlusive_z = tmp\n", " else:\n", - " all_pvals_exlusive_z = all_pvals_exlusive_z.merge(tmp, on='cluster', how='outer')\n", - " all_pvals_exlusive_z = all_pvals_exlusive_z.sort_values('cluster')\n", - " repres = all_pvals_exlusive_z[[x for x in all_pvals_exlusive_z.columns if 'genes' not in x]].copy().set_index('cluster').map(sig_level).rename(columns={f'pval_{x}': x for x in datasets})\n", + " all_pvals_exlusive_z = all_pvals_exlusive_z.merge(tmp, on=\"cluster\", how=\"outer\")\n", + " all_pvals_exlusive_z = all_pvals_exlusive_z.sort_values(\"cluster\")\n", + " repres = (\n", + " all_pvals_exlusive_z[[x for x in all_pvals_exlusive_z.columns if \"genes\" not in x]]\n", + " .copy()\n", + " .set_index(\"cluster\")\n", + " .map(sig_level)\n", + " .rename(columns={f\"pval_{x}\": x for x in datasets})\n", + " )\n", " print(repres)" ] }, @@ -665,16 +725,20 @@ } ], "source": [ - "dataset = 'GWPS'\n", + "dataset = \"GWPS\"\n", "src = \"GO\"\n", "topx = 25\n", - "enr_p_lim = .01\n", - "for gene in ['C18orf21', 'C1orf131']:\n", + "enr_p_lim = 0.01\n", + "for gene in [\"C18orf21\", \"C1orf131\"]:\n", " print(gene)\n", " res = compute_top_similars(maps[dataset], pert_label_cols[dataset], gene, topx=topx)\n", - " print(', '.join(sorted(res.pert)))\n", - " print(enrichment(res.pert, list(maps[dataset].metadata[pert_label_cols[dataset]]), source=src, pval_thr=enr_p_lim)[['cluster', 'pval']])\n", - " print('\\n')" + " print(\", \".join(sorted(res.pert)))\n", + " print(\n", + " enrichment(res.pert, list(maps[dataset].metadata[pert_label_cols[dataset]]), source=src, pval_thr=enr_p_lim)[\n", + " [\"cluster\", \"pval\"]\n", + " ]\n", + " )\n", + " print(\"\\n\")" ] } ], diff --git a/pyproject.toml b/pyproject.toml index 95c6508..14eab8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,23 +46,22 @@ dependencies = [ [project.optional-dependencies] dev = [ - "bandit[toml]", - "black", - "coverage", - "docstr-coverage", - "flake8", - "isort", - "mypy", - "pre-commit", - "pytest", - "pytest-cov", - "pyupgrade", - "radon", - "types-pkg_resources", - "types-setuptools", - "tox", - "validate-pyproject[all]", - "jupyterlab", + 'bandit[toml]', + 'black', + 'coverage', + 'docstr-coverage', + 'flake8', + 'isort', + 'mypy', + 'pre-commit', + 'pytest', + 'pytest-cov', + 'pyupgrade', + 'radon', + 'types-setuptools', + 'tox', + 'validate-pyproject[all]', + 'jupyterlab', ] diff --git a/requirements/dev_3.11.txt b/requirements/dev_3.11.txt index e9fade8..2013233 100644 --- a/requirements/dev_3.11.txt +++ b/requirements/dev_3.11.txt @@ -1,33 +1,37 @@ -absl-py==2.0.0 +absl-py==2.1.0 # via # chex # ml-collections # optax # orbax-checkpoint -aiobotocore==2.13.1 +aiobotocore==2.15.1 # via s3fs -aiohttp==3.9.2 +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.10.8 # via # aiobotocore # fsspec # s3fs -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore aiosignal==1.3.1 # via aiohttp -anndata==0.10.3 +anndata==0.10.9 # via # mudata # scvi-tools -anyio==4.2.0 - # via jupyter-server -appnope==0.1.3 +anyio==4.6.0 + # via + # httpx + # jupyter-server +appnope==0.1.4 # via ipykernel argon2-cffi==23.1.0 # via jupyter-server argon2-cffi-bindings==21.2.0 # via argon2-cffi -array-api-compat==1.4 
+array-api-compat==1.8 # via anndata arrow==1.3.0 # via isoduration @@ -35,28 +39,31 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -attrs==23.1.0 +attrs==24.2.0 # via # aiohttp # jsonschema # referencing -babel==2.14.0 +babel==2.16.0 # via jupyterlab-server -bandit[toml]==1.7.6 - # via efaar_benchmarking (pyproject.toml) -beautifulsoup4==4.12.2 +bandit==1.7.10 + # via efaar-benchmarking (pyproject.toml) +beautifulsoup4==4.12.3 # via nbconvert -black==23.12.1 - # via efaar_benchmarking (pyproject.toml) +black==24.8.0 + # via efaar-benchmarking (pyproject.toml) bleach==6.1.0 # via nbconvert -botocore==1.34.131 +botocore==1.35.23 # via aiobotocore -cachetools==5.3.2 +cachetools==5.5.0 # via tox -certifi==2023.11.17 - # via requests -cffi==1.16.0 +certifi==2024.8.30 + # via + # httpcore + # httpx + # requests +cffi==1.17.1 # via argon2-cffi-bindings cfgv==3.4.0 # via pre-commit @@ -64,7 +71,7 @@ chardet==5.2.0 # via tox charset-normalizer==3.3.2 # via requests -chex==0.1.8 +chex==0.1.87 # via optax click==8.1.7 # via @@ -74,19 +81,19 @@ colorama==0.4.6 # via # radon # tox -comm==0.2.0 +comm==0.2.2 # via ipykernel contextlib2==21.6.0 # via ml-collections -contourpy==1.2.0 +contourpy==1.3.0 # via matplotlib -coverage[toml]==7.4.0 +coverage==7.6.1 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # pytest-cov cycler==0.12.1 # via matplotlib -debugpy==1.8.0 +debugpy==1.8.6 # via ipykernel decorator==5.1.1 # via ipython @@ -94,30 +101,30 @@ defusedxml==0.7.1 # via nbconvert distlib==0.3.8 # via virtualenv -dm-tree==0.1.8 - # via chex docrep==0.3.2 # via scvi-tools -docstr-coverage==2.3.0 - # via efaar_benchmarking (pyproject.toml) -etils[epath,epy]==1.6.0 - # via orbax-checkpoint -executing==2.0.1 +docstr-coverage==2.3.2 + # via efaar-benchmarking (pyproject.toml) +etils==1.9.4 + # via + # optax + # orbax-checkpoint +executing==2.1.0 # via stack-data -fastjsonschema==2.19.1 +fastjsonschema==2.20.0 # via # nbformat # validate-pyproject -filelock==3.13.1 +filelock==3.16.1 # via # torch # tox # virtualenv -flake8==6.1.0 - # via efaar_benchmarking (pyproject.toml) -flax==0.7.5 +flake8==7.1.1 + # via efaar-benchmarking (pyproject.toml) +flax==0.9.0 # via scvi-tools -fonttools==4.47.0 +fonttools==4.54.1 # via matplotlib fqdn==1.5.1 # via jsonschema @@ -125,7 +132,7 @@ frozenlist==1.4.1 # via # aiohttp # aiosignal -fsspec[http]==2023.12.2 +fsspec==2024.9.0 # via # etils # lightning @@ -133,37 +140,41 @@ fsspec[http]==2023.12.2 # s3fs # torch geomloss==0.2.6 - # via efaar_benchmarking (pyproject.toml) -gitdb==4.0.11 - # via gitpython -gitpython==3.1.41 - # via bandit -h5py==3.10.0 + # via efaar-benchmarking (pyproject.toml) +h11==0.14.0 + # via httpcore +h5py==3.12.1 # via # anndata - # mudata # scvi-tools -identify==2.5.33 +httpcore==1.0.6 + # via httpx +httpx==0.27.2 + # via jupyterlab +humanize==4.10.0 + # via orbax-checkpoint +identify==2.6.1 # via pre-commit -idna==3.6 +idna==3.10 # via # anyio + # httpx # jsonschema # requests # yarl -importlib-resources==6.1.1 +importlib-resources==6.4.5 # via etils iniconfig==2.0.0 # via pytest -ipykernel==6.28.0 +ipykernel==6.29.5 # via jupyterlab -ipython==8.19.0 +ipython==8.28.0 # via ipykernel isoduration==20.11.0 # via jsonschema isort==5.13.2 - # via efaar_benchmarking (pyproject.toml) -jax==0.4.23 + # via efaar-benchmarking (pyproject.toml) +jax==0.4.33 # via # chex # flax @@ -171,16 +182,17 @@ jax==0.4.23 # optax # orbax-checkpoint # scvi-tools -jaxlib==0.4.23 +jaxlib==0.4.33 # via # chex + # jax 
# numpyro # optax # orbax-checkpoint # scvi-tools jedi==0.19.1 # via ipython -jinja2==3.1.3 +jinja2==3.1.4 # via # jupyter-server # jupyterlab @@ -189,27 +201,27 @@ jinja2==3.1.3 # torch jmespath==1.0.1 # via botocore -joblib==1.3.2 +joblib==1.4.2 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # scikit-learn -json5==0.9.14 +json5==0.9.25 # via jupyterlab-server -jsonpointer==2.4 +jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.20.0 +jsonschema==4.23.0 # via # jupyter-events # jupyterlab-server # nbformat jsonschema-specifications==2023.12.1 # via jsonschema -jupyter-client==8.6.0 +jupyter-client==8.6.3 # via # ipykernel # jupyter-server # nbclient -jupyter-core==5.6.0 +jupyter-core==5.7.2 # via # ipykernel # jupyter-client @@ -218,29 +230,29 @@ jupyter-core==5.6.0 # nbclient # nbconvert # nbformat -jupyter-events==0.9.0 +jupyter-events==0.10.0 # via jupyter-server -jupyter-lsp==2.2.2 +jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.12.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab # jupyterlab-server # notebook-shim -jupyter-server-terminals==0.5.1 +jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.0.11 - # via efaar_benchmarking (pyproject.toml) +jupyterlab==4.2.5 + # via efaar-benchmarking (pyproject.toml) jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.25.2 +jupyterlab-server==2.27.3 # via jupyterlab -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib -lightning==2.1.3 +lightning==2.1.4 # via scvi-tools -lightning-utilities==0.10.0 +lightning-utilities==0.11.7 # via # lightning # pytorch-lightning @@ -249,21 +261,21 @@ mando==0.7.1 # via radon markdown-it-py==3.0.0 # via rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via # jinja2 # nbconvert -matplotlib==3.8.2 +matplotlib==3.9.2 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # matplotlib-venn # seaborn -matplotlib-inline==0.1.6 +matplotlib-inline==0.1.7 # via # ipykernel # ipython -matplotlib-venn==0.11.10 - # via efaar_benchmarking (pyproject.toml) +matplotlib-venn==1.1.1 + # via efaar-benchmarking (pyproject.toml) mccabe==0.7.0 # via flake8 mdurl==0.1.2 @@ -272,58 +284,58 @@ mistune==3.0.2 # via nbconvert ml-collections==0.1.1 # via scvi-tools -ml-dtypes==0.3.1 +ml-dtypes==0.5.0 # via # jax # jaxlib # tensorstore mpmath==1.3.0 # via sympy -msgpack==1.0.7 +msgpack==1.1.0 # via # flax # orbax-checkpoint -mudata==0.2.3 +mudata==0.3.1 # via scvi-tools -multidict==6.0.4 +multidict==6.1.0 # via # aiohttp # yarl multipledispatch==1.0.0 # via numpyro -mypy==1.8.0 - # via efaar_benchmarking (pyproject.toml) +mypy==1.11.2 + # via efaar-benchmarking (pyproject.toml) mypy-extensions==1.0.0 # via # black # mypy natsort==8.4.0 # via anndata -nbclient==0.9.0 +nbclient==0.10.0 # via nbconvert -nbconvert==7.13.1 +nbconvert==7.16.4 # via jupyter-server -nbformat==5.9.2 +nbformat==5.10.4 # via # jupyter-server # nbclient # nbconvert -nest-asyncio==1.5.8 +nest-asyncio==1.6.0 # via # ipykernel # orbax-checkpoint -networkx==3.2.1 +networkx==3.3 # via torch -nodeenv==1.8.0 +nodeenv==1.9.1 # via pre-commit -notebook-shim==0.2.3 +notebook-shim==0.2.4 # via jupyterlab -numpy==1.26.2 +numpy==2.1.1 # via + # efaar-benchmarking (pyproject.toml) # anndata # chex # contourpy - # efaar_benchmarking (pyproject.toml) # flax # geomloss # h5py @@ -333,35 +345,32 @@ numpy==1.26.2 # matplotlib # matplotlib-venn # ml-dtypes - # mudata # numpyro - # opt-einsum # optax # orbax-checkpoint # pandas # pyro-ppl - # 
pytorch-lightning # scikit-learn # scipy # scvi-tools # seaborn # tensorstore # torchmetrics -numpyro==0.13.2 +numpyro==0.15.3 # via scvi-tools -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # jax # pyro-ppl -optax==0.1.7 +optax==0.2.3 # via # flax # scvi-tools -orbax-checkpoint==0.4.8 +orbax-checkpoint==0.6.4 # via flax -overrides==7.4.0 +overrides==7.7.0 # via jupyter-server -packaging==23.2 +packaging==24.1 # via # anndata # black @@ -380,79 +389,78 @@ packaging==23.2 # torchmetrics # tox # validate-pyproject -pandas==2.1.4 +pandas==2.2.3 # via + # efaar-benchmarking (pyproject.toml) # anndata - # efaar_benchmarking (pyproject.toml) - # mudata # scvi-tools # seaborn -pandocfilters==1.5.0 +pandocfilters==1.5.1 # via nbconvert -parso==0.8.3 +parso==0.8.4 # via jedi pathspec==0.12.1 # via black -pbr==6.0.0 +pbr==6.1.0 # via stevedore pexpect==4.9.0 # via ipython -pillow==10.3.0 +pillow==10.4.0 # via matplotlib -platformdirs==4.1.0 +platformdirs==4.3.6 # via # black # jupyter-core # tox # virtualenv -plotly==5.22.0 +plotly==5.24.1 # via upsetplotly -pluggy==1.3.0 +pluggy==1.5.0 # via # pytest # tox -pre-commit==3.6.0 - # via efaar_benchmarking (pyproject.toml) -prometheus-client==0.19.0 +pre-commit==3.8.0 + # via efaar-benchmarking (pyproject.toml) +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.43 +prompt-toolkit==3.0.48 # via ipython -protobuf==4.25.1 +protobuf==5.28.2 # via orbax-checkpoint -psutil==5.9.7 +psutil==6.0.0 # via ipykernel ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pycodestyle==2.11.1 +pycodestyle==2.12.1 # via flake8 -pycparser==2.21 +pycparser==2.22 # via cffi -pyflakes==3.1.0 +pyflakes==3.2.0 # via flake8 -pygments==2.17.2 +pygments==2.18.0 # via # ipython # nbconvert # rich -pyparsing==3.1.1 +pyparsing==3.1.4 # via matplotlib -pyproject-api==1.6.1 +pyproject-api==1.8.0 # via tox pyro-api==0.1.2 # via pyro-ppl -pyro-ppl==1.8.6 +pyro-ppl==1.9.1 # via scvi-tools -pytest==7.4.3 +pytest==8.3.3 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # pytest-cov -pytest-cov==4.1.0 - # via efaar_benchmarking (pyproject.toml) -python-dateutil==2.8.2 +pytest-cov==5.0.0 + # via efaar-benchmarking (pyproject.toml) +python-dateutil==2.9.0.post0 # via # arrow # botocore @@ -461,13 +469,13 @@ python-dateutil==2.8.2 # pandas python-json-logger==2.0.7 # via jupyter-events -pytorch-lightning==2.1.3 +pytorch-lightning==2.4.0 # via lightning -pytz==2023.3.post1 +pytz==2024.2 # via pandas -pyupgrade==3.15.0 - # via efaar_benchmarking (pyproject.toml) -pyyaml==6.0.1 +pyupgrade==3.17.0 + # via efaar-benchmarking (pyproject.toml) +pyyaml==6.0.2 # via # bandit # docstr-coverage @@ -478,22 +486,20 @@ pyyaml==6.0.1 # orbax-checkpoint # pre-commit # pytorch-lightning -pyzmq==25.1.2 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-server radon==6.0.1 - # via efaar_benchmarking (pyproject.toml) -referencing==0.32.0 + # via efaar-benchmarking (pyproject.toml) +referencing==0.35.1 # via # jsonschema # jsonschema-specifications # jupyter-events -requests==2.31.0 - # via - # fsspec - # jupyterlab-server +requests==2.32.3 + # via jupyterlab-server rfc3339-validator==0.1.4 # via # jsonschema @@ -502,40 +508,40 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.0 +rich==13.9.1 # via # bandit # flax # scvi-tools -rpds-py==0.16.2 +rpds-py==0.20.0 # via # jsonschema # referencing -s3fs==2023.12.2 - # via efaar_benchmarking (pyproject.toml) -scikit-learn==1.3.2 
+s3fs==2024.9.0 + # via efaar-benchmarking (pyproject.toml) +scikit-learn==1.5.2 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # scvi-tools -scipy==1.11.4 +scipy==1.14.1 # via + # efaar-benchmarking (pyproject.toml) # anndata - # efaar_benchmarking (pyproject.toml) # jax # jaxlib # matplotlib-venn # scikit-learn # scvi-tools scvi-tools==1.1.2 - # via efaar_benchmarking (pyproject.toml) -seaborn==0.12.2 - # via efaar_benchmarking (pyproject.toml) -send2trash==1.8.2 + # via efaar-benchmarking (pyproject.toml) +seaborn==0.13.2 + # via efaar-benchmarking (pyproject.toml) +send2trash==1.8.3 # via jupyter-server -setuptools==70.0.0 +setuptools==75.1.0 # via + # jupyterlab # lightning-utilities - # nodeenv six==1.16.0 # via # asttokens @@ -545,60 +551,60 @@ six==1.16.0 # ml-collections # python-dateutil # rfc3339-validator -smmap==5.0.1 - # via gitdb -sniffio==1.3.0 - # via anyio -soupsieve==2.5 +sniffio==1.3.1 + # via + # anyio + # httpx +soupsieve==2.6 # via beautifulsoup4 stack-data==0.6.3 # via ipython -stevedore==5.1.0 +stevedore==5.3.0 # via bandit -sympy==1.12 +sympy==1.13.3 # via torch -tenacity==8.3.0 +tenacity==9.0.0 # via plotly -tensorstore==0.1.51 +tensorstore==0.1.66 # via # flax # orbax-checkpoint -terminado==0.18.0 +terminado==0.18.1 # via # jupyter-server # jupyter-server-terminals -threadpoolctl==3.2.0 +threadpoolctl==3.5.0 # via scikit-learn -tinycss2==1.2.1 +tinycss2==1.3.0 # via nbconvert -tokenize-rt==5.2.0 +tokenize-rt==6.0.0 # via pyupgrade -toolz==0.12.0 +toolz==0.12.1 # via chex -torch==2.1.2 +torch==2.4.1 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # geomloss # lightning # pyro-ppl # pytorch-lightning # scvi-tools # torchmetrics -torchmetrics==1.2.1 +torchmetrics==1.4.2 # via # lightning # pytorch-lightning # scvi-tools -tornado==6.4 +tornado==6.4.1 # via # ipykernel # jupyter-client # jupyter-server # jupyterlab # terminado -tox==4.11.4 - # via efaar_benchmarking (pyproject.toml) -tqdm==4.63.1 +tox==4.21.0 + # via efaar-benchmarking (pyproject.toml) +tqdm==4.66.5 # via # docstr-coverage # lightning @@ -607,7 +613,7 @@ tqdm==4.63.1 # pytorch-lightning # scvi-tools # upsetplotly -traitlets==5.14.0 +traitlets==5.14.3 # via # comm # ipykernel @@ -621,56 +627,55 @@ traitlets==5.14.0 # nbclient # nbconvert # nbformat -trove-classifiers==2023.11.29 +trove-classifiers==2024.9.12 # via validate-pyproject -types-pkg-resources==0.1.3 - # via efaar_benchmarking (pyproject.toml) -types-python-dateutil==2.8.19.14 +types-python-dateutil==2.9.0.20240906 # via arrow -types-setuptools==69.0.0.0 - # via efaar_benchmarking (pyproject.toml) -typing-extensions==4.9.0 +types-setuptools==75.1.0.20240917 + # via efaar-benchmarking (pyproject.toml) +typing-extensions==4.12.2 # via # chex # etils # flax + # ipython # lightning # lightning-utilities # mypy # orbax-checkpoint # pytorch-lightning # torch -tzdata==2023.4 +tzdata==2024.2 # via pandas upsetplotly==0.1.7 - # via efaar_benchmarking (pyproject.toml) + # via efaar-benchmarking (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.0.7 +urllib3==2.2.3 # via # botocore # requests -validate-pyproject[all]==0.15 - # via efaar_benchmarking (pyproject.toml) -virtualenv==20.25.0 +validate-pyproject==0.20.2 + # via efaar-benchmarking (pyproject.toml) +virtualenv==20.26.6 # via # pre-commit # tox -wcwidth==0.2.12 +wcwidth==0.2.13 # via prompt-toolkit -webcolors==1.13 +webcolors==24.8.0 # via jsonschema webencodings==0.5.1 # via # bleach # tinycss2 
-websocket-client==1.7.0 +websocket-client==1.8.0 # via jupyter-server wget==3.2 - # via efaar_benchmarking (pyproject.toml) + # via efaar-benchmarking (pyproject.toml) wrapt==1.16.0 # via aiobotocore -yarl==1.9.4 +yarl==1.13.1 # via aiohttp -zipp==3.17.0 +zipp==3.20.2 # via etils diff --git a/requirements/dev_3.12.txt b/requirements/dev_3.12.txt new file mode 100644 index 0000000..2454d68 --- /dev/null +++ b/requirements/dev_3.12.txt @@ -0,0 +1,682 @@ +absl-py==2.1.0 + # via + # chex + # ml-collections + # optax + # orbax-checkpoint +aiobotocore==2.15.1 + # via s3fs +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.10.8 + # via + # aiobotocore + # fsspec + # s3fs +aioitertools==0.12.0 + # via aiobotocore +aiosignal==1.3.1 + # via aiohttp +anndata==0.10.9 + # via + # mudata + # scvi-tools +anyio==4.6.0 + # via + # httpx + # jupyter-server +appnope==0.1.4 + # via ipykernel +argon2-cffi==23.1.0 + # via jupyter-server +argon2-cffi-bindings==21.2.0 + # via argon2-cffi +array-api-compat==1.8 + # via anndata +arrow==1.3.0 + # via isoduration +asttokens==2.4.1 + # via stack-data +async-lru==2.0.4 + # via jupyterlab +attrs==24.2.0 + # via + # aiohttp + # jsonschema + # referencing +babel==2.16.0 + # via jupyterlab-server +bandit==1.7.10 + # via efaar-benchmarking (pyproject.toml) +beautifulsoup4==4.12.3 + # via nbconvert +black==24.8.0 + # via efaar-benchmarking (pyproject.toml) +bleach==6.1.0 + # via nbconvert +botocore==1.35.23 + # via aiobotocore +cachetools==5.5.0 + # via tox +certifi==2024.8.30 + # via + # httpcore + # httpx + # requests +cffi==1.17.1 + # via argon2-cffi-bindings +cfgv==3.4.0 + # via pre-commit +chardet==5.2.0 + # via tox +charset-normalizer==3.3.2 + # via requests +chex==0.1.87 + # via optax +click==8.1.7 + # via + # black + # docstr-coverage +colorama==0.4.6 + # via + # radon + # tox +comm==0.2.2 + # via ipykernel +contextlib2==21.6.0 + # via ml-collections +contourpy==1.3.0 + # via matplotlib +coverage==7.6.1 + # via + # efaar-benchmarking (pyproject.toml) + # pytest-cov +cycler==0.12.1 + # via matplotlib +debugpy==1.8.6 + # via ipykernel +decorator==5.1.1 + # via ipython +defusedxml==0.7.1 + # via nbconvert +distlib==0.3.8 + # via virtualenv +docrep==0.3.2 + # via scvi-tools +docstr-coverage==2.3.2 + # via efaar-benchmarking (pyproject.toml) +etils==1.9.4 + # via + # optax + # orbax-checkpoint +executing==2.1.0 + # via stack-data +fastjsonschema==2.20.0 + # via + # nbformat + # validate-pyproject +filelock==3.16.1 + # via + # torch + # tox + # virtualenv +flake8==7.1.1 + # via efaar-benchmarking (pyproject.toml) +flax==0.9.0 + # via scvi-tools +fonttools==4.54.1 + # via matplotlib +fqdn==1.5.1 + # via jsonschema +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +fsspec==2024.9.0 + # via + # etils + # lightning + # pytorch-lightning + # s3fs + # torch +geomloss==0.2.6 + # via efaar-benchmarking (pyproject.toml) +h11==0.14.0 + # via httpcore +h5py==3.12.1 + # via + # anndata + # scvi-tools +httpcore==1.0.6 + # via httpx +httpx==0.27.2 + # via jupyterlab +humanize==4.10.0 + # via orbax-checkpoint +identify==2.6.1 + # via pre-commit +idna==3.10 + # via + # anyio + # httpx + # jsonschema + # requests + # yarl +importlib-resources==6.4.5 + # via etils +iniconfig==2.0.0 + # via pytest +ipykernel==6.29.5 + # via jupyterlab +ipython==8.28.0 + # via ipykernel +isoduration==20.11.0 + # via jsonschema +isort==5.13.2 + # via efaar-benchmarking (pyproject.toml) +jax==0.4.33 + # via + # chex + # flax + # numpyro + # optax + # orbax-checkpoint + # scvi-tools +jaxlib==0.4.33 + # via 
+ # chex + # jax + # numpyro + # optax + # orbax-checkpoint + # scvi-tools +jedi==0.19.1 + # via ipython +jinja2==3.1.4 + # via + # jupyter-server + # jupyterlab + # jupyterlab-server + # nbconvert + # torch +jmespath==1.0.1 + # via botocore +joblib==1.4.2 + # via + # efaar-benchmarking (pyproject.toml) + # scikit-learn +json5==0.9.25 + # via jupyterlab-server +jsonpointer==3.0.0 + # via jsonschema +jsonschema==4.23.0 + # via + # jupyter-events + # jupyterlab-server + # nbformat +jsonschema-specifications==2023.12.1 + # via jsonschema +jupyter-client==8.6.3 + # via + # ipykernel + # jupyter-server + # nbclient +jupyter-core==5.7.2 + # via + # ipykernel + # jupyter-client + # jupyter-server + # jupyterlab + # nbclient + # nbconvert + # nbformat +jupyter-events==0.10.0 + # via jupyter-server +jupyter-lsp==2.2.5 + # via jupyterlab +jupyter-server==2.14.2 + # via + # jupyter-lsp + # jupyterlab + # jupyterlab-server + # notebook-shim +jupyter-server-terminals==0.5.3 + # via jupyter-server +jupyterlab==4.2.5 + # via efaar-benchmarking (pyproject.toml) +jupyterlab-pygments==0.3.0 + # via nbconvert +jupyterlab-server==2.27.3 + # via jupyterlab +kiwisolver==1.4.7 + # via matplotlib +lightning==2.1.4 + # via scvi-tools +lightning-utilities==0.11.7 + # via + # lightning + # pytorch-lightning + # torchmetrics +mando==0.7.1 + # via radon +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.5 + # via + # jinja2 + # nbconvert +matplotlib==3.9.2 + # via + # efaar-benchmarking (pyproject.toml) + # matplotlib-venn + # seaborn +matplotlib-inline==0.1.7 + # via + # ipykernel + # ipython +matplotlib-venn==1.1.1 + # via efaar-benchmarking (pyproject.toml) +mccabe==0.7.0 + # via flake8 +mdurl==0.1.2 + # via markdown-it-py +mistune==3.0.2 + # via nbconvert +ml-collections==0.1.1 + # via scvi-tools +ml-dtypes==0.5.0 + # via + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via sympy +msgpack==1.1.0 + # via + # flax + # orbax-checkpoint +mudata==0.3.1 + # via scvi-tools +multidict==6.1.0 + # via + # aiohttp + # yarl +multipledispatch==1.0.0 + # via numpyro +mypy==1.11.2 + # via efaar-benchmarking (pyproject.toml) +mypy-extensions==1.0.0 + # via + # black + # mypy +natsort==8.4.0 + # via anndata +nbclient==0.10.0 + # via nbconvert +nbconvert==7.16.4 + # via jupyter-server +nbformat==5.10.4 + # via + # jupyter-server + # nbclient + # nbconvert +nest-asyncio==1.6.0 + # via + # ipykernel + # orbax-checkpoint +networkx==3.3 + # via torch +nodeenv==1.9.1 + # via pre-commit +notebook-shim==0.2.4 + # via jupyterlab +numpy==2.1.1 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # chex + # contourpy + # flax + # geomloss + # h5py + # jax + # jaxlib + # lightning + # matplotlib + # matplotlib-venn + # ml-dtypes + # numpyro + # optax + # orbax-checkpoint + # pandas + # pyro-ppl + # scikit-learn + # scipy + # scvi-tools + # seaborn + # tensorstore + # torchmetrics +numpyro==0.15.3 + # via scvi-tools +opt-einsum==3.4.0 + # via + # jax + # pyro-ppl +optax==0.2.3 + # via + # flax + # scvi-tools +orbax-checkpoint==0.6.4 + # via flax +overrides==7.7.0 + # via jupyter-server +packaging==24.1 + # via + # anndata + # black + # ipykernel + # jupyter-server + # jupyterlab + # jupyterlab-server + # lightning + # lightning-utilities + # matplotlib + # nbconvert + # plotly + # pyproject-api + # pytest + # pytorch-lightning + # torchmetrics + # tox + # validate-pyproject +pandas==2.2.3 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # scvi-tools + # seaborn +pandocfilters==1.5.1 + # via nbconvert +parso==0.8.4 
+ # via jedi +pathspec==0.12.1 + # via black +pbr==6.1.0 + # via stevedore +pexpect==4.9.0 + # via ipython +pillow==10.4.0 + # via matplotlib +platformdirs==4.3.6 + # via + # black + # jupyter-core + # tox + # virtualenv +plotly==5.24.1 + # via upsetplotly +pluggy==1.5.0 + # via + # pytest + # tox +pre-commit==3.8.0 + # via efaar-benchmarking (pyproject.toml) +prometheus-client==0.21.0 + # via jupyter-server +prompt-toolkit==3.0.48 + # via ipython +protobuf==5.28.2 + # via orbax-checkpoint +psutil==6.0.0 + # via ipykernel +ptyprocess==0.7.0 + # via + # pexpect + # terminado +pure-eval==0.2.3 + # via stack-data +pycodestyle==2.12.1 + # via flake8 +pycparser==2.22 + # via cffi +pyflakes==3.2.0 + # via flake8 +pygments==2.18.0 + # via + # ipython + # nbconvert + # rich +pyparsing==3.1.4 + # via matplotlib +pyproject-api==1.8.0 + # via tox +pyro-api==0.1.2 + # via pyro-ppl +pyro-ppl==1.9.1 + # via scvi-tools +pytest==8.3.3 + # via + # efaar-benchmarking (pyproject.toml) + # pytest-cov +pytest-cov==5.0.0 + # via efaar-benchmarking (pyproject.toml) +python-dateutil==2.9.0.post0 + # via + # arrow + # botocore + # jupyter-client + # matplotlib + # pandas +python-json-logger==2.0.7 + # via jupyter-events +pytorch-lightning==2.4.0 + # via lightning +pytz==2024.2 + # via pandas +pyupgrade==3.17.0 + # via efaar-benchmarking (pyproject.toml) +pyyaml==6.0.2 + # via + # bandit + # docstr-coverage + # flax + # jupyter-events + # lightning + # ml-collections + # orbax-checkpoint + # pre-commit + # pytorch-lightning +pyzmq==26.2.0 + # via + # ipykernel + # jupyter-client + # jupyter-server +radon==6.0.1 + # via efaar-benchmarking (pyproject.toml) +referencing==0.35.1 + # via + # jsonschema + # jsonschema-specifications + # jupyter-events +requests==2.32.3 + # via jupyterlab-server +rfc3339-validator==0.1.4 + # via + # jsonschema + # jupyter-events +rfc3986-validator==0.1.1 + # via + # jsonschema + # jupyter-events +rich==13.9.1 + # via + # bandit + # flax + # scvi-tools +rpds-py==0.20.0 + # via + # jsonschema + # referencing +s3fs==2024.9.0 + # via efaar-benchmarking (pyproject.toml) +scikit-learn==1.5.2 + # via + # efaar-benchmarking (pyproject.toml) + # scvi-tools +scipy==1.14.1 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # jax + # jaxlib + # matplotlib-venn + # scikit-learn + # scvi-tools +scvi-tools==1.1.2 + # via efaar-benchmarking (pyproject.toml) +seaborn==0.13.2 + # via efaar-benchmarking (pyproject.toml) +send2trash==1.8.3 + # via jupyter-server +setuptools==75.1.0 + # via + # chex + # jupyterlab + # lightning-utilities + # torch +six==1.16.0 + # via + # asttokens + # bleach + # docrep + # mando + # ml-collections + # python-dateutil + # rfc3339-validator +sniffio==1.3.1 + # via + # anyio + # httpx +soupsieve==2.6 + # via beautifulsoup4 +stack-data==0.6.3 + # via ipython +stevedore==5.3.0 + # via bandit +sympy==1.13.3 + # via torch +tenacity==9.0.0 + # via plotly +tensorstore==0.1.66 + # via + # flax + # orbax-checkpoint +terminado==0.18.1 + # via + # jupyter-server + # jupyter-server-terminals +threadpoolctl==3.5.0 + # via scikit-learn +tinycss2==1.3.0 + # via nbconvert +tokenize-rt==6.0.0 + # via pyupgrade +toolz==0.12.1 + # via chex +torch==2.4.1 + # via + # efaar-benchmarking (pyproject.toml) + # geomloss + # lightning + # pyro-ppl + # pytorch-lightning + # scvi-tools + # torchmetrics +torchmetrics==1.4.2 + # via + # lightning + # pytorch-lightning + # scvi-tools +tornado==6.4.1 + # via + # ipykernel + # jupyter-client + # jupyter-server + # jupyterlab + # terminado 
+tox==4.21.0 + # via efaar-benchmarking (pyproject.toml) +tqdm==4.66.5 + # via + # docstr-coverage + # lightning + # numpyro + # pyro-ppl + # pytorch-lightning + # scvi-tools + # upsetplotly +traitlets==5.14.3 + # via + # comm + # ipykernel + # ipython + # jupyter-client + # jupyter-core + # jupyter-events + # jupyter-server + # jupyterlab + # matplotlib-inline + # nbclient + # nbconvert + # nbformat +trove-classifiers==2024.9.12 + # via validate-pyproject +types-python-dateutil==2.9.0.20240906 + # via arrow +types-setuptools==75.1.0.20240917 + # via efaar-benchmarking (pyproject.toml) +typing-extensions==4.12.2 + # via + # chex + # etils + # flax + # lightning + # lightning-utilities + # mypy + # orbax-checkpoint + # pytorch-lightning + # torch +tzdata==2024.2 + # via pandas +upsetplotly==0.1.7 + # via efaar-benchmarking (pyproject.toml) +uri-template==1.3.0 + # via jsonschema +urllib3==2.2.3 + # via + # botocore + # requests +validate-pyproject==0.20.2 + # via efaar-benchmarking (pyproject.toml) +virtualenv==20.26.6 + # via + # pre-commit + # tox +wcwidth==0.2.13 + # via prompt-toolkit +webcolors==24.8.0 + # via jsonschema +webencodings==0.5.1 + # via + # bleach + # tinycss2 +websocket-client==1.8.0 + # via jupyter-server +wget==3.2 + # via efaar-benchmarking (pyproject.toml) +wrapt==1.16.0 + # via aiobotocore +yarl==1.13.1 + # via aiohttp +zipp==3.20.2 + # via etils diff --git a/requirements/main_3.11.txt b/requirements/main_3.11.txt index f9fd178..65f068c 100644 --- a/requirements/main_3.11.txt +++ b/requirements/main_3.11.txt @@ -1,59 +1,57 @@ -absl-py==2.0.0 +absl-py==2.1.0 # via # chex # ml-collections # optax # orbax-checkpoint -aiobotocore==2.13.1 +aiobotocore==2.15.1 # via s3fs -aiohttp==3.9.2 +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.10.8 # via # aiobotocore # fsspec # s3fs -aioitertools==0.11.0 +aioitertools==0.12.0 # via aiobotocore aiosignal==1.3.1 # via aiohttp -anndata==0.10.3 +anndata==0.10.9 # via # mudata # scvi-tools -array-api-compat==1.4 +array-api-compat==1.8 # via anndata -attrs==23.1.0 +attrs==24.2.0 # via aiohttp -botocore==1.34.131 +botocore==1.35.23 # via aiobotocore -certifi==2023.11.17 - # via requests -charset-normalizer==3.3.2 - # via requests -chex==0.1.8 +chex==0.1.87 # via optax contextlib2==21.6.0 # via ml-collections -contourpy==1.2.0 +contourpy==1.3.0 # via matplotlib cycler==0.12.1 # via matplotlib -dm-tree==0.1.8 - # via chex docrep==0.3.2 # via scvi-tools -etils[epath,epy]==1.6.0 - # via orbax-checkpoint -filelock==3.13.1 +etils==1.9.4 + # via + # optax + # orbax-checkpoint +filelock==3.16.1 # via torch -flax==0.7.5 +flax==0.9.0 # via scvi-tools -fonttools==4.47.0 +fonttools==4.54.1 # via matplotlib frozenlist==1.4.1 # via # aiohttp # aiosignal -fsspec[http]==2023.12.2 +fsspec==2024.9.0 # via # etils # lightning @@ -61,19 +59,18 @@ fsspec[http]==2023.12.2 # s3fs # torch geomloss==0.2.6 - # via efaar_benchmarking (pyproject.toml) -h5py==3.10.0 + # via efaar-benchmarking (pyproject.toml) +h5py==3.12.1 # via # anndata - # mudata # scvi-tools -idna==3.6 - # via - # requests - # yarl -importlib-resources==6.1.1 +humanize==4.10.0 + # via orbax-checkpoint +idna==3.10 + # via yarl +importlib-resources==6.4.5 # via etils -jax==0.4.23 +jax==0.4.33 # via # chex # flax @@ -81,59 +78,60 @@ jax==0.4.23 # optax # orbax-checkpoint # scvi-tools -jaxlib==0.4.23 +jaxlib==0.4.33 # via # chex + # jax # numpyro # optax # orbax-checkpoint # scvi-tools -jinja2==3.1.3 +jinja2==3.1.4 # via torch jmespath==1.0.1 # via botocore -joblib==1.3.2 +joblib==1.4.2 # via - 
# efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # scikit-learn -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib -lightning==2.1.3 +lightning==2.1.4 # via scvi-tools -lightning-utilities==0.10.0 +lightning-utilities==0.11.7 # via # lightning # pytorch-lightning # torchmetrics markdown-it-py==3.0.0 # via rich -markupsafe==2.1.3 +markupsafe==2.1.5 # via jinja2 -matplotlib==3.8.2 +matplotlib==3.9.2 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # matplotlib-venn # seaborn -matplotlib-venn==0.11.10 - # via efaar_benchmarking (pyproject.toml) +matplotlib-venn==1.1.1 + # via efaar-benchmarking (pyproject.toml) mdurl==0.1.2 # via markdown-it-py ml-collections==0.1.1 # via scvi-tools -ml-dtypes==0.3.1 +ml-dtypes==0.5.0 # via # jax # jaxlib # tensorstore mpmath==1.3.0 # via sympy -msgpack==1.0.7 +msgpack==1.1.0 # via # flax # orbax-checkpoint -mudata==0.2.3 +mudata==0.3.1 # via scvi-tools -multidict==6.0.4 +multidict==6.1.0 # via # aiohttp # yarl @@ -141,16 +139,16 @@ multipledispatch==1.0.0 # via numpyro natsort==8.4.0 # via anndata -nest-asyncio==1.5.8 +nest-asyncio==1.6.0 # via orbax-checkpoint -networkx==3.2.1 +networkx==3.3 # via torch -numpy==1.26.2 +numpy==2.1.1 # via + # efaar-benchmarking (pyproject.toml) # anndata # chex # contourpy - # efaar_benchmarking (pyproject.toml) # flax # geomloss # h5py @@ -160,33 +158,30 @@ numpy==1.26.2 # matplotlib # matplotlib-venn # ml-dtypes - # mudata # numpyro - # opt-einsum # optax # orbax-checkpoint # pandas # pyro-ppl - # pytorch-lightning # scikit-learn # scipy # scvi-tools # seaborn # tensorstore # torchmetrics -numpyro==0.13.2 +numpyro==0.15.3 # via scvi-tools -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # jax # pyro-ppl -optax==0.1.7 +optax==0.2.3 # via # flax # scvi-tools -orbax-checkpoint==0.4.8 +orbax-checkpoint==0.6.4 # via flax -packaging==23.2 +packaging==24.1 # via # anndata # lightning @@ -195,102 +190,99 @@ packaging==23.2 # plotly # pytorch-lightning # torchmetrics -pandas==2.1.4 +pandas==2.2.3 # via + # efaar-benchmarking (pyproject.toml) # anndata - # efaar_benchmarking (pyproject.toml) - # mudata # scvi-tools # seaborn -pillow==10.3.0 +pillow==10.4.0 # via matplotlib -plotly==5.22.0 +plotly==5.24.1 # via upsetplotly -protobuf==4.25.1 +protobuf==5.28.2 # via orbax-checkpoint -pygments==2.17.2 +pygments==2.18.0 # via rich -pyparsing==3.1.1 +pyparsing==3.1.4 # via matplotlib pyro-api==0.1.2 # via pyro-ppl -pyro-ppl==1.8.6 +pyro-ppl==1.9.1 # via scvi-tools -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # botocore # matplotlib # pandas -pytorch-lightning==2.1.3 +pytorch-lightning==2.4.0 # via lightning -pytz==2023.3.post1 +pytz==2024.2 # via pandas -pyyaml==6.0.1 +pyyaml==6.0.2 # via # flax # lightning # ml-collections # orbax-checkpoint # pytorch-lightning -requests==2.31.0 - # via fsspec -rich==13.7.0 +rich==13.9.1 # via # flax # scvi-tools -s3fs==2023.12.2 - # via efaar_benchmarking (pyproject.toml) -scikit-learn==1.3.2 +s3fs==2024.9.0 + # via efaar-benchmarking (pyproject.toml) +scikit-learn==1.5.2 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # scvi-tools -scipy==1.11.4 +scipy==1.14.1 # via + # efaar-benchmarking (pyproject.toml) # anndata - # efaar_benchmarking (pyproject.toml) # jax # jaxlib # matplotlib-venn # scikit-learn # scvi-tools scvi-tools==1.1.2 - # via efaar_benchmarking (pyproject.toml) -seaborn==0.12.2 - # via efaar_benchmarking (pyproject.toml) -setuptools==70.0.0 + # via efaar-benchmarking 
(pyproject.toml) +seaborn==0.13.2 + # via efaar-benchmarking (pyproject.toml) +setuptools==75.1.0 # via lightning-utilities six==1.16.0 # via # docrep # ml-collections # python-dateutil -sympy==1.12 +sympy==1.13.3 # via torch -tenacity==8.3.0 +tenacity==9.0.0 # via plotly -tensorstore==0.1.51 +tensorstore==0.1.66 # via # flax # orbax-checkpoint -threadpoolctl==3.2.0 +threadpoolctl==3.5.0 # via scikit-learn -toolz==0.12.0 +toolz==0.12.1 # via chex -torch==2.1.2 +torch==2.4.1 # via - # efaar_benchmarking (pyproject.toml) + # efaar-benchmarking (pyproject.toml) # geomloss # lightning # pyro-ppl # pytorch-lightning # scvi-tools # torchmetrics -torchmetrics==1.2.1 +torchmetrics==1.4.2 # via # lightning # pytorch-lightning # scvi-tools -tqdm==4.66.1 +tqdm==4.66.5 # via # lightning # numpyro @@ -298,7 +290,7 @@ tqdm==4.66.1 # pytorch-lightning # scvi-tools # upsetplotly -typing-extensions==4.9.0 +typing-extensions==4.12.2 # via # chex # etils @@ -308,19 +300,17 @@ typing-extensions==4.9.0 # orbax-checkpoint # pytorch-lightning # torch -tzdata==2023.4 +tzdata==2024.2 # via pandas upsetplotly==0.1.7 - # via efaar_benchmarking (pyproject.toml) -urllib3==2.0.7 - # via - # botocore - # requests + # via efaar-benchmarking (pyproject.toml) +urllib3==2.2.3 + # via botocore wget==3.2 - # via efaar_benchmarking (pyproject.toml) + # via efaar-benchmarking (pyproject.toml) wrapt==1.16.0 # via aiobotocore -yarl==1.9.4 +yarl==1.13.1 # via aiohttp -zipp==3.17.0 +zipp==3.20.2 # via etils diff --git a/requirements/main_3.12.txt b/requirements/main_3.12.txt new file mode 100644 index 0000000..1d2667e --- /dev/null +++ b/requirements/main_3.12.txt @@ -0,0 +1,319 @@ +absl-py==2.1.0 + # via + # chex + # ml-collections + # optax + # orbax-checkpoint +aiobotocore==2.15.1 + # via s3fs +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.10.8 + # via + # aiobotocore + # fsspec + # s3fs +aioitertools==0.12.0 + # via aiobotocore +aiosignal==1.3.1 + # via aiohttp +anndata==0.10.9 + # via + # mudata + # scvi-tools +array-api-compat==1.8 + # via anndata +attrs==24.2.0 + # via aiohttp +botocore==1.35.23 + # via aiobotocore +chex==0.1.87 + # via optax +contextlib2==21.6.0 + # via ml-collections +contourpy==1.3.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +docrep==0.3.2 + # via scvi-tools +etils==1.9.4 + # via + # optax + # orbax-checkpoint +filelock==3.16.1 + # via torch +flax==0.9.0 + # via scvi-tools +fonttools==4.54.1 + # via matplotlib +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +fsspec==2024.9.0 + # via + # etils + # lightning + # pytorch-lightning + # s3fs + # torch +geomloss==0.2.6 + # via efaar-benchmarking (pyproject.toml) +h5py==3.12.1 + # via + # anndata + # scvi-tools +humanize==4.10.0 + # via orbax-checkpoint +idna==3.10 + # via yarl +importlib-resources==6.4.5 + # via etils +jax==0.4.33 + # via + # chex + # flax + # numpyro + # optax + # orbax-checkpoint + # scvi-tools +jaxlib==0.4.33 + # via + # chex + # jax + # numpyro + # optax + # orbax-checkpoint + # scvi-tools +jinja2==3.1.4 + # via torch +jmespath==1.0.1 + # via botocore +joblib==1.4.2 + # via + # efaar-benchmarking (pyproject.toml) + # scikit-learn +kiwisolver==1.4.7 + # via matplotlib +lightning==2.1.4 + # via scvi-tools +lightning-utilities==0.11.7 + # via + # lightning + # pytorch-lightning + # torchmetrics +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.5 + # via jinja2 +matplotlib==3.9.2 + # via + # efaar-benchmarking (pyproject.toml) + # matplotlib-venn + # seaborn +matplotlib-venn==1.1.1 + # via efaar-benchmarking 
(pyproject.toml) +mdurl==0.1.2 + # via markdown-it-py +ml-collections==0.1.1 + # via scvi-tools +ml-dtypes==0.5.0 + # via + # jax + # jaxlib + # tensorstore +mpmath==1.3.0 + # via sympy +msgpack==1.1.0 + # via + # flax + # orbax-checkpoint +mudata==0.3.1 + # via scvi-tools +multidict==6.1.0 + # via + # aiohttp + # yarl +multipledispatch==1.0.0 + # via numpyro +natsort==8.4.0 + # via anndata +nest-asyncio==1.6.0 + # via orbax-checkpoint +networkx==3.3 + # via torch +numpy==2.1.1 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # chex + # contourpy + # flax + # geomloss + # h5py + # jax + # jaxlib + # lightning + # matplotlib + # matplotlib-venn + # ml-dtypes + # numpyro + # optax + # orbax-checkpoint + # pandas + # pyro-ppl + # scikit-learn + # scipy + # scvi-tools + # seaborn + # tensorstore + # torchmetrics +numpyro==0.15.3 + # via scvi-tools +opt-einsum==3.4.0 + # via + # jax + # pyro-ppl +optax==0.2.3 + # via + # flax + # scvi-tools +orbax-checkpoint==0.6.4 + # via flax +packaging==24.1 + # via + # anndata + # lightning + # lightning-utilities + # matplotlib + # plotly + # pytorch-lightning + # torchmetrics +pandas==2.2.3 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # scvi-tools + # seaborn +pillow==10.4.0 + # via matplotlib +plotly==5.24.1 + # via upsetplotly +protobuf==5.28.2 + # via orbax-checkpoint +pygments==2.18.0 + # via rich +pyparsing==3.1.4 + # via matplotlib +pyro-api==0.1.2 + # via pyro-ppl +pyro-ppl==1.9.1 + # via scvi-tools +python-dateutil==2.9.0.post0 + # via + # botocore + # matplotlib + # pandas +pytorch-lightning==2.4.0 + # via lightning +pytz==2024.2 + # via pandas +pyyaml==6.0.2 + # via + # flax + # lightning + # ml-collections + # orbax-checkpoint + # pytorch-lightning +rich==13.9.1 + # via + # flax + # scvi-tools +s3fs==2024.9.0 + # via efaar-benchmarking (pyproject.toml) +scikit-learn==1.5.2 + # via + # efaar-benchmarking (pyproject.toml) + # scvi-tools +scipy==1.14.1 + # via + # efaar-benchmarking (pyproject.toml) + # anndata + # jax + # jaxlib + # matplotlib-venn + # scikit-learn + # scvi-tools +scvi-tools==1.1.2 + # via efaar-benchmarking (pyproject.toml) +seaborn==0.13.2 + # via efaar-benchmarking (pyproject.toml) +setuptools==75.1.0 + # via + # chex + # lightning-utilities + # torch +six==1.16.0 + # via + # docrep + # ml-collections + # python-dateutil +sympy==1.13.3 + # via torch +tenacity==9.0.0 + # via plotly +tensorstore==0.1.66 + # via + # flax + # orbax-checkpoint +threadpoolctl==3.5.0 + # via scikit-learn +toolz==0.12.1 + # via chex +torch==2.4.1 + # via + # efaar-benchmarking (pyproject.toml) + # geomloss + # lightning + # pyro-ppl + # pytorch-lightning + # scvi-tools + # torchmetrics +torchmetrics==1.4.2 + # via + # lightning + # pytorch-lightning + # scvi-tools +tqdm==4.66.5 + # via + # lightning + # numpyro + # pyro-ppl + # pytorch-lightning + # scvi-tools + # upsetplotly +typing-extensions==4.12.2 + # via + # chex + # etils + # flax + # lightning + # lightning-utilities + # orbax-checkpoint + # pytorch-lightning + # torch +tzdata==2024.2 + # via pandas +upsetplotly==0.1.7 + # via efaar-benchmarking (pyproject.toml) +urllib3==2.2.3 + # via botocore +wget==3.2 + # via efaar-benchmarking (pyproject.toml) +wrapt==1.16.0 + # via aiobotocore +yarl==1.13.1 + # via aiohttp +zipp==3.20.2 + # via etils
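
Note on the pinned files above: the per-Python lock files (requirements/main_3.11.txt, requirements/main_3.12.txt) use "# via" provenance comments in the pip-compile style, where entries annotated with "(pyproject.toml)" are direct dependencies of efaar-benchmarking and everything else is transitive. The sketch below is not part of the repo and the helper name summarize_lockfile is hypothetical; it only illustrates, under that assumption about the file format, how one could list which pins are direct versus transitive after applying this diff.

# Minimal sketch (assumed file path from this PR; not the project's own tooling).
# It scans a pip-compile-style lock file and flags packages whose "# via"
# annotations include "(pyproject.toml)", i.e. direct dependencies.
import re
from pathlib import Path

def summarize_lockfile(path: str) -> dict[str, bool]:
    """Map each pinned package name to True if it is a direct dependency."""
    pins: dict[str, bool] = {}
    current = None
    for line in Path(path).read_text().splitlines():
        pin = re.match(r"^([A-Za-z0-9_.\-]+)==", line)
        if pin:
            # A new "name==version" pin; "# via" lines that follow belong to it.
            current = pin.group(1)
            pins[current] = False
        elif current and "(pyproject.toml)" in line:
            pins[current] = True
    return pins

if __name__ == "__main__":
    summary = summarize_lockfile("requirements/main_3.12.txt")
    direct = sorted(name for name, is_direct in summary.items() if is_direct)
    print(f"{len(summary)} pinned packages, {len(direct)} direct:", ", ".join(direct))

Run against the new main_3.12.txt, this would report direct pins such as geomloss, joblib, matplotlib, numpy, pandas, scvi-tools, and torch, matching the "(pyproject.toml)" annotations in the diff.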