diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1480220..2725815 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,6 +6,6 @@ repos:
       # - id: end-of-file-fixer
       # - id: trailing-whitespace
   - repo: https://github.com/psf/black
-    rev: 19.3b0
+    rev: 23.3.0
     hooks:
       - id: black
diff --git a/examples/crabnet_performance.py b/examples/crabnet_performance.py
index 54edd42..c3819e8 100644
--- a/examples/crabnet_performance.py
+++ b/examples/crabnet_performance.py
@@ -4,6 +4,7 @@
 # TODO: incorporate CrabNet uncertainty into search
 """
 from crabnet.crabnet_ import CrabNet
+import pandas as pd
 
 # %% imports
 from tqdm import tqdm
@@ -82,7 +83,7 @@
     idx = perf_val_df.pred.idxmax()
     # idx = np.where(val_pred == max(val_pred))[0][0]
     move_row = perf_val_df.loc[idx]
-    perf_train_df.append(move_row)
+    perf_train_df = pd.concat((perf_train_df, move_row))
     perf_val_df = perf_val_df.drop(index=idx)
     next_experiments.append(move_row.to_dict())
     experiment = ad_experiments_metrics(
diff --git a/mat_discover/adaptive_design.py b/mat_discover/adaptive_design.py
index 6b68ee8..7a2b14c 100644
--- a/mat_discover/adaptive_design.py
+++ b/mat_discover/adaptive_design.py
@@ -150,7 +150,7 @@ def suggest_next_experiment(
         # append compound to train, remove from val, and reset indices
         # https://stackoverflow.com/a/12204428/13697228
         move_row = self.val_df[self.val_df.index == next_index]
-        self.train_df = self.train_df.append(move_row)
+        self.train_df = pd.concat((self.train_df, move_row))
         self.val_df = self.val_df[self.val_df.index != next_index]
 
         # self.val_df = self.val_df.drop(index=next_index)
diff --git a/mat_discover/mat_discover_.py b/mat_discover/mat_discover_.py
index 0b9f705..ec28d81 100644
--- a/mat_discover/mat_discover_.py
+++ b/mat_discover/mat_discover_.py
@@ -813,8 +813,8 @@ def predict(
             f"self.val_rad_neigh_avg` and `self.val_k_neigh_avg` are being assigned the same values as `val_dens` for compatibility reasons since a non-DiSCoVeR novelty learner was specified: {self.novelty_learner}."
         )
         # composition-based featurization
-        X_train: Union[pd.DataFrame, np.ndarray, List] = []
-        X_val: Union[pd.DataFrame, np.ndarray, List] = []
+        X_train: Union[np.ndarray, List] = []
+        X_val: Union[np.ndarray, List] = []
         assert self.train_inputs is not None
         if self.novelty_prop == "mod_petti":
             assert isinstance(X_train, list)
diff --git a/mat_discover/utils/extraordinary.py b/mat_discover/utils/extraordinary.py
index 3fae521..602acfb 100644
--- a/mat_discover/utils/extraordinary.py
+++ b/mat_discover/utils/extraordinary.py
@@ -17,7 +17,7 @@ def extraordinary_split(
 ):
     # set aside high-performing candidates
     if val_df is not None:
-        train_val_df = train_df.append(val_df)
+        train_val_df = pd.concat((train_df, val_df))
     else:
         train_val_df = train_df
 
@@ -31,7 +31,7 @@
     train_df, val_df = train_test_split(
         train_val_df, train_size=train_size, random_state=random_state
    )
-    val_df = val_df.append(extraordinary_df)
+    val_df = pd.concat((val_df, extraordinary_df))
 
     return train_df, val_df, extraordinary_thresh
diff --git a/pyproject.toml b/pyproject.toml
index e3bc043..322a35e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,6 +33,7 @@ dependencies = [
     "umap-learn",
     "dill",
     "crabnet >=2.0.5,<3.0.0",
+    "torch <2",
     "chem_wasserstein >=1.0.8,<2.0.0",
     "composition_based_feature_vector",
     "matbench_genmetrics >= 0.6.1",
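
The recurring change in this diff swaps `DataFrame.append` (deprecated in pandas 1.4, removed in pandas 2.0) for `pd.concat`. Below is a minimal sketch of that migration pattern, not part of the diff itself; the `formula`/`target` column names and the small example frames are hypothetical, used only for illustration.

```python
import pandas as pd

# illustrative data; column names are hypothetical, not taken from the repo
train_df = pd.DataFrame({"formula": ["Al2O3"], "target": [1.0]})
val_df = pd.DataFrame({"formula": ["SiO2", "MgO"], "target": [2.0, 3.0]})

# before (removed in pandas 2.0):
#     train_df = train_df.append(val_df)
# after:
train_df = pd.concat((train_df, val_df))

# moving a single candidate between splits: boolean indexing keeps
# move_row as a one-row DataFrame, so the same concat pattern applies
next_index = val_df.index[0]
move_row = val_df[val_df.index == next_index]
train_df = pd.concat((train_df, move_row))
val_df = val_df[val_df.index != next_index]
```

Note that, unlike `append`, `pd.concat` never operates in place, so the result must be assigned back to the original variable, as the changed lines above do.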