diff --git a/.buildinfo b/.buildinfo
index fddc69866..926c8c978 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: b5d0ff58eedd87b93894b95d734e1b60
+config: ac9e52e0f6ce67401b12e91af06590f6
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
deleted file mode 100644
index 03fc8a1d9..000000000
--- a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
+++ /dev/null
@@ -1,224 +0,0 @@
-2024-05-17 04:21:19 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
-2024-05-17 04:21:20 (WARNING): Detected old config, converting to new format. Consider updating to avoid potential incompatibilities.
-2024-05-17 04:21:20 (INFO): amp: true
-cmd:
-  checkpoint_dir: fine-tuning/checkpoints/2024-05-17-04-22-24-ft-oxides
-  commit: bcb3cf5
-  identifier: ft-oxides
-  logs_dir: fine-tuning/logs/tensorboard/2024-05-17-04-22-24-ft-oxides
-  print_every: 10
-  results_dir: fine-tuning/results/2024-05-17-04-22-24-ft-oxides
-  seed: 0
-  timestamp_id: 2024-05-17-04-22-24-ft-oxides
-  version: 1.0.0
-dataset:
-  a2g_args:
-    r_energy: true
-    r_forces: true
-  format: ase_db
-  key_mapping:
-    force: forces
-    y: energy
-  src: train.db
-eval_metrics:
-  metrics:
-    energy:
-    - mae
-    forces:
-    - forcesx_mae
-    - forcesy_mae
-    - forcesz_mae
-    - mae
-    - cosine_similarity
-    - magnitude_error
-    misc:
-    - energy_forces_within_threshold
-  primary_metric: forces_mae
-gpus: 0
-logger: tensorboard
-loss_fns:
-- energy:
-    coefficient: 1
-    fn: mae
-- forces:
-    coefficient: 1
-    fn: l2mae
-model: gemnet_oc
-model_attributes:
-  activation: silu
-  atom_edge_interaction: true
-  atom_interaction: true
-  cbf:
-    name: spherical_harmonics
-  cutoff: 12.0
-  cutoff_aeaint: 12.0
-  cutoff_aint: 12.0
-  cutoff_qint: 12.0
-  direct_forces: true
-  edge_atom_interaction: true
-  emb_size_aint_in: 64
-  emb_size_aint_out: 64
-  emb_size_atom: 256
-  emb_size_cbf: 16
-  emb_size_edge: 512
-  emb_size_quad_in: 32
-  emb_size_quad_out: 32
-  emb_size_rbf: 16
-  emb_size_sbf: 32
-  emb_size_trip_in: 64
-  emb_size_trip_out: 64
-  envelope:
-    exponent: 5
-    name: polynomial
-  extensive: true
-  forces_coupled: false
-  max_neighbors: 30
-  max_neighbors_aeaint: 20
-  max_neighbors_aint: 1000
-  max_neighbors_qint: 8
-  num_after_skip: 2
-  num_atom: 3
-  num_atom_emb_layers: 2
-  num_before_skip: 2
-  num_blocks: 4
-  num_concat: 1
-  num_global_out_layers: 2
-  num_output_afteratom: 3
-  num_radial: 128
-  num_spherical: 7
-  otf_graph: true
-  output_init: HeOrthogonal
-  qint_tags:
-  - 1
-  - 2
-  quad_interaction: true
-  rbf:
-    name: gaussian
-  regress_forces: true
-  sbf:
-    name: legendre_outer
-  symmetric_edge_symmetrization: false
-noddp: false
-optim:
-  batch_size: 4
-  clip_grad_norm: 10
-  ema_decay: 0.999
-  energy_coefficient: 1
-  eval_batch_size: 16
-  eval_every: 10
-  factor: 0.8
-  force_coefficient: 1
-  load_balancing: atoms
-  loss_energy: mae
-  lr_initial: 0.0005
-  max_epochs: 1
-  mode: min
-  num_workers: 2
-  optimizer: AdamW
-  optimizer_params:
-    amsgrad: true
-  patience: 3
-  scheduler: ReduceLROnPlateau
-  weight_decay: 0
-outputs:
-  energy:
-    level: system
-  forces:
-    eval_on_free_atoms: true
-    level: atom
-    train_on_free_atoms: false
-slurm: {}
-task:
-  dataset: ase_db
-test_dataset:
-  a2g_args:
-    r_energy: false
-    r_forces: false
-  src: test.db
-trainer: ocp
-val_dataset:
-  a2g_args:
-    r_energy: true
-    r_forces: true
-  src: val.db
-
-2024-05-17 04:21:20 (INFO): Loading dataset: ase_db
-2024-05-17 04:21:20 (INFO): rank: 0: Sampler created...
-2024-05-17 04:21:20 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 04:21:20 (INFO): rank: 0: Sampler created...
-2024-05-17 04:21:20 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 04:21:20 (INFO): rank: 0: Sampler created...
-2024-05-17 04:21:20 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 04:21:20 (INFO): Loading model: gemnet_oc
-2024-05-17 04:21:20 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization']
-2024-05-17 04:21:23 (INFO): Loaded GemNetOC with 38864438 parameters.
-2024-05-17 04:21:23 (WARNING): Model gradient logging to tensorboard not yet supported.
-2024-05-17 04:21:23 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated.
-2024-05-17 04:21:23 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
-2024-05-17 04:21:23 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
-/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
-  storage = elem.storage()._new_shared(numel)
-/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
-  storage = elem.storage()._new_shared(numel)
-2024-05-17 04:21:46 (INFO): energy_forces_within_threshold: 0.00e+00, energy_mae: 6.77e+00, forcesx_mae: 4.11e-02, forcesy_mae: 3.63e-02, forcesz_mae: 5.26e-02, forces_mae: 4.33e-02, forces_cosine_similarity: 8.24e-02, forces_magnitude_error: 7.42e-02, loss: 6.86e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01
-2024-05-17 04:21:47 (INFO): Evaluating on val.
-  device 0:   0%|          | 0/2 [00:00
-    selection: natoms>5,xc=PBE
-  src: data.db
-eval_metrics:
-  metrics:
-    energy:
-    - mae
-    forces:
-    - forcesx_mae
-    - forcesy_mae
-    - forcesz_mae
-    - mae
-    - cosine_similarity
-    - magnitude_error
-    misc:
-    - energy_forces_within_threshold
-  primary_metric: forces_mae
-gpus: 0
-logger: tensorboard
-loss_fns:
-- energy:
-    coefficient: 1
-    fn: mae
-- forces:
-    coefficient: 1
-    fn: l2mae
-model: gemnet_t
-model_attributes:
-  activation: silu
-  cbf:
-    name: spherical_harmonics
-  cutoff: 6.0
-  direct_forces: true
-  emb_size_atom: 512
-  emb_size_bil_trip: 64
-  emb_size_cbf: 16
-  emb_size_edge: 512
-  emb_size_rbf: 16
-  emb_size_trip: 64
-  envelope:
-    exponent: 5
-    name: polynomial
-  extensive: true
-  max_neighbors: 50
-  num_after_skip: 2
-  num_atom: 3
-  num_before_skip: 1
-  num_blocks: 3
-  num_concat: 1
-  num_radial: 128
-  num_spherical: 7
-  otf_graph: true
-  output_init: HeOrthogonal
-  rbf:
-    name: gaussian
-  regress_forces: true
-noddp: false
-optim:
-  batch_size: 16
-  clip_grad_norm: 10
-  ema_decay: 0.999
-  energy_coefficient: 1
-  eval_batch_size: 16
-  eval_every: 5000
-  force_coefficient: 1
-  loss_energy: mae
-  loss_force: atomwisel2
-  lr_gamma: 0.8
-  lr_initial: 0.0005
-  lr_milestones:
-  - 64000
-  - 96000
-  - 128000
-  - 160000
-  - 192000
-  max_epochs: 80
-  num_workers: 2
-  optimizer: AdamW
-  optimizer_params:
-    amsgrad: true
-  warmup_steps: -1
-outputs:
-  energy:
-    level: system
-  forces:
-    eval_on_free_atoms: true
-    level: atom
-    train_on_free_atoms: false
-slurm: {}
-task:
-  dataset: ase_db
-  prediction_dtype: float32
-test_dataset:
-  a2g_args:
-    r_energy: false
-    r_forces: false
-  select_args:
-    selection: natoms>5,xc=PBE
-  src: data.db
-trainer: ocp
-val_dataset: null
-
-2024-05-17 04:26:52 (INFO): Loading dataset: ase_db
-2024-05-17 04:26:52 (INFO): rank: 0: Sampler created...
-2024-05-17 04:26:52 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 04:26:52 (INFO): rank: 0: Sampler created...
-2024-05-17 04:26:52 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 04:26:52 (INFO): Loading model: gemnet_t
-2024-05-17 04:26:54 (INFO): Loaded GemNetT with 31671825 parameters.
-2024-05-17 04:26:54 (WARNING): Model gradient logging to tensorboard not yet supported.
-2024-05-17 04:26:54 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gndt_oc22_all_s2ef.pt
-2024-05-17 04:26:54 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
-2024-05-17 04:26:54 (WARNING): Scale factor comment not found in model
-2024-05-17 04:26:54 (INFO): Predicting on test.
-  device 0:   0%|          | 0/3 [00:00
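For orientation: the two deleted logs above were produced by the fine-tuning and mass-inference tutorial runs, each driven by the YAML config echoed at the top of its log. A minimal sketch of how the fine-tuning run might be launched, assuming the repo-era `main.py` entry point and a `config.yml` holding the gemnet_oc config shown above (both names are assumptions, not part of this diff):

```python
import subprocess

# Hypothetical reconstruction of the launch behind the deleted train.txt log.
# The identifier, run dir, checkpoint path, and AMP setting are taken from the
# log itself; "main.py" and "config.yml" are assumed names.
subprocess.run(
    [
        "python", "main.py",
        "--mode", "train",
        "--config-yml", "config.yml",
        "--checkpoint", "/tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt",
        "--run-dir", "fine-tuning",
        "--identifier", "ft-oxides",
        "--amp",
    ],
    check=True,
)
```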

fairchem by FAIR Chemistry

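The second deleted log records a batched prediction pass ("Predicting on test.") with the `gndt_oc22_all_s2ef.pt` checkpoint. A single-structure sketch of the same idea through ASE, using the `OCPCalculator` that the CatTSunami tutorial below imports; the copper slab is a toy stand-in for the tutorial's `data.db` entries:

```python
from ase.build import fcc111
from fairchem.core.common.relaxation.ase_utils import OCPCalculator

# Attach the checkpoint named in the deleted log to an ASE atoms object and
# query energy/forces directly (single structure, not the batched predict run).
calc = OCPCalculator(
    checkpoint_path="/tmp/ocp_checkpoints/gndt_oc22_all_s2ef.pt",  # path from the log
    cpu=True,  # set cpu=False if a GPU is available
)

slab = fcc111("Cu", size=(2, 2, 3), vacuum=8.0)  # toy slab, not from data.db
slab.calc = calc
print("energy [eV]:", slab.get_potential_energy())
print("max |F| [eV/Ang]:", abs(slab.get_forces()).max())
```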
+#### FAIR-Chem overview
+
 `fairchem` is [FAIR](https://ai.meta.com/research/) Chemistry's centralized repository of all its data, models,
 demos, and application efforts for materials science and quantum chemistry.
 
 Collaborative projects that contribute or use the models and approaches in this repo:
@@ -22,7 +25,7 @@ We re-organized and rebranded the repository in 2024 (previously the `fairchem`
 general usability of these models beyond catalysis, including things like direct air capture.
 ```
 
-### Datasets in `fairchem`:
+#### Datasets in `fairchem`:
 `fairchem` provides training and evaluation code for tasks and models that take arbitrary
 chemical structures as input to predict energies / forces / positions / stresses,
 and can be used as a base scaffold for research projects. For an overview of
 tasks, data, and metrics, please read the documentation and respective papers:
 - [OC20Dense](core/datasets/oc20dense)
 - [OC20NEB](core/datasets/oc20neb)
@@ -33,7 +36,7 @@ tasks, data, and metrics, please read the documentation and respective papers:
 
-### Projects and models built on `fairchem`:
+#### Projects and models built on `fairchem`:
 
 - SchNet [[`arXiv`](https://arxiv.org/abs/1706.08566)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/models/schnet.py)]
 - DimeNet++ [[`arXiv`](https://arxiv.org/abs/2011.14115)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/models/dimenet_plus_plus.py)]
@@ -53,7 +56,7 @@ Older model implementations that are no longer supported:
 - SpinConv [[`arXiv`](https://arxiv.org/abs/2106.09575)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/e7a8745eb307e8a681a1aa9d30c36e8c41e9457e/ocpmodels/models/spinconv.py)]
 - ForceNet [[`arXiv`](https://arxiv.org/abs/2103.01436)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/e7a8745eb307e8a681a1aa9d30c36e8c41e9457e/ocpmodels/models/forcenet.py)]
 
-## Discussion
+### Discussion
 
 For all non-codebase-related questions, and to stay up to date with the latest OCP
 announcements, please join the [discussion board](https://discuss.opencatalystproject.org/).
@@ -61,7 +64,7 @@ announcements, please join the [discussion board](https://discuss.opencatalystpr
 
 All code-related questions and issues should be posted directly on our
 [issues page](https://github.com/FAIR-Chem/fairchem/issues).
 
-## Acknowledgements
+### Acknowledgements
 
 - This codebase was initially forked from [CGCNN](https://github.com/txie-93/cgcnn)
 by [Tian Xie](http://txie.me), but has undergone significant changes since.
@@ -70,11 +73,11 @@ by [Tian Xie](http://txie.me), but has undergone significant changes since.
 - It was then developed as the OCP repo, and includes many contributions from the community and collaborators.
 - Much of the documentation was developed for various papers or as part of a comprehensive tutorial for the
 2023 ACS Fall Chemistry conference.
 
-## License
+### License
 
 `fairchem` is released under the [MIT](https://github.com/FAIR-Chem/fairchem/blob/main/LICENSE.md) license.
-## Citing `fairchem`
+### Citing `fairchem`
 
 If you use this codebase in your work, please consider citing:
 
diff --git a/_sources/tutorials/cattsunami_walkthrough.md b/_sources/tutorials/cattsunami_walkthrough.md
index 648ab4c3b..5d70a38ff 100644
--- a/_sources/tutorials/cattsunami_walkthrough.md
+++ b/_sources/tutorials/cattsunami_walkthrough.md
@@ -14,6 +14,9 @@ kernelspec:
 # CatTSunami tutorial
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 from fairchem.applications.cattsunami.core import Reaction
 from fairchem.data.oc.core import Slab, Adsorbate, Bulk, AdsorbateSlabConfig
 from fairchem.core.common.relaxation.ase_utils import OCPCalculator
@@ -40,6 +43,9 @@ To start, we generate placements for the reactant and product species on the sur
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 # Instantiate the reaction class for the reaction of interest
 reaction = Reaction(reaction_str_from_db="*CH -> *C + *H",
                     reaction_db_path=DISSOCIATION_REACTION_DB_PATH,
@@ -68,6 +74,9 @@ product2_configs = AdsorbateSlabConfig(slab = slab[0], adsorbate = product2,
 ```
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 # Instantiate the calculator
 # NOTE: If you have a GPU, use cpu = False
 # NOTE: Change the checkpoint path to locally downloaded files as needed
@@ -87,6 +96,9 @@ There are 2 options for how to do this.
 You need to provide the calculator with a path to a model checkpoint file. That can be downloaded [here](../core/model_checkpoints)
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 # Relax the reactant systems
 reactant_energies = []
 for config in reactant_configs:
@@ -116,6 +128,9 @@ Here we use the class we created to handle automatic generation of NEB frames to
 
 ![dissociation_scheme](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/applications/cattsunami/tutorial/dissociation_scheme.png)
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 af = AutoFrameDissociation(
             reaction = reaction,
             reactant_system = reactant_configs[reactant_energies.index(min(reactant_energies))],
@@ -140,6 +155,9 @@ frame_sets, mapping_idxs = af.get_neb_frames(calc,
 
 Here we use the custom child class we created to run NEB relaxations using ML; it batches the frame relaxations, improving efficiency.
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 ## This will run all NEBs enumerated - to just run one, run the code cell below.
 # On GPU, each NEB takes an average of ~1 minute, so this could take around half an hour on GPU
 # but much longer on CPU
@@ -172,6 +190,9 @@ Here we use the custom child class we created to run NEB relaxations using ML. T
 ```
 
 ```{code-cell} ipython3
+---
+tags: ["skip-execution"]
+---
 # If you run the above cell -- don't run this one
 fmax = 0.05  # [eV / ang]
 delta_fmax_climb = 0.4
diff --git a/_static/logo.png b/_static/logo.png
deleted file mode 100644
index ffb62c119..000000000
Binary files a/_static/logo.png and /dev/null differ
diff --git a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
index 1abbaf8cf..14446143c 100644
--- a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
+++ b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
@@ -8,7 +8,7 @@
-adsorbml.2023_neurips_challenge.challenge_eval — FAIR Chemistry Documentation
+adsorbml.2023_neurips_challenge.challenge_eval
@@ -146,14 +146,8 @@
-FAIR Chemistry Documentation - Home
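The tutorial's batched NEB runner itself is outside this hunk; as a rough, non-batched stand-in for the same two-stage schedule (loose convergence at `fmax + delta_fmax_climb`, then climbing-image refinement down to `fmax`), here is a sketch using plain ASE NEB with the `OCPCalculator` imported at the top of the tutorial. `frames` is assumed to be one entry of the tutorial's `frame_sets`, with endpoints already relaxed:

```python
from ase.neb import NEB  # ase.mep.NEB in newer ASE releases
from ase.optimize import BFGS
from fairchem.core.common.relaxation.ase_utils import OCPCalculator

def run_single_neb(frames, checkpoint_path, fmax=0.05, delta_fmax_climb=0.4):
    # One shared ML calculator for all interior images; endpoints stay fixed.
    calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=True)
    for image in frames[1:-1]:
        image.calc = calc
    neb = NEB(frames, allow_shared_calculator=True)
    opt = BFGS(neb, trajectory="neb.traj")
    # Stage 1: converge loosely before turning on the climbing image.
    opt.run(fmax=fmax + delta_fmax_climb, steps=200)
    # Stage 2: climbing-image refinement down to the target fmax.
    neb.climb = True
    opt.run(fmax=fmax, steps=300)
    return frames
```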