Skip to content

Commit a299cf3

Browse files
authored
Merge pull request #9 from kiharalab/SWORDv2
fix bug in SWORDv2
2 parents 5b3c1f3 + 22e4b70 commit a299cf3

33 files changed

Lines changed: 12516 additions & 376 deletions

.gitignore

Lines changed: 172 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,174 @@
11
.idea/
22
.DS_Store
3+
.pixi/
4+
Predict_Result/
5+
output_half/
6+
af3_models/
7+
8+
# SWORD2
9+
*.dat
10+
*.nrg
11+
*.o
12+
13+
# Byte-compiled / optimized / DLL files
14+
__pycache__/
15+
*.py[cod]
16+
*$py.class
17+
18+
# C extensions
19+
*.so
20+
21+
# Distribution / packaging
22+
.Python
23+
build/
24+
develop-eggs/
25+
dist/
26+
downloads/
27+
eggs/
28+
.eggs/
29+
lib/
30+
lib64/
31+
parts/
32+
sdist/
33+
var/
34+
wheels/
35+
share/python-wheels/
36+
*.egg-info/
37+
.installed.cfg
38+
*.egg
39+
MANIFEST
40+
41+
# PyInstaller
42+
# Usually these files are written by a python script from a template
43+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
44+
*.manifest
45+
*.spec
46+
47+
# Installer logs
48+
pip-log.txt
49+
pip-delete-this-directory.txt
50+
51+
# Unit test / coverage reports
52+
htmlcov/
53+
.tox/
54+
.nox/
55+
.coverage
56+
.coverage.*
57+
.cache
58+
nosetests.xml
59+
coverage.xml
60+
*.cover
61+
*.py,cover
62+
.hypothesis/
63+
.pytest_cache/
64+
cover/
65+
66+
# Translations
67+
*.mo
68+
*.pot
69+
70+
# Django stuff:
71+
*.log
72+
local_settings.py
73+
db.sqlite3
74+
db.sqlite3-journal
75+
76+
# Flask stuff:
77+
instance/
78+
.webassets-cache
79+
80+
# Scrapy stuff:
81+
.scrapy
82+
83+
# Sphinx documentation
84+
docs/_build/
85+
86+
# PyBuilder
87+
.pybuilder/
88+
target/
89+
90+
# Jupyter Notebook
91+
.ipynb_checkpoints
92+
93+
# IPython
94+
profile_default/
95+
ipython_config.py
96+
97+
# pyenv
98+
# For a library or package, you might want to ignore these files since the code is
99+
# intended to run in multiple environments; otherwise, check them in:
100+
# .python-version
101+
102+
# pipenv
103+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
104+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
105+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
106+
# install all needed dependencies.
107+
#Pipfile.lock
108+
109+
# poetry
110+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
111+
# This is especially recommended for binary packages to ensure reproducibility, and is more
112+
# commonly ignored for libraries.
113+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
114+
#poetry.lock
115+
116+
# pdm
117+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
118+
#pdm.lock
119+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
120+
# in version control.
121+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
122+
.pdm.toml
123+
.pdm-python
124+
.pdm-build/
125+
126+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
127+
__pypackages__/
128+
129+
# Celery stuff
130+
celerybeat-schedule
131+
celerybeat.pid
132+
133+
# SageMath parsed files
134+
*.sage.py
135+
136+
# Environments
137+
.env
138+
.venv
139+
env/
140+
venv/
141+
ENV/
142+
env.bak/
143+
venv.bak/
144+
145+
# Spyder project settings
146+
.spyderproject
147+
.spyproject
148+
149+
# Rope project settings
150+
.ropeproject
151+
152+
# mkdocs documentation
153+
/site
154+
155+
# mypy
156+
.mypy_cache/
157+
.dmypy.json
158+
dmypy.json
159+
160+
# Pyre type checker
161+
.pyre/
162+
163+
# pytype static type analyzer
164+
.pytype/
165+
166+
# Cython debug symbols
167+
cython_debug/
168+
169+
# PyCharm
170+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
171+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
172+
# and can be added to the global gitignore or merged into this file. For a more nuclear
173+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
174+
#.idea/

README.md

Lines changed: 32 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -73,34 +73,52 @@ validated its generalizability with plausible performances.
7373

7474
<details>
7575

76-
77-
7876
### System Requirements
79-
CPU: >=4 cores <br>
80-
Memory (RAM): >=12Gb. <br>
81-
GPU: any GPU supports CUDA with at least 12GB memory. <br>
82-
GPU is required for DiffModeler since most computations are done on GPU.
77+
- **CPU**: 4 cores or higher
78+
- **Memory**: 12GB RAM or higher
79+
- **GPU**: CUDA-compatible with minimum 12GB memory
80+
- **Note**: GPU is mandatory as DiffModeler performs most computations on GPU
8381

8482
## Installation
8583
### 1. [`Install git`](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
8684
### 2. Clone the repository in your computer
87-
```
85+
```bash
8886
git clone git@github.com:kiharalab/DiffModeler.git && cd DiffModeler
8987
```
9088

9189
### 3. Configure environment for DiffModeler.
92-
#### 3.1.1 Install anaconda
90+
### **Option A: Conda Environment**
91+
##### 3.A.1 Install anaconda
9392
Install anaconda from https://www.anaconda.com/download#downloads.
94-
#### 3.1.2 Install environment via yml file
95-
Then create the environment via
96-
```commandline
93+
##### 3.A.2 Install environment via yml file
94+
```bash
9795
conda env create -f environment.yml
9896
```
99-
#### 3.1.3 Activate environment for running
97+
##### 3.A.3 Activate environment for running
10098
Each time you want to run this software, simply activate the environment by
101-
```
99+
```bash
102100
conda activate DiffModeler
103-
conda deactivate(If you want to exit)
101+
# To exit
102+
conda deactivate
103+
```
104+
### **Option B: Pixi Environment**
105+
106+
##### 3.B.1 Install pixi
107+
```bash
108+
curl -fsSL https://pixi.sh/install.sh | bash
109+
```
110+
111+
##### 3.B.2 Install environment via toml file
112+
```bash
113+
pixi install
114+
```
115+
116+
##### 3.B.3 Activate environment for running
117+
Each time you want to run this software, simply activate the environment by
118+
```bash
119+
pixi shell
120+
# To exit
121+
exit
104122
```
105123

106124
### 4. Download the pre-trained diffusion model

VESPER_CUDA/fitter.py

Lines changed: 15 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -519,8 +519,9 @@ def _retrieve_data(self, result_list):
519519
result["Nm"] = Nm
520520
result["total"] = total
521521

522+
522523
@staticmethod
523-
def _calc_ldp_recall_score_item(ldp_arr, ca_arr, rot_mtx, trans):
524+
def _calc_ldp_recall_score_item(ldp_tree, ca_arr, rot_mtx, trans):
524525
"""
525526
Calculate the recall score of LDP points given a rotation matrix and translation vector
526527
All arguments have to be torch tensors on GPU
@@ -529,22 +530,22 @@ def _calc_ldp_recall_score_item(ldp_arr, ca_arr, rot_mtx, trans):
529530
# rot_mtx: torch tensor of shape (3, 3)
530531
# trans: torch tensor of shape (3, )
531532
"""
532-
import torch
533-
534-
# rotated backbone CA
535-
rot_backbone_ca = torch.matmul(ca_arr, rot_mtx) + trans
533+
# import torch
534+
#
535+
# # rotated backbone CA
536+
# rot_backbone_ca = torch.matmul(ca_arr, rot_mtx) + trans
537+
rot_backbone_ca = np.dot(ca_arr, rot_mtx) + trans
538+
distances, indices = ldp_tree.query(rot_backbone_ca, k=1)
539+
coverage = np.sum(distances < 3.0) / len(rot_backbone_ca)
540+
return coverage
536541

537-
# calculate all pairwise distances
538-
dist_mtx = torch.cdist(rot_backbone_ca, ldp_arr, p=2)
539-
540-
# get distance from the closest LDP point for each CA atom
541-
min_dist = torch.min(dist_mtx, dim=1).values
542-
543-
# count the coverage of CA atoms within 3.0 angstrom of LDP points in the total amount of CA atoms
544-
return (min_dist < 3.0).sum().item() / len(rot_backbone_ca)
545542

546543
def _calc_ldp_recall(self, results, sort=False, progress_bar=True):
547544
import torch
545+
from scipy.spatial import KDTree
546+
ldp_atoms = self.ldp_atoms.cpu().numpy()
547+
ldp_tree = KDTree(ldp_atoms)
548+
bb_ca = self.backbone_ca.cpu().numpy()
548549

549550
if not progress_bar:
550551
iter_results = results # calculate for each rotation
@@ -553,10 +554,8 @@ def _calc_ldp_recall(self, results, sort=False, progress_bar=True):
553554
for result in iter_results:
554555
r = R.from_euler("xyz", result["angle"], degrees=True)
555556
rot_mtx = (r.as_matrix()).T
556-
rot_mtx = torch.from_numpy(rot_mtx).to(self.device)
557-
# rot_mtx = euler_to_mtx(torch.tensor(result["angle"], device=self.device)).t()
558557
result["ldp_recall"] = self._calc_ldp_recall_score_item(
559-
self.ldp_atoms, self.backbone_ca, rot_mtx, torch.from_numpy(result["real_trans"]).to(self.device)
558+
ldp_tree, bb_ca, rot_mtx, result["real_trans"]
560559
)
561560

562561
# sort by LDP recall

0 commit comments

Comments
 (0)