Commit a9bf5ac

Merge pull request lukekoch#1 from lukekoch/unstable
Merge Unstable
2 parents: b0e7c5e + 82a648a

Note: large commits have some content hidden by default, so only a subset of the changed files is shown below.

44 files changed: +4801 -39 lines

flask-backend/Dockerfile

+1 -1

@@ -11,4 +11,4 @@ RUN source ./venv/bin/activate
 RUN pip install -r ./requirements.txt
 RUN apt-get -y update && apt install -y curl && apt-get install -y wget unzip genometools samtools tabix

-CMD python3 src/worker.py & python3 src/worker.py & python3 src/main.py
+CMD python3 -u src/worker.py & python3 -u src/worker.py & python3 -u src/main.py
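
The only functional change is the added -u flag, which runs each process with unbuffered output so that log lines from the workers and the main app reach docker logs as soon as they are printed. A rough in-code equivalent, as a minimal sketch not taken from the repo:

# Roughly equivalent to running with python3 -u (or PYTHONUNBUFFERED=1):
# reconfigure the standard streams at startup so each line is flushed as it is
# written, instead of sitting in a block buffer when stdout is a pipe, not a TTY.
import sys

sys.stdout.reconfigure(line_buffering=True)   # available since Python 3.7
sys.stderr.reconfigure(line_buffering=True)

print("worker started")   # now shows up in docker logs immediately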

flask-backend/src/Routes/__init__.py

+2

@@ -15,6 +15,7 @@ def create_app():
     from .combined_imports import imports_bp
     from .files import files_bp
     from .tasks import tasks_bp
+    from .taxaminer_data import taxaminer_bp

     app.register_blueprint(taxa_bp, url_prefix="/")
     app.register_blueprint(users_bp, url_prefix="/")
@@ -26,6 +27,7 @@ def create_app():
     app.register_blueprint(imports_bp, url_prefix="/")
     app.register_blueprint(files_bp, url_prefix="/")
     app.register_blueprint(tasks_bp, url_prefix="/")
+    app.register_blueprint(taxaminer_bp, url_prefix="/taxaminer/")

     CORS(app)
     return app
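
With this registration, every route declared on taxaminer_bp is served under the /taxaminer/ prefix, while the existing blueprints keep the bare "/" prefix. A minimal, self-contained sketch of that effect (the route below is hypothetical, not taken from taxaminer_data.py):

from flask import Blueprint, Flask, jsonify

taxaminer_bp = Blueprint("taxaminer", __name__)

# hypothetical example route, only to show the effect of url_prefix
@taxaminer_bp.route("/datasets", methods=["GET"])
def taxaminer_bp_datasets():
    return jsonify({"payload": []})

app = Flask(__name__)
app.register_blueprint(taxaminer_bp, url_prefix="/taxaminer/")
# the handler is now reachable at GET /taxaminer/datasets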

flask-backend/src/Routes/annotations.py

+26

@@ -13,6 +13,7 @@
     fetchFeatures,
     import_annotation,
     updateAnnotationLabel,
+    grepFeature
 )

 # setup blueprint name
@@ -173,6 +174,31 @@ def annotations_bp_fetchFeatures():
         return REQUESTMETHODERROR


+# GREP FEATURES BY SEARCH TERM
+@annotations_bp.route("/grepFeatures", methods=["POST"])
+def annotations_bp_grepFeatures():
+    if request.method == "POST":
+        req = request.get_json(force=True)
+        userID = req.get("userID", None)
+        token = req.get("token", None)
+
+        # token still active
+        valid_token, error = validateActiveToken(userID, token, ACCESS_LVL_1)
+        if not valid_token:
+            response = jsonify({"payload": {}, "notification": error})
+            response.headers.add("Access-Control-Allow-Origin", "*")
+            return response
+
+        # grep input
+        search = req.get("search", None)
+        annotation_id = req.get("annotationID", None)
+
+        coords = grepFeature(search, annotation_id)
+        return jsonify({"coords": coords})
+    else:
+        return REQUESTMETHODERROR
+
+
 # FETCH ALL UNIQUE FEATURE TYPES
 @annotations_bp.route("/fetchFeatureSeqIDs", methods=["POST"])
 def annotations_bp_fetchFeatureSeqIDs():
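
For reference, a client would call the new endpoint roughly like this. This is a minimal sketch: the host and port, the userID/token values, and the search term are placeholders; only the request keys and the response shape follow the route above.

import requests

# placeholder host and credentials; the body keys mirror what the route reads
resp = requests.post(
    "http://localhost:3002/grepFeatures",
    json={
        "userID": "1",
        "token": "<session-token>",
        "search": "kinase",
        "annotationID": 42,
    },
)
# {"coords": [...]} on success, or {"payload": {}, "notification": ...} if the token check fails
print(resp.json())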

flask-backend/src/Routes/file_io.py

+141

@@ -0,0 +1,141 @@
+# TODO remove this once testing has finished
+
+import csv
+import json
+
+from flask import jsonify
+
+def load_datasets():
+    with open('./datasets/datasets.json', 'r') as f:
+        data = json.load(f)
+    return data
+
+def convert_csv_to_json(path):
+    """Load the main scatterplot datafile and convert it to JSON"""
+    with open(path, encoding='utf-8') as csvf:
+        # load csv file data using csv library's dictionary reader
+        csv_reader = csv.DictReader(csvf)
+        labeled_dict = dict()
+
+        for row in csv_reader:
+            if row['plot_label'] in labeled_dict.keys():
+                labeled_dict[row['plot_label']].append(row)
+            else:
+                labeled_dict[row['plot_label']] = [row]
+
+    traces_list = []
+    for key in labeled_dict.keys():
+        traces_list.append(labeled_dict[key])
+
+    return traces_list
+
+
+def fast_fasta_loader(path, fasta_id):
+    """Load fasta sequence data"""
+    seq = ""
+    start_index = -1
+
+    with open(path, "r") as f:
+        lines = f.readlines()
+    for i, line in enumerate(lines):
+        lines[i] = line.rstrip()
+
+    for i, line in enumerate(lines):
+        if line.startswith(">" + fasta_id):
+            start_index = i
+            break
+
+    if start_index == -1:
+        return ""
+
+    for i in range(start_index + 1, len(lines)):
+        if not lines[i].startswith(">"):
+            seq += lines[i]
+        else:
+            break
+
+    return seq
+
+
+def diamond_to_json_str(path):
+    """Convert diamond table rows to JSON strings"""
+    rows = []
+    # format as defined here:
+    # https://github.com/fdarthen/taXaminer/blob/988f408afed4830e23a1b8b837b24bab3e8fc5b9/taxonomic_assignment.py#L718
+    fields = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send',
+              'evalue', 'bitscore', 'taxid', 'taxname']
+    with open(path, encoding='utf-8') as csvf:
+        # load csv file data using csv library's dictionary reader
+        csv_reader = csv.DictReader(csvf, delimiter='\t', fieldnames=fields)
+        for i, row in enumerate(csv_reader):
+            json_string = json.dumps(row)
+            rows.append(json_string)
+
+    return rows
+
+def taxonomic_hits_loader(fasta_id, path):
+    """Load all taxonomic hits"""
+    fields = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send',
+              'evalue', 'bitscore', 'staxids', 'ssciname']
+    match_rows = []
+    start_index = -1
+    with open(path, encoding='utf-8') as csvf:
+        # load csv file data using csv library's dictionary reader
+        csv_reader = csv.DictReader(csvf, delimiter='\t', fieldnames=fields)
+        for i, row in enumerate(csv_reader):
+            if row['qseqid'] == fasta_id:
+                match_rows.append(row)
+
+    for row in match_rows:
+        if len(row['ssciname']) > 20:
+            row['ssciname'] = row['ssciname'][0:20] + "..."
+    return match_rows
+
+def load_user_config(path):
+    """Load a user config"""
+    with open(f"{path}user_config.json", "r") as file:
+        lines = file.readlines()
+
+    return "".join(lines)
+
+def parse_user_config(path):
+    """Parse user config to JSON"""
+    with open(f'{path}user_config.json', 'r') as f:
+        data = json.load(f)
+    return data
+
+def write_user_config(json_data, path):
+    """Write user config to disk"""
+    with open(f'{path}user_config.json', 'w') as json_file:
+        json.dump(json_data, json_file)
+
+def load_pca_coords(dataset_id):
+    """3D plot of variable contribution"""
+    with open(f"/flask-backend/data/storage/taxa/Burkholderia_multivorans/Burkholderia_multivorans_id1/analyses/taxaminer/Burkholderia_multivorans_id1_taxaminer_id1/pca_loadings.csv", 'r') as file:
+        lines = file.readlines()
+
+    final_lines = []
+    for line in lines[1:-1]:
+        fields = line.split(",")
+        new_dict = dict()
+        new_dict['label'] = fields[0]
+        new_dict['x'] = [fields[1]]
+        new_dict['y'] = [fields[2]]
+        new_dict['z'] = [fields[3]]
+        final_lines.append(new_dict)
+
+    return final_lines
+
+def indexed_data(path):
+    """Load the main scatterplot datafile and convert it to JSON"""
+    with open(path, encoding='utf-8') as csvf:
+        # load csv file data using csv library's dictionary reader
+        csv_reader = csv.DictReader(csvf)
+        labeled_dict = dict()
+
+        for row in csv_reader:
+            labeled_dict[row['g_name']] = row
+
+    return labeled_dict
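
To illustrate how a request handler might use these helpers, here is a small, self-contained sketch that writes toy input files and runs three of the loaders over them. It assumes the module is importable as file_io; the file names, paths, and contents are made up for the example, and only the column layout follows the code above.

import os
import tempfile

from file_io import convert_csv_to_json, fast_fasta_loader, taxonomic_hits_loader

tmp = tempfile.mkdtemp()

# scatterplot table grouped by 'plot_label'
csv_path = os.path.join(tmp, "gene_table.csv")
with open(csv_path, "w") as f:
    f.write("g_name,plot_label\ng1,Bacteria\ng2,Bacteria\ng3,Unassigned\n")
print(len(convert_csv_to_json(csv_path)))    # 2 traces: Bacteria and Unassigned

# FASTA lookup by sequence id
fasta_path = os.path.join(tmp, "proteins.faa")
with open(fasta_path, "w") as f:
    f.write(">g1\nMKT\nLLV\n>g2\nMAA\n")
print(fast_fasta_loader(fasta_path, "g1"))   # MKTLLV

# diamond-style hit table filtered by query id; scinames longer than 20 chars get truncated
hits_path = os.path.join(tmp, "hits.tsv")
with open(hits_path, "w") as f:
    f.write("g1\ts1\t90.0\t100\t1\t0\t1\t100\t1\t100\t1e-50\t200\t287\tPseudomonas aeruginosa\n")
print(taxonomic_hits_loader("g1", hits_path))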
