-
Notifications
You must be signed in to change notification settings - Fork 0
/
Makefile
178 lines (151 loc) · 5.6 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
# Makefile
# Load project configuration; .env must exist and defines ENV_NAME,
# PYTHON_VERSION, the Elastic/Prefect settings, etc.
include .env
# Export every variable *name* appearing in .env so recipes and the
# processes they spawn inherit the values loaded above.
export $(shell sed 's/=.*//' .env)

# All tracked Python files (the Hugging Face model cache is excluded).
# ':=' expands once at parse time; the recursive '=' flavour would re-run
# `find` every single time $(PY_FILES) is referenced.
PY_FILES := $(shell find . -name "*.py" -not -path "./hf_cache/*")

# Default flag values forwarded to scripts/redeploy_flows.py by the
# redeploy_flows target. Simple assignment: these are plain literals.
REINDEX_ES_DEFAULT := false
REINIT_DB_DEFAULT := false
DEFACTO_DEFAULT := true
REINIT_GRAFANA_DEFAULT := false
RECREATE_DASHBOARDS_DEFAULT := false

# Export every variable defined in this Makefile to recipe sub-shells.
.EXPORT_ALL_VARIABLES:
# Silences the debugpy/pydevd "frozen modules" warning in Python tooling.
PYDEVD_DISABLE_FILE_VALIDATION=1
# Target to create a conda environment and install dependencies
# Creates the $(ENV_NAME) conda env at $(PYTHON_VERSION), installs all
# requirements, and registers a matching Jupyter kernel.
create_local_env:
	conda create -y -n $(ENV_NAME) python=$(PYTHON_VERSION)
# Install runtime and dev requirements in a single resolver pass so the
# dev install cannot downgrade or conflict with the runtime pins the way
# two sequential `pip install` invocations can.
	conda run -n $(ENV_NAME) pip install -r requirements.txt -r requirements-dev.txt
	conda run -n $(ENV_NAME) python -m ipykernel install --user --name=$(ENV_NAME) --display-name "Python ($(ENV_NAME))"
# Target to remove the Jupyter kernel and delete the conda environment
# Both steps are idempotent: each is skipped with a message when the
# kernel/environment does not exist.
remove_local_env:
# `grep -qw` matches $(ENV_NAME) as a whole word, so an env named "foo"
# no longer false-positives against "foobar" (plain -q matched substrings).
	@if jupyter kernelspec list | grep -qw $(ENV_NAME); then \
		jupyter kernelspec remove -f $(ENV_NAME); \
	else \
		echo "Jupyter kernel '$(ENV_NAME)' not found."; \
	fi
	@if conda info --envs | grep -qw $(ENV_NAME); then \
		conda env remove -n $(ENV_NAME); \
	else \
		echo "Conda environment '$(ENV_NAME)' not found."; \
	fi
# Docker-compose
# Start the full service stack defined in docker-compose.yml, detached.
compose:
	docker-compose up -d
# Format py files
# Auto-format every tracked Python file inside the project conda env:
# isort orders imports first, then black normalizes code style.
format_py:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) isort $(PY_FILES)
	@PYTHONPATH=./ conda run -n $(ENV_NAME) black $(PY_FILES)
# Linting py files
# Run pylint over all tracked Python files. --exit-zero makes the lint
# advisory: findings are printed but the target never fails the build.
lint_py:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) pylint $(PY_FILES) --exit-zero
# Running unit_tests
# Execute the unit-test suite under ./tests with pytest in the conda env.
unit_tests:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) pytest ./tests
# Running integration_tests
# Run the container connectivity checks. $(keep-containers-running) is
# empty unless supplied on the make command line; presumably any value
# tells the script to leave containers up afterwards -- verify against
# integration_tests/connectivity_check.sh.
integration_tests:
	@./integration_tests/connectivity_check.sh $(keep-containers-running)
# Download ollama models specified as CHAT_MODEL & EMBED_MODEL
# Delegates entirely to the setup script; model names come from .env.
setup_ollama:
	@./scripts/setup_ollama.sh
# Cache $ASR_MODEL in hf_cache dir
# Pre-download the ASR model so runtime jobs don't block on first use.
# --no-capture-output lets the script's output stream straight to the
# terminal instead of being buffered by `conda run`.
cache_asr_model:
	@PYTHONPATH=./ conda run --no-capture-output -n $(ENV_NAME) \
		python ./scripts/cache_asr_model.py $(ASR_MODEL)
# Cache $PODCAST_DATASET in hf_cache dir
# Pre-download the podcast dataset; mirrors cache_asr_model above.
cache_dataset:
	@PYTHONPATH=./ conda run --no-capture-output -n $(ENV_NAME) \
		python ./scripts/cache_dataset.py $(PODCAST_DATASET)
# Start prefect server and worker
# Idempotently start a Prefect server in the background. The whole recipe
# is one shell invocation: probe localhost:4200; if nothing answers,
# launch the server detached (&) and poll up to 10 times, 5 s apart,
# until it responds. `exit 0` only leaves this recipe's shell, not make.
# NOTE(review): the "\033[…m" colour codes rely on an `echo` that
# interprets backslash escapes (e.g. dash's); bash's builtin echo would
# print them literally -- confirm what /bin/sh is on target machines.
prefect_start_server:
	@if ! curl -s -o /dev/null localhost:4200; then \
		echo "Prefect server is not running. Starting the server..."; \
		conda run -n $(ENV_NAME) prefect server start & \
		for i in $$(seq 1 10); do \
			if curl -s -o /dev/null localhost:4200; then \
				echo "\033[0;32mPrefect server started successfully.\033[0m"; \
				exit 0; \
			else \
				echo "Waiting for Prefect server to start... ($$i/10)"; \
				sleep 5; \
			fi; \
		done; \
		echo "\033[0;31mFailed to start the Prefect server after multiple attempts.\033[0m"; \
	else \
		echo "\033[0;32mPrefect server is already running.\033[0m"; \
	fi
# Stop prefect server
# Mirror image of prefect_start_server: if something answers on
# localhost:4200, kill every "prefect server" process (|| true makes the
# kill best-effort), then poll up to 10 times, 2 s apart, until the port
# stops responding. `exit 0` only leaves this recipe's shell, not make.
prefect_stop_server:
	@if curl -s -o /dev/null localhost:4200; then \
		echo "Prefect server is running. Stopping the server..."; \
		kill $$(ps aux | grep "prefect server" | grep -v grep | awk '{print $$2}') 2>/dev/null || true; \
		for i in $$(seq 1 10); do \
			if ! curl -s -o /dev/null localhost:4200; then \
				echo "\033[0;32mPrefect server terminated successfully.\033[0m"; \
				exit 0; \
			else \
				echo "Waiting for Prefect server to terminate... ($$i/10)"; \
				sleep 2; \
			fi; \
		done; \
		echo "\033[0;31mFailed to terminate the Prefect server after multiple attempts.\033[0m"; \
	else \
		echo "\033[0;32mPrefect server is not running.\033[0m"; \
	fi
# Start a new prefect worker
# Launch one additional Prefect worker for $(WORK_POOL_NAME) in the
# background, then report how many workers are active. Variable
# references normalized to $(...) -- the original mixed ${...} and $(...)
# styles within this one rule (they expand identically in make).
prefect_start_worker:
	@conda run -n $(ENV_NAME) prefect worker start --pool $(WORK_POOL_NAME) &
# NOTE(review): the success message is printed after a fixed 2 s sleep
# without verifying the worker is actually up.
	@sleep 2
	@echo "\033[0;32mWorker started successfully\033[0m"
# NOTE(review): count is halved presumably because `conda run` adds one
# wrapper process per worker, so each worker appears twice in ps -- confirm.
	@count=$$(ps aux | grep "prefect worker start --pool $(WORK_POOL_NAME)" | grep -v grep | wc -l); \
	echo "Total number of active workers: $$((count / 2))"
# Kill all prefect workers
# Delegates to the helper script; terminates every running worker.
prefect_kill_workers:
	@./scripts/kill_prefect_workers.sh
# Re-initialize prefect db & work-pool
# Delegates to the helper script; resets Prefect's database and work pool.
reinit_prefect:
	@./scripts/reinit_prefect.sh
# Deploy or redeploy prefect flows
# Re-register all Prefect deployments, forwarding the *_DEFAULT values
# defined at the top of this file as flow parameters.
redeploy_flows:
	@echo "Re-deploying prefect flows ..."
	@PYTHONPATH=./ conda run -n $(ENV_NAME) python scripts/redeploy_flows.py \
		--reindex_es "$(REINDEX_ES_DEFAULT)" \
		--reinit_db "$(REINIT_DB_DEFAULT)" \
		--defacto "$(DEFACTO_DEFAULT)" \
		--reinit_grafana "$(REINIT_GRAFANA_DEFAULT)" \
		--recreate_dashboards "$(RECREATE_DASHBOARDS_DEFAULT)"
	@echo "\033[0;32mSuccessfully redeployed prefect flows.\033[0m"
# Re-indexing es with defacto mode
# Trigger the ad-hoc setup_es deployment with reindex_es and defacto on.
reindex_es_defacto:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) \
		prefect deployment run setup_es/ad-hoc \
		-p reindex_es=true \
		-p defacto=true
# Re-initializing app backend db
# Trigger the ad-hoc init_db deployment with reinit_db enabled.
reinit_db:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) \
		prefect deployment run init_db/ad-hoc \
		-p reinit_db=true
# Re-setting up Grafana data source & dashboards
# Trigger the ad-hoc setup_grafana deployment. The reinit_grafana flag is
# forwarded only when set on the make command line
# (e.g. `make resetup_grafana reinit_grafana=true`); dashboards are
# always recreated.
resetup_grafana:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) \
		prefect deployment run setup_grafana/ad-hoc \
		$(if $(reinit_grafana),-p reinit_grafana=$(reinit_grafana)) \
		-p recreate_dashboards=true
# Get number of docs in es index for a passed id (not _id)
# Count documents in $(ES_INDEX_NAME): with no ID given, count everything;
# with `make es_count_filtered ID=<value>`, count docs matching the query.
# NOTE(review): the query filters on "_id", yet the comment above says
# "(not _id)" -- one of the two looks wrong; confirm against the mapping.
es_count_filtered:
	@if [ -z "$(ID)" ]; then \
		curl -X GET "http://${ELASTIC_SETUP_HOST}:${ELASTIC_PORT}/${ES_INDEX_NAME}/_count" \
			-H "Content-Type: application/json" \
			-d '{}'; \
	else \
		curl -X GET "http://${ELASTIC_SETUP_HOST}:${ELASTIC_PORT}/${ES_INDEX_NAME}/_count" \
			-H "Content-Type: application/json" \
			-d '{"query":{"terms":{"_id":["$(ID)"]}}}'; \
	fi
# Run process_new_episodes once (this wouldn't affect its weekly schedule)
# Fires the scheduled deployment ad hoc; its regular Sunday-midnight
# schedule keeps running as configured.
process_new_episodes_run:
	@PYTHONPATH=./ conda run -n $(ENV_NAME) \
		prefect deployment run process_new_episodes/midnight-every-sunday
.PHONY: integration_tests