Skip to content

Commit

Permalink
Fixed VectorRM requiring an embedding model in the example.
Browse files Browse the repository at this point in the history
  • Loading branch information
AMMAS1 committed Aug 4, 2024
1 parent 178fe0c commit aad6ec0
Showing 1 changed file with 4 additions and 1 deletion.
5 changes: 4 additions & 1 deletion examples/run_storm_wiki_gpt_with_VectorRM.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ def main(args):
'batch_size': args.embed_batch_size,
'vector_db_mode': args.vector_db_mode,
'collection_name': args.collection_name,
'embedding_model': args.embedding_model,
'device': args.device,
}
if args.vector_db_mode == 'offline':
Expand All @@ -110,7 +111,7 @@ def main(args):
)

# Setup VectorRM to retrieve information from your own data
rm = VectorRM(collection_name=args.collection_name, device=args.device, k=engine_args.search_top_k)
rm = VectorRM(collection_name=args.collection_name, embedding_model=args.embedding_model, device=args.device, k=engine_args.search_top_k)

# initialize the vector store, either online (store the db on Qdrant server) or offline (store the db locally):
if args.vector_db_mode == 'offline':
Expand Down Expand Up @@ -146,6 +147,8 @@ def main(args):
# provide local corpus and set up vector db
parser.add_argument('--collection-name', type=str, default="my_documents",
help='The collection name for vector store.')
parser.add_argument('--embedding_model', type=str, default="BAAI/bge-m3",
                    help='The embedding model used to encode documents for the vector store.')
parser.add_argument('--device', type=str, default="mps",
help='The device used to run the retrieval model (mps, cuda, cpu, etc).')
parser.add_argument('--vector-db-mode', type=str, choices=['offline', 'online'],
Expand Down

0 comments on commit aad6ec0

Please sign in to comment.