This repository has been archived by the owner on Jan 26, 2025. It is now read-only.

Allow top_retrieval.py to handle --no_cuda #64

Open
wants to merge 1 commit into base: master
entity_detection/nn/top_retrieval.py: 4 changes (3 additions & 1 deletion)
@@ -22,10 +22,12 @@

 if not args.cuda:
     args.gpu = -1
+map_location = None
 if torch.cuda.is_available() and args.cuda:
     print("Note: You are using GPU for training")
     torch.cuda.set_device(args.gpu)
     torch.cuda.manual_seed(args.seed)
+    map_location = lambda storage, location: storage.cuda(args.gpu)
 if torch.cuda.is_available() and not args.cuda:
     print("Warning: You have Cuda but not use it. You are using CPU for training.")

@@ -44,7 +46,7 @@
     sort=False, shuffle=False)

 # load the model
-model = torch.load(args.trained_model, map_location=lambda storage,location: storage.cuda(args.gpu))
+model = torch.load(args.trained_model, map_location=map_location)

 print(model)

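Both files apply the same fix: map_location defaults to None, and only when CUDA is both available and requested is it pointed at the GPU, so torch.load no longer hard-codes a CUDA device. A minimal, self-contained sketch of that pattern, assuming a --no_cuda flag wired into args.cuda (the flag name, defaults, and checkpoint path here are illustrative, not the scripts' actual argparse setup):

# Minimal sketch of the map_location pattern this PR applies in both files.
# The --no_cuda flag and the checkpoint path are illustrative assumptions;
# the real scripts define their own arguments.
import argparse

import torch

parser = argparse.ArgumentParser()
parser.add_argument('--no_cuda', action='store_true', help='force CPU execution')
parser.add_argument('--gpu', type=int, default=0, help='CUDA device id')
parser.add_argument('--trained_model', type=str, required=True)
args = parser.parse_args()
args.cuda = not args.no_cuda

# None keeps torch.load's default device handling; the lambda remaps every
# deserialized storage onto the selected GPU.
map_location = None
if torch.cuda.is_available() and args.cuda:
    torch.cuda.set_device(args.gpu)
    map_location = lambda storage, location: storage.cuda(args.gpu)

model = torch.load(args.trained_model, map_location=map_location)
print(model)

Worth noting: with map_location=None, torch.load still restores tensors to the device recorded in the checkpoint, so loading a checkpoint saved from GPU tensors on a CPU-only machine generally also needs map_location='cpu' (or a lambda returning the storage unchanged).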
relation_prediction/nn/top_retrieval.py: 4 changes (3 additions & 1 deletion)
@@ -18,10 +18,12 @@

 if not args.cuda:
     args.gpu = -1
+map_location = None
 if torch.cuda.is_available() and args.cuda:
     print("Note: You are using GPU for training")
     torch.cuda.set_device(args.gpu)
     torch.cuda.manual_seed(args.seed)
+    map_location = lambda storage, location: storage.cuda(args.gpu)
 if torch.cuda.is_available() and not args.cuda:
     print("Warning: You have Cuda but not use it. You are using CPU for training.")

@@ -40,7 +42,7 @@
     sort=False, shuffle=False)

 # load the model
-model = torch.load(args.trained_model, map_location=lambda storage,location: storage.cuda(args.gpu))
+model = torch.load(args.trained_model, map_location=map_location)

 print(model)

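Assuming the flag named in the PR title is wired up as above, a CPU-only run of either script would then be invoked along these lines (the path and flag spellings are illustrative):

python relation_prediction/nn/top_retrieval.py --no_cuda --trained_model saved_checkpoints/model.pt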