From e9ad92ebfd5faee4b2437bec705a74a7b3ba43b2 Mon Sep 17 00:00:00 2001
From: Jeff Zhang
Date: Mon, 10 Aug 2015 10:45:26 +0800
Subject: [PATCH] fix convert bug, use double instead of float

---
 convert.lua | 4 ++--
 sample.lua  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/convert.lua b/convert.lua
index b7b0682..d083c87 100644
--- a/convert.lua
+++ b/convert.lua
@@ -33,6 +33,6 @@
 else
 end
 checkpoint = torch.load(opt.load_model)
-checkpoint.protos.rnn:float()
-checkpoint.protos.criterion:float()
+checkpoint.protos.rnn:double()
+checkpoint.protos.criterion:double()
 torch.save(opt.save_file, checkpoint)

diff --git a/sample.lua b/sample.lua
index fb58685..a15b27c 100644
--- a/sample.lua
+++ b/sample.lua
@@ -80,7 +80,7 @@ local num_layers = checkpoint.opt.num_layers
 current_state = {}
 for L = 1,checkpoint.opt.num_layers do
     -- c and h for all layers
-    local h_init = torch.zeros(1, checkpoint.opt.rnn_size):float()
+    local h_init = torch.zeros(1, checkpoint.opt.rnn_size):double()
     if opt.gpuid >= 0 then h_init = h_init:cuda() end
     table.insert(current_state, h_init:clone())
     table.insert(current_state, h_init:clone())
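
Note: Torch7's default tensor type is torch.DoubleTensor, so tensors created with torch.zeros(...) are doubles, and feeding them into a module cast with :float() raises a type mismatch. A minimal sketch of the mismatch and the fix, assuming a Torch7 install (nn.Linear here stands in for checkpoint.protos.rnn and is illustrative only):

require 'torch'
require 'nn'

local model = nn.Linear(4, 2):float()  -- a float-cast module, as before this patch
local x = torch.zeros(1, 4)            -- DoubleTensor: torch's default tensor type
-- model:forward(x)                    -- would error: input type does not match module type
model:double()                         -- the fix: cast the module to double instead
print(model:forward(x))                -- input and parameter types now agree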