# Paths to the encoder / decoder weights saved after training
PATH1 = './gpumodel/my_encoderrnn.pth'
PATH2 = './gpumodel/my_attndecoderrnn.pth'


def dm_test_Seq2Seq_Evaluate():
    # Build the dataset and dataloader (batch_size=1: one sentence pair at a time)
    mypairsdataset = MyPairsDataset(my_pairs)
    mydataloader = DataLoader(dataset=mypairsdataset, batch_size=1, shuffle=True)

    # Rebuild the encoder and load the trained weights; map_location keeps
    # GPU-trained tensors loadable on a CPU-only machine
    input_size = english_word_n
    hidden_size = 256
    my_encoderrnn = EncoderRNN(input_size, hidden_size)
    my_encoderrnn.load_state_dict(
        torch.load(PATH1, map_location=lambda storage, loc: storage), strict=False)
    print('my_encoderrnn model structure --->', my_encoderrnn)

    # Rebuild the attention decoder and load its trained weights
    input_size = french_word_n
    hidden_size = 256
    my_attndecoderrnn = AttnDecoderRNN(input_size, hidden_size)
    my_attndecoderrnn.load_state_dict(
        torch.load(PATH2, map_location=lambda storage, loc: storage), strict=False)
    print('my_attndecoderrnn model structure --->', my_attndecoderrnn)

    # A few hand-picked English/French pairs to translate
    my_samplepairs = [
        ['i m impressed with your french .', 'je suis impressionne par votre francais .'],
        ['i m more than a friend .', 'je suis plus qu une amie .'],
        ['she is beautiful like her mother .', 'elle est belle comme sa mere .']
    ]
    print('my_samplepairs --->', len(my_samplepairs))

    for index, pair in enumerate(my_samplepairs):
        x = pair[0]  # source (English) sentence
        y = pair[1]  # reference (French) translation

        # Convert the source sentence to a tensor of word indices, ending with EOS
        tmpx = [english_word2index[word] for word in x.split(' ')]
        tmpx.append(EOS_token)
        tensor_x = torch.tensor(tmpx, dtype=torch.long, device=device).view(1, -1)

        # Translate the sentence and join the predicted words into one string
        decoded_words, attentions = Seq2Seq_Evaluate(tensor_x, my_encoderrnn, my_attndecoderrnn)
        output_sentence = ' '.join(decoded_words)

        print('\n')
        print('>', x)                 # source sentence
        print('=', y)                 # reference translation
        print('<', output_sentence)   # model prediction
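The helper Seq2Seq_Evaluate called above is defined elsewhere in the project and is not shown in this section. As a point of reference only, the following is a minimal sketch of what such an evaluation routine typically does, assuming interfaces in the style of the classic PyTorch seq2seq translation tutorial: an encoder that returns per-step outputs plus its final hidden state and exposes an inithidden() helper, an attention decoder that returns output scores, a new hidden state and attention weights, and SOS_token, MAX_LENGTH and french_index2word defined earlier. All of these names and signatures are assumptions; the project's actual implementation may differ.

def Seq2Seq_Evaluate(tensor_x, my_encoderrnn, my_attndecoderrnn):
    with torch.no_grad():
        # Encode the whole source sentence; the encoder is assumed to return
        # per-step outputs plus its final hidden state, and to expose an
        # inithidden() helper for the zero initial state (both assumptions).
        hidden = my_encoderrnn.inithidden()
        encoder_outputs, encoder_hidden = my_encoderrnn(tensor_x, hidden)

        # Start decoding from the start-of-sentence token, reusing the
        # encoder's final hidden state as the decoder's initial state.
        decoder_input = torch.tensor([[SOS_token]], device=device)
        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = []

        for di in range(MAX_LENGTH):
            decoder_output, decoder_hidden, decoder_attention = my_attndecoderrnn(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions.append(decoder_attention)

            # Greedy decoding: keep the highest-scoring word at each step
            # and stop as soon as the decoder emits EOS.
            topv, topi = decoder_output.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            decoded_words.append(french_index2word[topi.item()])
            decoder_input = topi.detach()

        return decoded_words, decoder_attentions

Calling dm_test_Seq2Seq_Evaluate() then prints, for each sample pair, the source sentence after '>', the reference translation after '=', and the model's prediction after '<'.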