66: volatile = not train) for name in ('c1', 'h1', 'c2', 'h2', 'c3', 'h3','c4','h4','c5','h5')}
P.95, Listing 3.3: the load_data function starting at line 15 of train.py
Incorrect:
15: def load_data(args):
16: vocab = {}
17: print ('%s/input.txt'% args.data_dir)
18: # words = codecs.open('%s/input.txt' % args.data_dir, 'rb','UTF-8').read()
19:
20: dat = []
21: for l in open('%s/input.txt' % args.data_dir).readlines():
22: data = eval(l[:-1])
23: dat.append(data)
24:
25: words = dat
26: dataset = np.ndarray((1,1), dtype=np.float32)
27: for i, word in enumerate(words):
28: if word not in vocab:
29: vocab[word] = len(vocab)
30: dataset[i] = word
31: print 'corpus length:', len(words)
32: print 'vocab size:', len(vocab)
33: return dataset, words, vocab
Correct:
15: def load_data(args):
16: vocab = {}
17: print ('%s/input.txt'% args.data_dir)
18: words = codecs.open('%s/input.txt' % args.data_dir, 'rb','UTF-8').read()
19: words = list(words)
20: dataset = np.ndarray((len(words),), dtype=np.int32)
21: for i, word in enumerate(words):
22: if word not in vocab:
23: vocab[word] = len(vocab)
24: dataset[i] = vocab[word]
25: print 'corpus length:', len(words)
26: print 'vocab size:', len(vocab)
27: return dataset, words, vocab
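For reference, here is a minimal sketch (not from the book) of what the corrected loop produces: each character of the input text is assigned an index in vocab the first time it appears, and dataset stores that index for every position. The sample string 'abcabd' is a made-up stand-in for the contents of input.txt.

import numpy as np

words = list(u'abcabd')                     # stand-in for codecs.open('%s/input.txt' % args.data_dir, 'rb', 'UTF-8').read()
vocab = {}
dataset = np.ndarray((len(words),), dtype=np.int32)
for i, word in enumerate(words):
    if word not in vocab:
        vocab[word] = len(vocab)            # first occurrence gets the next free index
    dataset[i] = vocab[word]                # dataset holds the index, not the character itself

print(vocab)                                # e.g. {'a': 0, 'b': 1, 'c': 2, 'd': 3} (key order may vary)
print(dataset)                              # [0 1 2 0 1 3]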