 import os
 import time
 from dnlp.core.dnn_crf_base import DnnCrfBase
-from dnlp.config import DnnCrfConfig
+from dnlp.config.config import DnnCrfConfig


 class DnnCrf(DnnCrfBase):
@@ -84,11 +84,7 @@ def __init__(self, *, config: DnnCrfConfig = None, task='cws', data_path: str =
     if train == 'll':
       self.crf_loss, _ = tf.contrib.crf.crf_log_likelihood(self.output, self.real_indices, self.seq_length,
                                                             self.transition)
-      # self.loss = -self.loss
       self.loss = -self.crf_loss / self.batch_size + self.regularization
-      # self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
-      # self.optimizer.minimize(self.loss)
-      # self.train = self.optimizer.minimize(self.loss)
     else:
       self.true_seq = tf.placeholder(tf.int32, [self.batch_size, self.batch_length])
       self.pred_seq = tf.placeholder(tf.int32, [self.batch_size, self.batch_length])
@@ -100,7 +96,6 @@ def __init__(self, *, config: DnnCrfConfig = None, task='cws', data_path: str =
       state_difference = tf.reduce_sum(
         tf.gather_nd(self.output_placeholder, pred_index) - tf.gather_nd(self.output_placeholder, true_index),
         axis=1)
-      # r = tf.stack([self.true_seq[:, :-1], self.true_seq[:, 1:]], 2)
       transition_difference = tf.reduce_sum(
         tf.gather_nd(self.transition, tf.stack([self.pred_seq[:, :-1], self.pred_seq[:, 1:]], 2)) - tf.gather_nd(
           self.transition, tf.stack([self.true_seq[:, :-1], self.true_seq[:, 1:]], 2)), axis=1)
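For context on the retained 'll' branch: below is a minimal, self-contained sketch (not part of this patch) of how tf.contrib.crf.crf_log_likelihood is typically turned into a negative log-likelihood loss and a training op under TensorFlow 1.x. The placeholder names (unary_scores, tags, lengths), the tag count, and the Adagrad optimizer are illustrative assumptions, not identifiers from this file.

import tensorflow as tf

unary_scores = tf.placeholder(tf.float32, [None, None, 4])  # per-token tag scores: [batch, time, num_tags]
tags = tf.placeholder(tf.int32, [None, None])                # gold tag indices:     [batch, time]
lengths = tf.placeholder(tf.int32, [None])                   # true sequence length per example

# crf_log_likelihood returns the per-example log-likelihood and the transition matrix
# (created internally here; the patched code passes self.transition explicitly instead).
log_likelihood, transition = tf.contrib.crf.crf_log_likelihood(unary_scores, tags, lengths)

# The patch keeps loss = -crf_loss / batch_size (+ regularization); tf.reduce_mean does the
# equivalent per-batch averaging in this sketch.
loss = tf.reduce_mean(-log_likelihood)
train_op = tf.train.AdagradOptimizer(0.1).minimize(loss)     # optimizer choice is arbitrary here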