model
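This post collects TensorFlow 1.x implementations of several click-through-rate prediction models (logistic regression, FM, FNN, CCPM, and the product-based networks PNN1 and PNN2), all built on a small shared Model base class that handles input feeding, train/test dropout switching, and weight dumping.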

import sys

if sys.version[0] == '2':
    import cPickle as pkl
else:
    import pickle as pkl

import numpy as np
import tensorflow as tf

import utils

dtype = utils.DTYPE


class Model:
    def __init__(self):
        self.sess = None
        self.X = None
        self.y = None
        self.layer_keeps = None
        self.vars = None
        self.keep_prob_train = None
        self.keep_prob_test = None

    def run(self, fetches, X=None, y=None, mode='train'):
        feed_dict = {}
        if X is not None:
            # X is a list of sparse batches for multi-field models, a single batch otherwise;
            # no input feed is needed when only fetching variables (see dump below)
            if type(self.X) is list:
                for i in range(len(X)):
                    feed_dict[self.X[i]] = X[i]
            else:
                feed_dict[self.X] = X
        if y is not None:
            feed_dict[self.y] = y
        if self.layer_keeps is not None:
            if mode == 'train':
                feed_dict[self.layer_keeps] = self.keep_prob_train
            elif mode == 'test':
                feed_dict[self.layer_keeps] = self.keep_prob_test
        return self.sess.run(fetches, feed_dict)

    def dump(self, model_path):
        var_map = {}
        for name, var in self.vars.items():  # .items() works under both Python 2 and 3
            var_map[name] = self.run(var)
        pkl.dump(var_map, open(model_path, 'wb'))
        print('model dumped at', model_path)


class LR(Model):
    def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd', learning_rate=1e-2, l2_weight=0,
                 random_seed=None):
        Model.__init__(self)
        init_vars = [('w', [input_dim, output_dim], 'tnormal', dtype),
                     ('b', [output_dim], 'zero', dtype)]
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = tf.sparse_placeholder(dtype)
            self.y = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w = self.vars['w']
            b = self.vars['b']
            xw = tf.sparse_tensor_dense_matmul(self.X, w)
            logits = tf.reshape(xw + b, [-1])
            self.y_prob = tf.sigmoid(logits)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
                        l2_weight * tf.nn.l2_loss(xw)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)


class FM(Model):
    def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None, opt_algo='gd',
                 learning_rate=1e-2, l2_w=0, l2_v=0, random_seed=None):
        Model.__init__(self)
        init_vars = [('w', [input_dim, output_dim], 'tnormal', dtype),
                     ('v', [input_dim, factor_order], 'tnormal', dtype),
                     ('b', [output_dim], 'zero', dtype)]
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = tf.sparse_placeholder(dtype)
            self.y = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w = self.vars['w']
            v = self.vars['v']
            b = self.vars['b']
            X_square = tf.SparseTensor(self.X.indices, tf.square(self.X.values), tf.to_int64(tf.shape(self.X)))
            xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, v))
            p = 0.5 * tf.reshape(
                tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(v)), 1),
                [-1, output_dim])
            xw = tf.sparse_tensor_dense_matmul(self.X, w)
            logits = tf.reshape(xw + b + p, [-1])
            self.y_prob = tf.sigmoid(logits)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
                        l2_w * tf.nn.l2_loss(xw) + \
                        l2_v * tf.nn.l2_loss(xv)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
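The quadratic term p in FM uses the standard factorization-machine identity sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * sum_f ((x V)_f^2 - (x^2 V^2)_f), which replaces the O(n^2) pairwise sum with two matrix products. A self-contained NumPy check of that identity (a verification sketch, independent of the classes in this listing):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(8)       # one dense input row; the model feeds sparse rows instead
v = rng.randn(8, 4)    # 8 features, factor_order = 4

# Brute-force sum over feature pairs.
direct = sum(v[i].dot(v[j]) * x[i] * x[j]
             for i in range(8) for j in range(i + 1, 8))
# FM reformulation: 0.5 * sum((x v)^2 - (x^2 v^2)), exactly the structure of p above.
factored = 0.5 * np.sum(np.square(x.dot(v)) - np.square(x).dot(np.square(v)))
assert np.allclose(direct, factored)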
class FNN(Model):
    def __init__(self, layer_sizes=None, layer_acts=None, drop_out=None, layer_l2=None, init_path=None, opt_algo='gd',
                 learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(layer_sizes[0])
        factor_order = layer_sizes[1]
        for i in range(num_inputs):
            layer_input = layer_sizes[0][i]
            layer_output = factor_order
            init_vars.append(('w0_%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b0_%d' % i, [layer_output], 'zero', dtype))
        init_vars.append(('w1', [num_inputs * factor_order, layer_sizes[2]], 'tnormal', dtype))
        init_vars.append(('b1', [layer_sizes[2]], 'zero', dtype))
        for i in range(2, len(layer_sizes) - 1):
            layer_input = layer_sizes[i]
            layer_output = layer_sizes[i + 1]
            init_vars.append(('w%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b%d' % i, [layer_output], 'zero', dtype))
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['w0_%d' % i] for i in range(num_inputs)]
            b0 = [self.vars['b0_%d' % i] for i in range(num_inputs)]
            xw = [tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)]
            x = tf.concat([xw[i] + b0[i] for i in range(num_inputs)], 1)
            l = tf.nn.dropout(
                utils.activate(x, layer_acts[0]),
                self.layer_keeps[0])

            for i in range(1, len(layer_sizes) - 1):
                wi = self.vars['w%d' % i]
                bi = self.vars['b%d' % i]
                l = tf.nn.dropout(
                    utils.activate(
                        tf.matmul(l, wi) + bi,
                        layer_acts[i]),
                    self.layer_keeps[i])

            l = tf.reshape(l, [-1])
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            if layer_l2 is not None:
                # for i in range(num_inputs):
                self.loss += layer_l2[0] * tf.nn.l2_loss(tf.concat(xw, 1))
                for i in range(1, len(layer_sizes) - 1):
                    wi = self.vars['w%d' % i]
                    # bi = self.vars['b%d' % i]
                    self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
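The layer_sizes argument encodes the whole FNN architecture: layer_sizes[0] is a list of per-field input dimensions, layer_sizes[1] is the shared embedding size, and the remaining entries are the widths of the fully connected layers, ending in the output width. A hypothetical instantiation under that reading; the field sizes are made up here, and the accepted activation names ultimately depend on utils.activate, which is not shown:

# Three fields of sizes 1000/500/200, 10-dim embeddings, one 64-unit hidden layer, 1 output.
fnn = FNN(layer_sizes=[[1000, 500, 200], 10, 64, 1],
          layer_acts=['tanh', 'tanh', 'none'],  # one activation per layer; names are assumed
          drop_out=[0, 0, 0],                   # drop probabilities, converted to keep_prob internally
          layer_l2=[0, 0, 0],
          opt_algo='gd', learning_rate=1e-2)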
class CCPM(Model):
    def __init__(self, layer_sizes=None, layer_acts=None, drop_out=None, init_path=None, opt_algo='gd',
                 learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(layer_sizes[0])
        embedding_order = layer_sizes[1]
        for i in range(num_inputs):
            layer_input = layer_sizes[0][i]
            layer_output = embedding_order
            init_vars.append(('w0_%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b0_%d' % i, [layer_output], 'zero', dtype))
        init_vars.append(('f1', [embedding_order, layer_sizes[2], 1, 2], 'tnormal', dtype))
        init_vars.append(('f2', [embedding_order, layer_sizes[3], 2, 2], 'tnormal', dtype))
        init_vars.append(('w1', [2 * 3 * embedding_order, 1], 'tnormal', dtype))
        init_vars.append(('b1', [1], 'zero', dtype))
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['w0_%d' % i] for i in range(num_inputs)]
            b0 = [self.vars['b0_%d' % i] for i in range(num_inputs)]
            l = tf.nn.dropout(
                utils.activate(
                    tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) + b0[i]
                               for i in range(num_inputs)], 1),
                    layer_acts[0]),
                self.layer_keeps[0])
            l = tf.transpose(tf.reshape(l, [-1, num_inputs, embedding_order, 1]), [0, 2, 1, 3])
            f1 = self.vars['f1']
            l = tf.nn.conv2d(l, f1, [1, 1, 1, 1], 'SAME')
            l = tf.transpose(
                utils.max_pool_4d(
                    tf.transpose(l, [0, 1, 3, 2]),
                    num_inputs // 2),  # integer division keeps the pool size an int under Python 3
                [0, 1, 3, 2])
            f2 = self.vars['f2']
            l = tf.nn.conv2d(l, f2, [1, 1, 1, 1], 'SAME')
            l = tf.transpose(
                utils.max_pool_4d(
                    tf.transpose(l, [0, 1, 3, 2]), 3),
                [0, 1, 3, 2])
            l = tf.nn.dropout(
                utils.activate(
                    tf.reshape(l, [-1, embedding_order * 3 * 2]),
                    layer_acts[1]),
                self.layer_keeps[1])
            w1 = self.vars['w1']
            b1 = self.vars['b1']
            l = tf.nn.dropout(
                utils.activate(
                    tf.matmul(l, w1) + b1,
                    layer_acts[2]),
                self.layer_keeps[2])
            l = tf.reshape(l, [-1])
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
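utils.max_pool_4d is not part of this listing. Judging from its call sites (a 4-D tensor with the pooled axis transposed to the end, plus an integer pool size), one plausible reading is k-max pooling over the last axis via tf.nn.top_k. The sketch below is that assumption made concrete, not the repository's actual helper:

def max_pool_4d(x, k):
    # Keep the k largest activations along the last axis of a 4-D tensor
    # (tf.nn.top_k always operates on the last dimension; values come back in descending order).
    values, _ = tf.nn.top_k(x, k=k)
    return values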
class PNN1(Model):
    def __init__(self, layer_sizes=None, layer_acts=None, drop_out=None, layer_l2=None, kernel_l2=None, init_path=None,
                 opt_algo='gd', learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(layer_sizes[0])
        factor_order = layer_sizes[1]
        for i in range(num_inputs):
            layer_input = layer_sizes[0][i]
            layer_output = factor_order
            init_vars.append(('w0_%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b0_%d' % i, [layer_output], 'zero', dtype))
        init_vars.append(('w1', [num_inputs * factor_order, layer_sizes[2]], 'tnormal', dtype))
        init_vars.append(('k1', [num_inputs, layer_sizes[2]], 'tnormal', dtype))
        init_vars.append(('b1', [layer_sizes[2]], 'zero', dtype))
        for i in range(2, len(layer_sizes) - 1):
            layer_input = layer_sizes[i]
            layer_output = layer_sizes[i + 1]
            init_vars.append(('w%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b%d' % i, [layer_output], 'zero', dtype))
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['w0_%d' % i] for i in range(num_inputs)]
            b0 = [self.vars['b0_%d' % i] for i in range(num_inputs)]
            xw = [tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)]
            x = tf.concat([xw[i] + b0[i] for i in range(num_inputs)], 1)
            l = tf.nn.dropout(
                utils.activate(x, layer_acts[0]),
                self.layer_keeps[0])
            w1 = self.vars['w1']
            k1 = self.vars['k1']
            b1 = self.vars['b1']
            p = tf.reduce_sum(
                tf.reshape(
                    tf.matmul(
                        tf.reshape(
                            tf.transpose(
                                tf.reshape(l, [-1, num_inputs, factor_order]),
                                [0, 2, 1]),
                            [-1, num_inputs]),
                        k1),
                    [-1, factor_order, layer_sizes[2]]),
                1)
            l = tf.nn.dropout(
                utils.activate(
                    tf.matmul(l, w1) + b1 + p,
                    layer_acts[1]),
                self.layer_keeps[1])

            for i in range(2, len(layer_sizes) - 1):
                wi = self.vars['w%d' % i]
                bi = self.vars['b%d' % i]
                l = tf.nn.dropout(
                    utils.activate(
                        tf.matmul(l, wi) + bi,
                        layer_acts[i]),
                    self.layer_keeps[i])

            l = tf.reshape(l, [-1])
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            if layer_l2 is not None:
                # for i in range(num_inputs):
                self.loss += layer_l2[0] * tf.nn.l2_loss(tf.concat(xw, 1))
                for i in range(1, len(layer_sizes) - 1):
                    wi = self.vars['w%d' % i]
                    # bi = self.vars['b%d' % i]
                    self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
            if kernel_l2 is not None:
                self.loss += kernel_l2 * tf.nn.l2_loss(k1)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
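The nested reshape/transpose/matmul chain that builds p in PNN1 contracts the field axis of the first-layer activations against k1 and then sums out the embedding axis. A self-contained NumPy check that the chain equals a single einsum over both axes (toy shapes, chosen only for the test):

import numpy as np

batch, num_inputs, factor_order, hidden = 2, 3, 4, 5
rng = np.random.RandomState(1)
l = rng.randn(batch, num_inputs * factor_order)
k1 = rng.randn(num_inputs, hidden)

l3 = np.reshape(l, [-1, num_inputs, factor_order])            # [batch, field, embed]
chain = np.reshape(
    np.reshape(np.transpose(l3, [0, 2, 1]), [-1, num_inputs]).dot(k1),
    [-1, factor_order, hidden]).sum(axis=1)                   # the graph's computation
assert np.allclose(chain, np.einsum('bnf,nh->bh', l3, k1))    # one-line equivalent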
class PNN2(Model):
    def __init__(self, layer_sizes=None, layer_acts=None, drop_out=None, layer_l2=None, kernel_l2=None, init_path=None,
                 opt_algo='gd', learning_rate=1e-2, random_seed=None):
        Model.__init__(self)
        init_vars = []
        num_inputs = len(layer_sizes[0])
        factor_order = layer_sizes[1]
        for i in range(num_inputs):
            layer_input = layer_sizes[0][i]
            layer_output = factor_order
            init_vars.append(('w0_%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b0_%d' % i, [layer_output], 'zero', dtype))
        init_vars.append(('w1', [num_inputs * factor_order, layer_sizes[2]], 'tnormal', dtype))
        init_vars.append(('k1', [factor_order * factor_order, layer_sizes[2]], 'tnormal', dtype))
        init_vars.append(('b1', [layer_sizes[2]], 'zero', dtype))
        for i in range(2, len(layer_sizes) - 1):
            layer_input = layer_sizes[i]
            layer_output = layer_sizes[i + 1]
            init_vars.append(('w%d' % i, [layer_input, layer_output], 'tnormal', dtype))
            init_vars.append(('b%d' % i, [layer_output], 'zero', dtype))
        self.graph = tf.Graph()
        with self.graph.as_default():
            if random_seed is not None:
                tf.set_random_seed(random_seed)
            self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
            self.y = tf.placeholder(dtype)
            self.keep_prob_train = 1 - np.array(drop_out)
            self.keep_prob_test = np.ones_like(drop_out)
            self.layer_keeps = tf.placeholder(dtype)
            self.vars = utils.init_var_map(init_vars, init_path)
            w0 = [self.vars['w0_%d' % i] for i in range(num_inputs)]
            b0 = [self.vars['b0_%d' % i] for i in range(num_inputs)]
            xw = [tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)]
            x = tf.concat([xw[i] + b0[i] for i in range(num_inputs)], 1)
            l = tf.nn.dropout(
                utils.activate(x, layer_acts[0]),
                self.layer_keeps[0])
            w1 = self.vars['w1']
            k1 = self.vars['k1']
            b1 = self.vars['b1']
            z = tf.reduce_sum(tf.reshape(l, [-1, num_inputs, factor_order]), 1)
            p = tf.reshape(
                tf.matmul(tf.reshape(z, [-1, factor_order, 1]),
                          tf.reshape(z, [-1, 1, factor_order])),
                [-1, factor_order * factor_order])
            l = tf.nn.dropout(
                utils.activate(
                    tf.matmul(l, w1) + tf.matmul(p, k1) + b1,
                    layer_acts[1]),
                self.layer_keeps[1])

            for i in range(2, len(layer_sizes) - 1):
                wi = self.vars['w%d' % i]
                bi = self.vars['b%d' % i]
                l = tf.nn.dropout(
                    utils.activate(
                        tf.matmul(l, wi) + bi,
                        layer_acts[i]),
                    self.layer_keeps[i])

            l = tf.reshape(l, [-1])
            self.y_prob = tf.sigmoid(l)

            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
            if layer_l2 is not None:
                # for i in range(num_inputs):
                self.loss += layer_l2[0] * tf.nn.l2_loss(tf.concat(xw, 1))
                for i in range(1, len(layer_sizes) - 1):
                    wi = self.vars['w%d' % i]
                    # bi = self.vars['b%d' % i]
                    self.loss += layer_l2[i] * tf.nn.l2_loss(wi)
            if kernel_l2 is not None:
                self.loss += kernel_l2 * tf.nn.l2_loss(k1)
            self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            tf.global_variables_initializer().run(session=self.sess)
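None of the classes include a training loop; everything goes through Model.run, which expects sparse batches in TensorFlow's feed format. A minimal toy loop for the single-field LR model, assuming utils.DTYPE is tf.float32; the data, epoch count, and file name are illustrative only:

# Toy batch: 4 one-hot rows over a 10-dimensional feature space.
indices = np.array([[0, 1], [1, 3], [2, 7], [3, 2]], dtype=np.int64)
values = np.ones(4, dtype=np.float32)
shape = np.array([4, 10], dtype=np.int64)
X_batch = tf.SparseTensorValue(indices, values, shape)
y_batch = np.array([1, 0, 1, 0], dtype=np.float32)

model = LR(input_dim=10, learning_rate=0.1)
for epoch in range(10):
    _, loss = model.run([model.optimizer, model.loss], X_batch, y_batch, mode='train')
    print(epoch, loss)
preds = model.run(model.y_prob, X_batch, mode='test')  # predicted probabilities, shape [4]
model.dump('lr_weights.pkl')                           # relies on the X-is-None guard in run()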