
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 15:14:07 2017

@author: Administrator
"""
import random

import pymongo
from pymongo import MongoClient
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from numpy import row_stack, column_stack
from dateutil.parser import parse
from matplotlib.pylab import date2num

# Load the longitude/latitude of the four districts from the company database.
client1 = MongoClient('192.168.0.xxx', xxxxx)
db1 = client1.fangjia
seaweed1 = db1.seaweed
#print(seaweed1.find_one({"city":"上海","region":"浦东","name":"康桥半岛二期"},{"lat2":1,"lng2":1}))
'''
print(seaweed1.find_one({"city":"上海","region":"浦东",
                         "name":{"$in":["康桥半岛二期","康桥半岛三期","绿洲清水湾","中邦城市"]}},
                        {"lat2":1,"lng2":1}))
'''
query1 = {"status": 0, "cat": "district", "city": "上海", "region": "浦东",
          "name": {"$in": ["康桥半岛二期", "康桥半岛三期", "绿洲清水湾", "中邦城市"]}}
fields1 = {"lat2": 1, "lng2": 1, "city": 1, "region": 1, "cat": 1, "name": 1}
lct = list()
for s in seaweed1.find(query1, fields1):   # was `seaweed`, which is undefined
    lct.append(s)
lf = DataFrame(lct)
le = lf
le.index = le['name']
lr = le[['lng2', 'lat2']]

# Load the sell listings from the company database.
client = MongoClient('192.168.xx.xx', xxxxxx)
db = client.fangjia
seawater = db.seawater
seawater.find_one()   # quick connectivity check

# Query the listings of the four districts posted after 2016-05-08.
query = {"city": "上海", "cat": "sell", "region": "浦东",
         "district_name": {"$in": ["康桥半岛二期", "康桥半岛三期", "绿洲清水湾", "中邦城市"]},
         "p_date": {"$gt": 20160508}}
lt = seawater.count(query)
print(lt)

# Collect the documents into a list of dicts, then into a DataFrame.
pos = list()
for s in seawater.find(query).limit(lt - 1):
    pos.append(s)
data = DataFrame(pos)
data.to_excel('data.xls')

# Features to extract.
choose_class = ['total_price', 'area', 'height', 'room',
                'direction', 'hall', 'toilet', 'fitment', 'district_name', 'p_date']
dc = data[choose_class].copy()   # .copy() avoids SettingWithCopyWarning on the element-wise writes below
dc['lng2'] = 0
dc['lat2'] = 1
'''
for i in range(dc.shape[0]):
    bn = dc['district_name']
    p = bn[i]
    dc['lng2'][i] = lo['lng2'][p]
'''
# Attach each listing's coordinates by matching its district name.
for i in range(dc.shape[0]):
    if dc['district_name'][i] == lr.index[0]:
        dc['lng2'][i] = lr['lng2'][0]
        dc['lat2'][i] = lr['lat2'][0]
    elif dc['district_name'][i] == lr.index[1]:
        dc['lng2'][i] = lr['lng2'][1]
        dc['lat2'][i] = lr['lat2'][1]
    elif dc['district_name'][i] == lr.index[2]:
        dc['lng2'][i] = lr['lng2'][2]
        dc['lat2'][i] = lr['lat2'][2]
    elif dc['district_name'][i] == lr.index[3]:
        dc['lng2'][i] = lr['lng2'][3]
        dc['lat2'][i] = lr['lat2'][3]

# Convert 'total_price' into price per unit area and store it back.
mean_price = dc['total_price'] / dc['area']
dc['total_price'] = mean_price

# Convert the date field into a continuous number; whether this feature helps
# remains to be seen.
h = dc['p_date']
for i in range(1, len(h)):
    a = int(h[i])
    b = str(a)
    c = parse(b)
    e = date2num(c)
    h[i] = e
dc['p_date'] = h

dc.to_excel('dc.xls')
'''
# Assign a numeric label to each district.
for i in dc['district_name'].index:
    if dc['district_name'][i] == '康桥半岛二期':
        dc['district_name'][i] = 0
    elif dc['district_name'][i] == '康桥半岛三期':
        dc['district_name'][i] = 1
    elif dc['district_name'][i] == '绿洲清水湾':
        dc['district_name'][i] = 2
    elif dc['district_name'][i] == '中邦城市':
        dc['district_name'][i] = 3
'''
# Encode orientation: 0 if the listing faces south, 1 otherwise.
for i in dc['direction'].index:
    if '南' in str(dc['direction'][i]):
        dc['direction'][i] = 0
    else:
        dc['direction'][i] = 1
# Encode fitment: 0 for luxury/fine decoration, 1 otherwise.
# (The original `('豪' or '精') in ...` only ever tested '豪'.)
for i in dc['fitment'].index:
    if ('豪' in str(dc['fitment'][i])) or ('精' in str(dc['fitment'][i])):
        dc['fitment'][i] = 0
    else:
        dc['fitment'][i] = 1

# Fill missing values with the column means.
dc = dc.fillna({'height': dc['height'].mean(),
                'room': dc['room'].mean(),
                'toilet': dc['toilet'].mean(),
                'hall': dc['hall'].mean()})
ds = dc.drop('district_name', axis=1)
data_all = ds.drop([0], axis=0)

# Hold out roughly 5% of the rows as a test set.
sample_number = data_all.shape[0]
kk = int(0.05 * sample_number)
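# Added note (not in the original script): fixing the RNG seed makes the 5%
# hold-out split below reproducible between runs; random.randint() may also
# draw the same row index more than once, in which case duplicates simply
# shrink the effective test set.
random.seed(0)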
test_label = [random.randint(1, sample_number) for _ in range(kk)]
data_train = data_all.drop(test_label, axis=0)
#data_train.to_excel('data_train.xls')

# Min-max normalisation (the extra 0.2 keeps scaled values away from 0 and 1).
data_max = data_train.max()
data_min = data_train.min()
data_train1 = (data_train - data_min) / (data_max - data_min + 0.2)
#knife = int(0.95 * data_train.shape[0])   # alternative: positional split into training and evaluation parts
x_train = data_train1.iloc[:, 1:11].as_matrix()  # training features (columns 1-10)
y_train = data_train1.iloc[:, 0:1].as_matrix()   # training target: normalised unit price (column 0)

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

model = Sequential()  # build the network
model.add(Dense(input_dim=10, output_dim=48))    # input layer -> hidden layer
model.add(Activation('relu'))                    # ReLU activation
model.add(Dense(input_dim=48, output_dim=100))   # hidden layer -> hidden layer
model.add(Activation('relu'))
model.add(Dense(input_dim=100, output_dim=50))
model.add(Activation('relu'))
model.add(Dense(input_dim=50, output_dim=36))
model.add(Activation('relu'))
model.add(Dense(input_dim=36, output_dim=12))
model.add(Activation('relu'))
model.add(Dense(input_dim=12, output_dim=12))
model.add(Activation('relu'))
model.add(Dense(input_dim=12, output_dim=1))     # hidden layer -> output layer
model.add(Activation('sigmoid'))                 # sigmoid activation on the output

# Compile with mean squared error loss and the Adam optimizer.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, nb_epoch=300, batch_size=2)  # train the model
model.save_weights('net.model')  # save the weights

# Evaluate on the held-out rows, normalised with the training statistics.
test = data_all.ix[test_label, :]
#test_max = test.max()
#test_min = test.min()
data_test = (test - data_min) / (data_max - data_min + 0.2)
x_test = data_test.iloc[:, 1:11].as_matrix()
y_test = data_test.iloc[:, 0:1].as_matrix()
#x_test = data_train1.iloc[knife:, 1:9].as_matrix()
#y_test = data_train1.iloc[knife:, 0:1].as_matrix()

# Undo the normalisation; column 0 of the result is the predicted unit price.
r = model.predict(x_test)
rt = r * (data_max.values - data_min.values + 0.2) + data_min.values
#print(rt.round(2))
predict = rt[:, 0:1]
realvalue = test.iloc[:, 0:1].as_matrix()
error = abs((predict - realvalue) / realvalue) * 100
geek = column_stack((predict, realvalue, error))
DataFrame(geek).to_excel('geek.xls')
print(geek)
print('average prediction error:', '%.2f' % error.mean(), '%')
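Because save_weights stores only the layer weights and not the architecture, reusing the trained model later means rebuilding the same Sequential stack before loading net.model. Below is a minimal sketch of that reload-and-predict step; build_net and new_rows are hypothetical names introduced here, and it assumes the data_min/data_max Series from the training run are still in memory (the script above does not persist them, so in practice they would have to be saved alongside the weights).

from keras.models import Sequential
from keras.layers.core import Dense, Activation

def build_net():
    """Recreate the exact architecture trained above so the saved weights fit."""
    m = Sequential()
    for n_in, n_out in [(10, 48), (48, 100), (100, 50), (50, 36), (36, 12), (12, 12)]:
        m.add(Dense(input_dim=n_in, output_dim=n_out))
        m.add(Activation('relu'))
    m.add(Dense(input_dim=12, output_dim=1))
    m.add(Activation('sigmoid'))
    return m

net = build_net()
net.load_weights('net.model')   # weights written by the training script

# new_rows is assumed to be a DataFrame with the same 11 columns as data_all.
scaled = (new_rows - data_min) / (data_max - data_min + 0.2)
pred = net.predict(scaled.iloc[:, 1:11].values)

# Undo the normalisation of the target column (unit price).
unit_price = pred[:, 0] * (data_max['total_price'] - data_min['total_price'] + 0.2) + data_min['total_price']
print(unit_price.round(2))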