Dropping a Hive table fails
Dropping a Hive table fails with the following error:
com.mysql.jdbc.PacketTooBigException: Packet for query is too large (1103 > 1024). You can change this value on the server by setting the max_allowed_packet variable.
at com.mysql.jdbc.MysqlIO.send(MysqlIO.java:3540)
at com.mysql.jdbc.MysqlIO.sendCommand(MysqlIO.java:2417)
at com.mysql.jdbc.MysqlIO.sqlQueryDirect(MysqlIO.java:2582)
at com.mysql.jdbc.ConnectionImpl.execSQL(ConnectionImpl.java:2535)
at com.mysql.jdbc.PreparedStatement.executeInternal(PreparedStatement.java:1911)
at com.mysql.jdbc.PreparedStatement.executeQuery(PreparedStatement.java:2034)
at com.jolbox.bonecp.PreparedStatementHandle.executeQuery(PreparedStatementHandle.java:174)
at org.datanucleus.store.rdbms.ParamLoggingPreparedStatement.executeQuery(ParamLoggingPreparedStatement.java:381)
at org.datanucleus.store.rdbms.SQLController.executeStatementQuery(SQLController.java:504)
at org.datanucleus.store.rdbms.query.JDOQLQuery.performExecute(JDOQLQuery.java:651)
at org.datanucleus.store.query.Query.performDeletePersistentAll(Query.java:2053)
at org.datanucleus.store.query.AbstractJavaQuery.performDeletePersistentAll(AbstractJavaQuery.java:106)
at org.datanucleus.store.query.Query.deletePersistentAll(Query.java:2029)
at org.datanucleus.store.query.Query.deletePersistentAll(Query.java:1985)
at org.datanucleus.api.jdo.JDOQuery.deletePersistentAll(JDOQuery.java:180)
at org.apache.hadoop.hive.metastore.ObjectStore.dropPartitionGrantsNoTxn(ObjectStore.java:4743)
at org.apache.hadoop.hive.metastore.ObjectStore.dropPartitions(ObjectStore.java:1635)
at sun.reflect.GeneratedMethodAccessor53.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:98)
at com.sun.proxy.$Proxy0.dropPartitions(Unknown Source)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.dropPartitionsAndGetLocations(HiveMetaStore.java:1655)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.drop_table_core(HiveMetaStore.java:1505)
at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.drop_table_with_environment_context(HiveMetaStore.java:1676)
at sun.reflect.GeneratedMethodAccessor51.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:102)
at com.sun.proxy.$Proxy3.drop_table_with_environment_context(Unknown Source)
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$drop_table_with_environment_context.getResult(ThriftHiveMe
at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$drop_table_with_environment_context.getResult(ThriftHiveMe
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:110)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:106)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1671)
at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:118)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:285)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
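The trace shows where the drop fails: ObjectStore.dropPartitions / dropPartitionGrantsNoTxn, i.e. the metastore is deleting the table's partition rows from its MySQL backend, and one of those delete statements exceeds the server's 1024-byte packet limit (1103 > 1024). Before changing anything, it is worth checking the current limit and the metastore database's character set on the MySQL server (hivedb below is the metastore database name used in the fix):
mysql> show variables like 'max_allowed_packet';
mysql> show create database hivedb;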
Solution:
Change the character set of the Hive metastore database:
mysql> alter database hivedb character set latin1;
Then restart the Hive metastore and HiveServer; after that the table drops successfully.
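The fix above switches the metastore database to single-byte latin1, the encoding Hive's MySQL schema scripts assume. The error message itself points at another knob, the server's max_allowed_packet variable (its server-side minimum is exactly the 1024 bytes seen here). A minimal sketch of raising it instead, assuming a 64 MB limit is acceptable on your MySQL server:
mysql> set global max_allowed_packet = 67108864; -- 64 MB; applies to connections opened after this
To keep the setting across a mysqld restart, also put it in my.cnf:
[mysqld]
max_allowed_packet = 64M
Either way, restart the Hive services afterwards so they reconnect; assuming they were started through the hive launcher, that is hive --service metastore and hive --service hiveserver2.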