beeline -u jdbc:hive2://127.0.0.1:10000 连接失败

来源:2-3 【项目文档】环境部署步骤

慕沐218169

2024-09-20

[root@flinkriskproject imooc-dmp-env]# docker-compose ps
WARN[0000] network default: network.external.name is deprecated in favor of network.name
NAME COMMAND SERVICE STATUS PORTS
clickhouse “/entrypoint.sh” clickhouse running 0.0.0.0:8123->8123/tcp, 0.0.0.0:9011->9000/tcp, :::8123->8123/tcp, :::9011->9000/tcp
datanode1 “/entrypoint.sh /run…” datanode1 running (healthy) 0.0.0.0:50175->50075/tcp, 0.0.0.0:50110->50110/tcp, :::50175->50075/tcp, :::50110->50110/tcp
datanode2 “/entrypoint.sh /run…” datanode2 running (healthy) 0.0.0.0:50275->50075/tcp, 0.0.0.0:50210->50210/tcp, :::50275->50075/tcp, :::50210->50210/tcp
datanode3 “/entrypoint.sh /run…” datanode3 running (healthy) 0.0.0.0:50375->50075/tcp, 0.0.0.0:50310->50310/tcp, :::50375->50075/tcp, :::50310->50310/tcp
es “/docker-entrypoint.…” es running 0.0.0.0:9201->9200/tcp, 0.0.0.0:9301->9300/tcp, :::9201->9200/tcp, :::9301->9300/tcp
hbase-master “/entrypoint.sh /run…” hbase-master running 0.0.0.0:8765->8765/tcp, 0.0.0.0:16000->16000/tcp, 0.0.0.0:16010->16010/tcp, :::8765->8765/tcp, :::16000->16000/tcp, :::16010->16010/tcp
hbase-regionserver-1 “/entrypoint.sh /run…” hbase-regionserver-1 running 0.0.0.0:16120->16120/tcp, 0.0.0.0:16130->16130/tcp, :::16120->16120/tcp, :::16130->16130/tcp
hbase-regionserver-2 “/entrypoint.sh /run…” hbase-regionserver-2 running 0.0.0.0:16220->16220/tcp, 0.0.0.0:16230->16230/tcp, :::16220->16220/tcp, :::16230->16230/tcp
hbase-regionserver-3 “/entrypoint.sh /run…” hbase-regionserver-3 running 0.0.0.0:16320->16320/tcp, 0.0.0.0:16330->16330/tcp, :::16320->16320/tcp, :::16330->16330/tcp
historyserver “/entrypoint.sh /run…” historyserver running (healthy) 0.0.0.0:8188->8188/tcp, :::8188->8188/tcp
hive-metastore “entrypoint.sh /opt/…” hive-metastore exited (1)
hive-server “entrypoint.sh /bin/…” hive-server running 0.0.0.0:10000->10000/tcp, 0.0.0.0:10002->10002/tcp, :::10000->10000/tcp, :::10002->10002/tcp
mysql “docker-entrypoint.s…” mysql running 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp
namenode “/entrypoint.sh /run…” namenode running (healthy) 0.0.0.0:8020->8020/tcp, 0.0.0.0:9000->9000/tcp, 0.0.0.0:50070->50070/tcp, :::8020->8020/tcp, :::9000->9000/tcp, :::50070->50070/tcp
nginx “/docker-entrypoint.…” nginx running 0.0.0.0:80->80/tcp, :::80->80/tcp
nodemanager1 “/entrypoint.sh /run…” nodemanager1 running (healthy) 0.0.0.0:8140->8040/tcp, 0.0.0.0:8142->8042/tcp, 0.0.0.0:41655->46655/tcp, :::8140->8040/tcp, :::8142->8042/tcp, :::41655->46655/tcp
nodemanager2 “/entrypoint.sh /run…” nodemanager2 running (healthy) 0.0.0.0:8240->8040/tcp, 0.0.0.0:8242->8042/tcp, 0.0.0.0:42655->46655/tcp, :::8240->8040/tcp, :::8242->8042/tcp, :::42655->46655/tcp
nodemanager3 “/entrypoint.sh /run…” nodemanager3 running (healthy) 0.0.0.0:8340->8040/tcp, 0.0.0.0:8342->8042/tcp, 0.0.0.0:43655->46655/tcp, :::8340->8040/tcp, :::8342->8042/tcp, :::43655->46655/tcp
phoenix “/run-phoenix-server…” phoenix running 0.0.0.0:8766->8765/tcp, :::8766->8765/tcp
redis “docker-entrypoint.s…” redis running 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp
resourcemanager “/entrypoint.sh /run…” resourcemanager running (healthy) 0.0.0.0:8030-8033->8030-8033/tcp, 0.0.0.0:8088->8088/tcp, :::8030-8033->8030-8033/tcp, :::8088->8088/tcp
spark-master “/bin/bash /master.sh” spark-master running 0.0.0.0:7077->7077/tcp, 0.0.0.0:8180->8080/tcp, :::7077->7077/tcp, :::8180->8080/tcp
spark-worker-1 “/bin/bash /worker.sh” spark-worker-1 running 0.0.0.0:8181->8081/tcp, :::8181->8081/tcp
spark-worker-2 “/bin/bash /worker.sh” spark-worker-2 running 0.0.0.0:8182->8081/tcp, :::8182->8081/tcp
spark-worker-3 “/bin/bash /worker.sh” spark-worker-3 running 0.0.0.0:8183->8081/tcp, :::8183->8081/tcp
zoo1 “/docker-entrypoint.…” zoo1 running 0.0.0.0:2182->2181/tcp, :::2182->2181/tcp
zoo2 “/docker-entrypoint.…” zoo2 running 0.0.0.0:2183->2181/tcp, :::2183->2181/tcp
zoo3 “/docker-entrypoint.…” zoo3 running 0.0.0.0:2184->2181/tcp, :::2184->2181/tcp
[root@flinkriskproject imooc-dmp-env]# docker exec -it hive-sever bash
Error response from daemon: No such container: hive-sever
[root@flinkriskproject imooc-dmp-env]# docker exec -it hive-server bash
root@hive-server:/opt# beeline -u jdbc:hive2://127.0.0.1:10000
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hive/lib/log4j-slf4j-impl-2.6.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop-2.7.4/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Connecting to jdbc:hive2://127.0.0.1:10000
24/09/20 16:39:33 [main]: WARN jdbc.HiveConnection: Failed to connect to 127.0.0.1:10000
Could not open connection to the HS2 server. Please check the server URI and if the URI is correct, then ask the administrator to check the server status.
Error: Could not open client transport with JDBC Uri: jdbc:hive2://127.0.0.1:10000: java.net.ConnectException: Connection refused (Connection refused) (state=08S01,code=0)
Beeline version 2.3.2 by Apache Hive
beeline> show databases;
No current connection

写回答

1回答

小简同学

2024-09-24

同学你好,从你贴出的 docker-compose ps 输出可以看到 hive-metastore 容器已经退出(exited (1)),metastore 起不来时 HiveServer2 无法正常提供服务,beeline 连接就会被拒绝。建议先用 docker logs hive-metastore 查看退出原因(常见是 MySQL 未就绪或元数据库初始化失败),确认 hive、hadoop、mysql 都正常安装和启动后,再重启 hive-metastore 和 hive-server 重试连接。文档里有搭建的演示动画,可以参考一下。
0
0

Spark+ES+ClickHouse 构建DMP用户画像

大数据主流技术,数据挖掘核心算法,用户画像完整知识轻松掌握

305 学习 · 219 问题

查看课程