Connecting to Hadoop with Java
Experiment environment
- Hadoop version: 3.3.2
- JDK version: 1.8
- Hadoop host OS: Ubuntu 18.04
- Development environment: IDEA
- Development host: Windows
Experiment content
Test a remote Java connection to Hadoop
Create a Maven project and add the following dependencies:
<dependency>
    <groupId>org.testng</groupId>
    <artifactId>testng</artifactId>
    <version>RELEASE</version>
    <scope>compile</scope>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>3.3.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.3.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-core</artifactId>
    <version>1.2.1</version>
</dependency>
The VM's /etc/hosts configuration
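The key point is that the hostname used in the Hadoop configuration files resolves to the VM itself. A typical entry looks like the following sketch (the IP is a placeholder, substitute the VM's actual address):

10.0.12.11    VM-12-11-ubuntu    # placeholder IP, use the VM's actual address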
hdfs-site.xml configuration
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/root/rDesk/hadoop-3.3.2/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.http.address</name>
        <value>VM-12-11-ubuntu:50010</value>
    </property>
    <property>
        <name>dfs.client.use.datanode.hostname</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/root/rDesk/hadoop-3.3.2/tmp/dfs/data</value>
    </property>
</configuration>
core-site.xml configuration
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/root/rDesk/hadoop-3.3.2/tmp</value>
        <description>Abase for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://VM-12-11-ubuntu:9000</value>
    </property>
</configuration>
Start Hadoop
sbin/start-dfs.sh
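As a quick sanity check, you can run jps on the VM; with this single-node setup it should list at least the HDFS daemons:

jps
# Expected process names (PIDs will vary):
# NameNode
# DataNode
# SecondaryNameNode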
Hosts file configuration on the Windows host (C:\Windows\System32\drivers\etc)
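The entry mirrors the one on the VM, except that the IP must be the VM's address as reachable from Windows (LAN or public IP; again a placeholder):

10.0.12.11    VM-12-11-ubuntu    # placeholder, use the address Windows uses to reach the VM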
Try connecting to Hadoop on the VM and reading a file's contents; here I read the HDFS file /root/iinput.
Java代碼:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TestConnectHadoop {
    public static void main(String[] args) throws Exception {
        String hostname = "VM-12-11-ubuntu";
        String HDFS_PATH = "hdfs://" + hostname + ":9000";
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", HDFS_PATH);
        conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
        // Resolve DataNodes by hostname so the Windows client can reach them
        // through the mappings in its hosts file
        conf.set("dfs.client.use.datanode.hostname", "true");
        FileSystem fs = FileSystem.get(conf);
        // List the root directory to verify the connection works
        FileStatus[] fileStatuses = fs.listStatus(new Path("/"));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus.toString());
        }
        FileStatus fileStatus = fs.getFileStatus(new Path("/root/iinput"));
        System.out.println(fileStatus.getOwner());
        System.out.println(fileStatus.getGroup());
        System.out.println(fileStatus.getPath());
        // Read the whole file into a string and print it
        FSDataInputStream open = fs.open(fileStatus.getPath());
        byte[] buf = new byte[1024];
        int n = -1;
        StringBuilder sb = new StringBuilder();
        while ((n = open.read(buf)) > 0) {
            sb.append(new String(buf, 0, n));
        }
        System.out.println(sb);
    }
}
Run result:
Implement a class "MyFSDataInputStream" that extends "org.apache.hadoop.fs.FSDataInputStream", with the following requirements: ① implement a method "readLine()" that reads the specified HDFS file line by line, returning null once the end of the file is reached and one line of the file's text otherwise.
Approach: mine is fairly simple and only covers this exact requirement, so treat it as a reference.
Read all the data up front, split it on newline characters, and keep the resulting array of strings; readLine() then hands the lines back one at a time.
Java code
import org.apache.hadoop.fs.FSDataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MyFSDataInputStream extends FSDataInputStream {
    private String data = null;
    private String[] lines = null;
    private int count = 0;
    private FSDataInputStream in;

    public MyFSDataInputStream(InputStream in) throws IOException {
        super(in);
        this.in = (FSDataInputStream) in;
        init();
    }

    // Read the entire file up front and split it into lines
    private void init() throws IOException {
        byte[] buf = new byte[1024];
        int n = -1;
        StringBuilder sb = new StringBuilder();
        while ((n = this.in.read(buf)) > 0) {
            sb.append(new String(buf, 0, n));
        }
        data = sb.toString();
        lines = data.split("\n");
    }

    /**
     * Reads the specified HDFS file line by line: returns null once the end
     * of the file is reached, otherwise returns the next line of text.
     */
    public String read_line() {
        return count < lines.length ? lines[count++] : null;
    }
}
Test class:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TestConnectHadoop {
    public static void main(String[] args) throws Exception {
        String hostname = "VM-12-11-ubuntu";
        String HDFS_PATH = "hdfs://" + hostname + ":9000";
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", HDFS_PATH);
        conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
        conf.set("dfs.client.use.datanode.hostname", "true");
        FileSystem fs = FileSystem.get(conf);
        FileStatus fileStatus = fs.getFileStatus(new Path("/root/iinput"));
        System.out.println(fileStatus.getOwner());
        System.out.println(fileStatus.getGroup());
        System.out.println(fileStatus.getPath());
        FSDataInputStream open = fs.open(fileStatus.getPath());
        MyFSDataInputStream myFSDataInputStream = new MyFSDataInputStream(open);
        String line = null;
        int count = 0;
        while ((line = myFSDataInputStream.read_line()) != null) {
            System.out.printf("line %d is: %s\n", count++, line);
        }
        System.out.println("end");
    }
}
Run result:
② Add caching: when "MyFSDataInputStream" is asked to read some bytes, check the cache first; if the requested data is already cached, serve it from the cache directly, otherwise read it from HDFS.
import org.apache.hadoop.fs.FSDataInputStream;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MyFSDataInputStream extends FSDataInputStream {
    private BufferedInputStream buffer;
    private String[] lines = null;
    private int count = 0;
    private FSDataInputStream in;

    public MyFSDataInputStream(InputStream in) throws IOException {
        super(in);
        this.in = (FSDataInputStream) in;
        init();
    }

    private void init() throws IOException {
        byte[] buf = new byte[1024];
        int n = -1;
        StringBuilder sb = new StringBuilder();
        while ((n = this.in.read(buf)) > 0) {
            sb.append(new String(buf, 0, n));
        }
        // Seek back to the start: the loop above has exhausted the stream,
        // and without this the buffered reads below would return nothing
        this.in.seek(0);
        // Byte reads go through this buffer: data already buffered is served
        // from memory, and only cache misses go back to HDFS
        buffer = new BufferedInputStream(this.in);
        lines = sb.toString().split("\n");
    }

    /**
     * Reads the specified HDFS file line by line: returns null once the end
     * of the file is reached, otherwise returns the next line of text.
     */
    public String read_line() {
        return count < lines.length ? lines[count++] : null;
    }

    @Override
    public int read() throws IOException {
        return this.buffer.read();
    }

    public int readWithBuf(byte[] buf, int offset, int len) throws IOException {
        return this.buffer.read(buf, offset, len);
    }

    public int readWithBuf(byte[] buf) throws IOException {
        return this.buffer.read(buf);
    }
}
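A minimal test sketch for this cached version (assuming the same cluster settings and file as the earlier test class; the class name TestCachedRead and the 16-byte read size are arbitrary choices): it first reads a few raw bytes through the buffer, then drains the cached lines with read_line().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TestCachedRead {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://VM-12-11-ubuntu:9000");
        conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
        conf.set("dfs.client.use.datanode.hostname", "true");
        FileSystem fs = FileSystem.get(conf);

        FSDataInputStream open = fs.open(new Path("/root/iinput"));
        MyFSDataInputStream in = new MyFSDataInputStream(open);

        // Byte-oriented read served through the BufferedInputStream cache
        byte[] head = new byte[16];
        int n = in.readWithBuf(head);
        System.out.println("first " + n + " bytes: " + new String(head, 0, Math.max(n, 0)));

        // Line-oriented reads served from the pre-split line cache
        String line;
        while ((line = in.read_line()) != null) {
            System.out.println(line);
        }
    }
}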
This concludes this article on connecting to Hadoop with Java. For more on connecting to Hadoop from Java, please search 腳本之家's earlier articles, and we hope you will continue to support 腳本之家!