Required jar packages: the Hadoop client libraries (the Hadoop common and HDFS jars) must be on the classpath.
I. Operating HDFS through the URL API
import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

public class HDFSUrlTest {

    /*
     * HDFS URL API operation mode:
     * there is no need to read the core-site.xml and hdfs-site.xml configuration files.
     */

    // let the Java program recognize HDFS URLs (this factory may only be set once per JVM)
    static {
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    }

    // view the file content
    @Test
    public void testRead() throws Exception {
        InputStream in = null;
        // file path
        String fileUrl = "hdfs://hadoop-master.dragon.org:9000/opt/data/test/01.data";
        try {
            // get the file input stream
            in = new URL(fileUrl).openStream();
            // read the contents of the file and print them to the console
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
        }
    }
}
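The same read can also be run as a plain Java program rather than a JUnit test. A minimal sketch, reusing the NameNode address and file path from the test above; the class name HDFSUrlReadMain is only illustrative:

import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.io.IOUtils;

public class HDFSUrlReadMain {

    // register the HDFS URL handler; this may only be done once per JVM
    static {
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    }

    public static void main(String[] args) throws Exception {
        String fileUrl = "hdfs://hadoop-master.dragon.org:9000/opt/data/test/01.data";
        InputStream in = null;
        try {
            // open the stream and dump the file contents to standard output
            in = new URL(fileUrl).openStream();
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
        }
    }
}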
II. Operating HDFS through the FileSystem API
HDFS utility class
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HDFSUtils {

    /*
     * HDFS utility class
     */
    public static FileSystem getFileSystem() {
        // declare the FileSystem
        FileSystem hdfs = null;
        try {
            // get the configuration information
            Configuration conf = new Configuration();
            // get the file system
            hdfs = FileSystem.get(conf);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return hdfs;
    }
}
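Unlike the URL API, FileSystem.get(conf) resolves the target file system from the configuration files on the classpath (core-site.xml and hdfs-site.xml). If those files are not available, the NameNode address can be set on the Configuration directly. A minimal sketch, assuming the same NameNode address used elsewhere in this article; the class name HDFSUtilsExplicit is only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HDFSUtilsExplicit {

    public static FileSystem getFileSystem() throws Exception {
        Configuration conf = new Configuration();
        // point the client at the NameNode explicitly instead of relying on core-site.xml
        // (the property is "fs.default.name" on older Hadoop releases, "fs.defaultFS" on newer ones)
        conf.set("fs.defaultFS", "hdfs://hadoop-master.dragon.org:9000");
        return FileSystem.get(conf);
    }
}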
Common operation implementation class
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

public class HDFSFsTest {

    /*
     * Operate HDFS through the FileSystem API.
     */

    // read the contents of a file
    @Test
    public void testRead() throws Exception {
        // get the file system
        FileSystem hdfs = HDFSUtils.getFileSystem();
        // file name
        Path path = new Path("/opt/data/test/touch.data");
        // open the file input stream
        FSDataInputStream inStream = hdfs.open(path);
        // read the file and display it on the console
        IOUtils.copyBytes(inStream, System.out, 4096, false);
        // close the stream
        IOUtils.closeStream(inStream);
    }

    // list a directory
    @Test
    public void testList() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        // directory name
        Path path = new Path("/opt/data");
        FileStatus[] fileStatus = hdfs.listStatus(path);
        for (FileStatus file : fileStatus) {
            Path p = file.getPath();
            String info = file.isDir() ? "directory" : "file";
            System.out.println(info + ": " + p);
        }
    }

    // create a directory
    @Test
    public void testDirectory() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        // directory to be created
        Path path = new Path("/opt/data/dir");
        // equivalent to mkdir -p /opt/data/dir under Linux
        boolean isSuccessful = hdfs.mkdirs(path);
        String info = isSuccessful ? "success" : "failure";
        System.out.println("create directory [" + path + "] " + info);
    }

    // upload a file -- put / copyFromLocal
    @Test
    public void testPut() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        // local file (directory + file name)
        Path srcPath = new Path("c:/0125.log");
        // HDFS upload path
        Path dstPath = new Path("/opt/data/dir/");
        hdfs.copyFromLocalFile(srcPath, dstPath);
    }

    // create an HDFS file and write content to it
    @Test
    public void testCreate() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        Path path = new Path("/opt/data/dir/touch.data");
        // create the file and get the output stream
        FSDataOutputStream fSDataOutputStream = hdfs.create(path);
        // write data through the output stream
        fSDataOutputStream.write("Hello".getBytes());
        fSDataOutputStream.writeUTF("hello hadoop!");
        IOUtils.closeStream(fSDataOutputStream);
    }

    // rename a file
    @Test
    public void testRename() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        Path oldPath = new Path("/opt/data/dir/touch.data");
        Path newPath = new Path("/opt/data/dir/rename.data");
        boolean flag = hdfs.rename(oldPath, newPath);
        System.out.println(flag);
    }

    // delete a file
    @Test
    public void testDelete() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        Path path = new Path("/opt/data/dir/touch.data");
        boolean flag = hdfs.deleteOnExit(path);
        System.out.println(flag);
    }

    // delete a directory
    @Test
    public void testDeleteDir() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        Path path = new Path("/opt/data/dir");
        // for a directory the second parameter (recursive) must be true
        boolean flag = hdfs.delete(path, true);
        System.out.println(flag);
    }

    // find the block locations of a file in the HDFS cluster
    @Test
    public void testLocation() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        Path path = new Path("/opt/data/test.file");
        FileStatus fileStatus = hdfs.getFileStatus(path);
        BlockLocation[] blockLocations = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
        for (BlockLocation blockLocation : blockLocations) {
            String[] hosts = blockLocation.getHosts();
            for (String host : hosts) {
                System.out.print(host + " ");
            }
            System.out.println();
        }
    }

    // get the host names of all data nodes in the HDFS cluster
    @Test
    public void testCluster() throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        DistributedFileSystem distributedFileSystem = (DistributedFileSystem) hdfs;
        DatanodeInfo[] datanodeInfos = distributedFileSystem.getDataNodeStats();
        for (DatanodeInfo datanodeInfo : datanodeInfos) {
            String hostName = datanodeInfo.getHostName();
            System.out.println(hostName);
        }
    }
}
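The class above covers uploading with copyFromLocalFile but not the reverse direction. For completeness, a minimal sketch of a download using the FileSystem API's copyToLocalFile; this example is not part of the original article and its paths are only illustrative:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSGetTest {

    // download a file -- get / copyToLocal
    public static void main(String[] args) throws Exception {
        FileSystem hdfs = HDFSUtils.getFileSystem();
        // HDFS source file (illustrative path)
        Path srcPath = new Path("/opt/data/dir/0125.log");
        // local destination directory (illustrative path)
        Path dstPath = new Path("c:/download/");
        hdfs.copyToLocalFile(srcPath, dstPath);
    }
}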
III. Uploading and merging small files to HDFS
Implementation idea: loop through the files in the local directory, open an input stream for each one, and write its bytes into a single output stream on HDFS.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/*
 * While uploading the local files to HDFS, merge them into a single file on HDFS.
 */
public class PutMerge {

    /**
     * @param localDir the local directory of files to be uploaded
     * @param hdfsFile the full HDFS path of the merged target file
     */
    public static void put(String localDir, String hdfsFile) throws Exception {
        // get the configuration information
        Configuration conf = new Configuration();
        Path localPath = new Path(localDir);
        Path hdfsPath = new Path(hdfsFile);
        // get the local file system
        FileSystem localFs = FileSystem.getLocal(conf);
        // get HDFS
        FileSystem hdfs = FileSystem.get(conf);
        // all files in the specified directory of the local file system
        FileStatus[] status = localFs.listStatus(localPath);
        // open the output stream of the target file on HDFS
        FSDataOutputStream fSDataOutputStream = hdfs.create(hdfsPath);
        // loop through the local files
        for (FileStatus fileStatus : status) {
            // get the file path
            Path path = fileStatus.getPath();
            System.out.println("File is: " + path.getName());
            // open the file input stream
            FSDataInputStream fSDataInputStream = localFs.open(path);
            // copy the stream
            byte[] buff = new byte[1024];
            int len = 0;
            while ((len = fSDataInputStream.read(buff)) > 0) {
                fSDataOutputStream.write(buff, 0, len);
            }
            fSDataInputStream.close();
        }
        fSDataOutputStream.close();
    }

    public static void main(String[] args) {
        String localDir = "D:/logs";
        String hdfsFile = "hdfs://hadoop-master.dragon.org:9000/opt/data/logs.data";
        try {
            put(localDir, hdfsFile);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
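The manual buffer loop in PutMerge can also be written with IOUtils.copyBytes, which the earlier examples already use. A minimal sketch of the same merge logic under that assumption; the class name PutMergeWithIOUtils is only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class PutMergeWithIOUtils {

    public static void put(String localDir, String hdfsFile) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFs = FileSystem.getLocal(conf);
        FileSystem hdfs = FileSystem.get(conf);
        // single output stream for the merged file on HDFS
        FSDataOutputStream out = hdfs.create(new Path(hdfsFile));
        for (FileStatus fileStatus : localFs.listStatus(new Path(localDir))) {
            FSDataInputStream in = localFs.open(fileStatus.getPath());
            // copy this file into the merged stream; "false" keeps the output stream open
            IOUtils.copyBytes(in, out, 4096, false);
            IOUtils.closeStream(in);
        }
        out.close();
    }
}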