Big Data Homework

hadoop1

I recommend taking a VM snapshot before you start: I once broke my environment without even knowing what I had done, and restoring it took a long time.
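
If the environment runs under VirtualBox (as the dblab-VirtualBox hostname later in this post suggests), a snapshot can be taken from the host shell as well as from the GUI; a minimal sketch, with a hypothetical VM name:

VBoxManage snapshot "dblab-VM" take "before-wordcount"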

Java code

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        // Summing is associative and commutative, so the reducer doubles as a combiner.
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Every argument except the last is an input path; the last is the output path.
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    // Splits each input line into whitespace-separated tokens and emits (word, 1).
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private static final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Sums the counts for each word and emits (word, total).
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
}

In Eclipse, choose File -> Export -> JAR file and click through the Next steps; on the last page, be sure to select the Main class (the jar's entry point).

After exporting the jar, upload it to the VM with scp:

scp wordCounter.jar hadoop@192.168.150.133:/usr/local/hadoop
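
Alternatively, if the Eclipse export wizard gives you trouble, the jar can be built directly on the VM with javac and jar; a minimal sketch, assuming WordCount.java sits in /usr/local/hadoop (the current directory) so the relative ./bin paths work:

javac -cp "$(./bin/hadoop classpath)" WordCount.java
jar cfe wordCounter.jar WordCount WordCount*.class

The cfe flags set WordCount as the entry point, so the resulting jar runs the same way as the Eclipse-exported one.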

Start Hadoop

./sbin/start-all.sh
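
To confirm the daemons actually came up, run jps; the exact set depends on your configuration, but in a pseudo-distributed setup you should see roughly this:

jps
# Expect NameNode, DataNode, SecondaryNameNode, ResourceManager and NodeManager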

Create the input directory on HDFS (-p also creates missing parent directories). Do not pre-create the output directory: MapReduce creates it itself and refuses to run if it already exists.

./bin/hdfs dfs -mkdir -p /test/input
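
A quick listing confirms the directory was created:

./bin/hdfs dfs -ls /test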

Upload the word file into the input directory (use -put for a local file; -mv only moves files that are already on HDFS):

./bin/hdfs dfs -put <your word file> /test/input
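
For reference, a hypothetical word.txt like the one below reproduces exactly the counts shown in the results at the end; any whitespace-separated text file works:

cat > word.txt <<'EOF'
i love hadoop i love mapreduce
i love this game
i love hadoop
i love mapreduce
EOF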

You should now see wordCounter.jar in the Hadoop directory:

hadoop@dblab-VirtualBox:/usr/local/hadoop$ ls
bin etc include lib libexec LICENSE.txt logs NOTICE.txt README.txt sbin share tmp wordCounter.jar word.txt

Run the job

./bin/hadoop jar wordCounter.jar /test/input /test/output
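
If you need to re-run the job, remove the output directory first; MapReduce aborts with an "output directory already exists" error otherwise:

./bin/hdfs dfs -rm -r /test/output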

View the results

./bin/hdfs dfs -cat /test/output/part-r-00000
game 1
hadoop 2
i 5
love 5
mapreduce 2
this 1
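
To keep a local copy of the result, -get downloads it from HDFS (the local file name here is just an example):

./bin/hdfs dfs -get /test/output/part-r-00000 ./wordcount_result.txt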

Finally, configure Eclipse auto-completion yourself.


To enable auto-completion as you type, open Window -> Preferences -> Java -> Editor -> Content Assist and set the auto-activation triggers for Java to:

.abcdefghijklmnopqrstuvwxyz(,

Author: NoOne
Original post: https://noonegroup.xyz/posts/c690256a/
Copyright notice: please credit the source when reposting!