Big Data Programs
Program 1: Matrix Multiplication (Hadoop MapReduce, org.apache.hadoop.mapreduce API)
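This program computes C = A x B in a single MapReduce pass. Each entry of A is replicated to every result cell in its row, each entry of B to every result cell in its column, and the reducer for a cell (i, k) takes the dot product of the matched pairs. The listing below assumes sparse input records of the form M,i,j,value (one matrix entry per line, e.g. A,0,1,2 for A[0][1] = 2) and reads the result dimensions m (rows of A) and p (columns of B) from the job configuration; both conventions are editorial assumptions, since nothing in the fragment fixes an input layout.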
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class MatrixMultiplication {
// Assumed input: one record per matrix entry, "M,i,j,value" (an editorial
// assumption; the original listing does not fix an input layout).
public static class MatrixMultiplicationMapper extends Mapper<Object, Text, Text, Text> {
@Override
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
int m = context.getConfiguration().getInt("m", 0); // rows of A
int p = context.getConfiguration().getInt("p", 0); // columns of B
String[] parts = value.toString().trim().split(",");
int row = Integer.parseInt(parts[1]);
int col = Integer.parseInt(parts[2]);
if (parts[0].equals("A")) {
// A[i][j] feeds every result cell (i, k); tag the value with the join index j.
for (int k = 0; k < p; k++) {
context.write(new Text(row + "," + k), new Text("A," + col + "," + parts[3]));
}
} else if (parts[0].equals("B")) {
for (int i = 0; i < m; i++) {
context.write(new Text(i + "," + col), new Text("B," + row + "," + parts[3]));
}
}
}
}
public static class MatrixMultiplicationReducer extends Reducer<Text, Text, Text, IntWritable> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// Collect the A and B contributions for this cell, indexed by the join index j.
Map<Integer, Integer> A_values = new HashMap<>();
Map<Integer, Integer> B_values = new HashMap<>();
for (Text v : values) {
String[] parts = v.toString().split(",");
int j = Integer.parseInt(parts[1]);
int x = Integer.parseInt(parts[2]);
if (parts[0].equals("A")) { A_values.put(j, x); } else { B_values.put(j, x); }
}
// Dot product of row i of A with column k of B.
int sum = 0;
for (Map.Entry<Integer, Integer> e : A_values.entrySet()) {
Integer b = B_values.get(e.getKey());
if (b != null) { sum += e.getValue() * b; }
}
context.write(key, new IntWritable(sum));
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
conf.setInt("m", Integer.parseInt(args[2])); // rows of A
conf.setInt("p", Integer.parseInt(args[3])); // columns of B
Job job = Job.getInstance(conf, "matrix multiplication");
job.setJarByClass(MatrixMultiplication.class);
job.setMapperClass(MatrixMultiplicationMapper.class);
job.setReducerClass(MatrixMultiplicationReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
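As a usage sketch (file names, jar name, and paths are all hypothetical), two 2x2 matrices A = [[1,2],[3,4]] and B = [[5,6],[7,8]] would be encoded and run as follows, with m = 2 and p = 2 passed as the trailing arguments:

input/matrices.txt:
A,0,0,1
A,0,1,2
A,1,0,3
A,1,1,4
B,0,0,5
B,0,1,6
B,1,0,7
B,1,1,8

hadoop jar matrixmult.jar MatrixMultiplication input output 2 2

Expected contents of output/part-r-00000 (tab-separated key and value):
0,0	19
0,1	22
1,0	43
1,1	50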
Program 2: Word Count (Hadoop classic org.apache.hadoop.mapred API)
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.io.LongWritable;
import java.io.IOException;
import java.util.StringTokenizer;
import java.util.Iterator;
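This listing uses the classic org.apache.hadoop.mapred API, in which mappers and reducers implement the Mapper and Reducer interfaces (typically by extending MapReduceBase for the default configure and close hooks), emit output through an OutputCollector, and are driven by a JobConf submitted via JobClient.runJob.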
public class WordCount {
public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
// Reusable output objects: the constant count 1 and the current word.
private final static IntWritable one = new IntWritable(1);
private final Text word = new Text();
@Override
public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
// Tokenize the line and emit (word, 1) for each token.
String line = value.toString();
StringTokenizer tokenizer = new StringTokenizer(line);
while (tokenizer.hasMoreTokens()) {
word.set(tokenizer.nextToken());
output.collect(word, one);
}
}
@Override
public void close() throws IOException {}
}
public static class WordCountReducer extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
// Reusable output object for the summed count.
private final IntWritable result = new IntWritable();
@Override
public void configure(JobConf job) {}
@Override
public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
// Sum all the 1s emitted for this word.
int sum = 0;
while (values.hasNext()) {
sum += values.next().get();
}
result.set(sum);
output.collect(key, result);
}
@Override
public void close() throws IOException {}
}
public static void main(String[] args) throws IOException {
JobConf job = new JobConf(WordCount.class);
job.setJobName("wordcount");
job.setMapperClass(WordCountMapper.class);
job.setReducerClass(WordCountReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
JobClient.runJob(job);
}
}
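A matching usage sketch (jar name and paths again hypothetical): for an input file containing the line "hello world hello hadoop", running

hadoop jar wordcount.jar WordCount /wordcount/input /wordcount/output

should leave a part-00000 file (the classic API's reducer output name) with tab-separated counts:
hadoop	1
hello	2
world	1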