我是hadoop初学者。我想了解mapreduce函数的流程。我有点困惑如何通过代码而不是任何文件向 map 作业提供输入。我应该如何配置它。请帮助我。这是我的代码。
public class TemperatureMapper : MapperBase
{
    // Sentinel value used in the fixed-width weather records for a missing reading.
    private const int Missing = 9999;

    /// <summary>
    /// Parses one fixed-width weather record and emits (year, airTemp)
    /// for readings that are present and pass the quality-code check.
    /// </summary>
    /// <param name="line">A single fixed-width input record.</param>
    /// <param name="context">Streaming context used to emit key/value pairs.</param>
    public override void Map(string line, MapperContext context)
    {
        // Year occupies columns 15..18 of the record.
        string year = line.Substring(15, 4);

        // Temperature starts at column 87; skip an explicit leading '+' sign.
        int startIndex = line[87] == '+' ? 88 : 87;
        int airTemp = int.Parse(line.Substring(startIndex, 92 - startIndex));

        // Quality code is the single character at column 92.
        string quality = line.Substring(92, 1);

        // BUG FIX: the original built a Regex from the quality character and
        // matched it against the literal text "[01459]", which succeeded only
        // by coincidence for single digits (and would wrongly accept e.g. "[").
        // The intent is to test whether the quality code itself is 0, 1, 4, 5 or 9.
        if (airTemp != Missing && Regex.IsMatch(quality, "[01459]"))
        {
            context.EmitKeyValue(year, airTemp.ToString());
        }
    }
}
//Reducer
//Reducer
public class TempReducer : ReducerCombinerBase
{
    /// <summary>
    /// Emits, for each year key, the maximum temperature observed among its
    /// values. If <paramref name="values"/> is empty, int.MinValue is emitted.
    /// </summary>
    /// <param name="key">The year string produced by the mapper.</param>
    /// <param name="values">All temperature readings for that year, as strings.</param>
    /// <param name="context">Streaming context used to emit the result pair.</param>
    public override void Reduce(string key, IEnumerable<string> values, ReducerCombinerContext context)
    {
        int best = int.MinValue;
        foreach (string reading in values)
        {
            int parsed = int.Parse(reading);
            if (parsed > best)
            {
                best = parsed;
            }
        }
        context.EmitKeyValue(key, best.ToString());
    }
}
// Entry point: reads sample records from temp.txt, runs the MapReduce job
// locally in-process via StreamingUnit, then attempts the same job against
// an Azure Hadoop cluster.
// NOTE(review): this snippet is truncated — the try block's catch/finally
// and the closing brace of Main are not visible in this paste.
static void Main(string[] args)
{
try
{
string line;
StreamReader file = new StreamReader("temp.txt");
ArrayList al = new ArrayList();
// Load every input record into memory, one string per line.
while ((line = file.ReadLine()) != null)
{
al.Add(line);
}
file.Close();
string[] input = (string[])al.ToArray(typeof(string));
Environment.SetEnvironmentVariable("HADOOP_HOME", @"c:\hadoop");
// NOTE(review): the conventional variable name is "JAVA_HOME" (all caps);
// confirm the Hadoop tooling on this machine accepts "Java_HOME".
Environment.SetEnvironmentVariable("Java_HOME", @"c:\hadoop\jvm");
// Local in-process run over the in-memory string array; works as expected.
var output = StreamingUnit.Execute<TemperatureMapper, TempReducer>(input);
// Running the job in Azure.
var hadoop = Hadoop.Connect(); // connects to the cluster successfully
var config = new HadoopJobConfiguration();
// NOTE(review): a cluster-side job reads its input from storage, not from
// in-memory strings — config presumably must point at an input path/blob
// (e.g. an InputPath property); passing raw lines here is the open question.
hadoop.MapReduceJob.Execute<TemperatureMapper, TempReducer>(config);
Console.ReadLine();
}
我通过 StreamingUnit 在本地得到了正确的结果。现在,我想在 Azure 中执行这项作业。那么如何通过代码而不是文件提供输入呢? 我已经尝试通过配置给出输入,即
config.AdditionalStreamingArguments.AddRange(input); //input is array of string
但是当我执行作业时出现此异常:
The argument must not be empty string.
Parameter name: blobName
最佳答案
默认情况下,Hadoop MapReduce 仅对文件进行操作(您可以编写不基于文件的存储处理程序,但这并不常见)。
如果您尝试将 MapReduce 应用于流中生成的内容,而不是 HDFS 上文件中存在的内容,您可能需要查看 Storm on YARN 之类的内容。
关于c# - 通过代码向hadoop作业提供输入,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/23487046/