将文档的分词结果转化为非严格的UCI格式,即没有前三行的统计信息,因为这三行在LightLDA中没有使用到。
分词后结果的格式
doc1seqword1 word2 word3...
doc2seqword2 word3...
doc3seqword2 word3
seq是自定义的分割符,分割文档名和文档内容的分词结果,作为参数传入函数
/**
 * Converts a word-segmented text file into (non-strict) UCI bag-of-words format.
 * "Non-strict" means the three UCI header lines (D, W, NNZ) are omitted, because
 * LightLDA does not read them.
 *
 * <p>Input format, one document per line: {@code <docName><seq><word1 word2 ...>}.
 * Output: {@code docwordPath} receives lines of {@code "docId wordId count"}, with
 * docIds assigned in file order and, within a document, wordIds ascending (both start
 * at 1); {@code vocabPath} receives one word per line, where the 1-based line number
 * equals the wordId. Lines without the separator or with empty content are skipped.
 * Words must not contain whitespace (whitespace is the token delimiter).
 *
 * @param seq         separator (a regex) between the document name and its content
 * @param filePath    input: path of the segmented text file
 * @param docwordPath output: path for the "docId wordId count" file
 * @param vocabPath   output: path for the vocabulary file
 * @throws Exception if any I/O operation fails
 */
public static void text2UCI(String seq, String filePath, String docwordPath, String vocabPath) throws Exception{
    // Pass 1: collect the vocabulary.
    HashSet<String> vocabs = new HashSet<>();
    try (BufferedReader br = new BufferedReader(new FileReader(filePath))) {
        String line;
        while ((line = br.readLine()) != null) {
            // limit 2: keep the content intact even if it contains seq again
            String[] parts = line.split(seq, 2);
            if (parts.length < 2) {
                continue; // blank line or no separator -> no content
            }
            String doc = parts[1].trim();
            if (doc.isEmpty()) {
                continue;
            }
            for (String word : doc.split("\\s+")) { // split on any whitespace run
                vocabs.add(word);
            }
        }
    }

    // Assign ids 1..V in (arbitrary but fixed) set-iteration order and write the
    // vocabulary, one word per line so the line number equals the word id.
    List<String> vocab_list = new ArrayList<>(vocabs);
    HashMap<String, Integer> vocab_id = new HashMap<>();
    int id = 1;
    try (BufferedWriter vocab_bw = new BufferedWriter(new FileWriter(vocabPath))) {
        for (String token : vocab_list) {
            vocab_id.put(token, id++);
            vocab_bw.write(token);
            vocab_bw.newLine();
        }
    }

    // Pass 2: re-read the file (instead of mark/reset) so arbitrarily large inputs
    // work, and emit per-document counts.
    int doc_id = 1; // document ids start at 1
    try (BufferedReader br = new BufferedReader(new FileReader(filePath));
         BufferedWriter doc_bw = new BufferedWriter(new FileWriter(docwordPath))) {
        String line;
        while ((line = br.readLine()) != null) {
            String[] parts = line.split(seq, 2);
            if (parts.length < 2) {
                continue; // must skip exactly the same lines as pass 1
            }
            String doc = parts[1].trim();
            if (doc.isEmpty()) {
                continue;
            }
            String[] words = doc.split("\\s+");
            int[] ids = new int[words.length];
            for (int i = 0; i < words.length; ++i) {
                ids[i] = vocab_id.get(words[i]); // always present: pass 1 saw every word
            }
            Arrays.sort(ids); // UCI rows are emitted with ascending word ids

            // Count occurrences of each word id within this document.
            HashMap<Integer, Integer> id_cnt = new HashMap<>();
            for (int word_id : ids) {
                Integer prev = id_cnt.get(word_id);
                id_cnt.put(word_id, prev == null ? 1 : prev + 1);
            }

            StringBuilder doc_info = new StringBuilder();
            for (int i = 0; i < ids.length; ++i) {
                if (i > 0 && ids[i] == ids[i - 1]) {
                    continue; // sorted, so duplicates are adjacent
                }
                doc_info.append(doc_id).append(' ').append(ids[i]).append(' ')
                        .append(id_cnt.get(ids[i])).append('\n');
            }
            doc_bw.write(doc_info.toString());
            doc_id++;
        }
    }
    System.out.println("vocab size : " + vocab_list.size());
    System.out.println("doc size: " + (doc_id - 1));
}