|
#include <fstream>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem/operations.hpp>

#include <pfp/config.h>
#include <pfp/tokenizer.h>
#include <pfp/state_list.hpp>
#include <pfp/lexicon.hpp>
#include <pfp/unary_grammar.hpp>
#include <pfp/binary_grammar.hpp>
#include <pfp/pcfg_parser.hpp>
| 18 | + |
| 19 | +using namespace com::wavii::pfp; |
| 20 | +using namespace boost; |
| 21 | +namespace fs = boost::filesystem; |
| 22 | + |
| 23 | +template<class T> |
| 24 | +void load(T & obj, boost::filesystem::path p) |
| 25 | +{ |
| 26 | + if (!fs::exists(p)) |
| 27 | + throw std::runtime_error("can't find " + p.string()); |
| 28 | + std::ifstream in(p.string().c_str()); |
| 29 | + obj.load(in); |
| 30 | +} |
| 31 | + |
| 32 | +int main(int argc, char * argv[]) |
| 33 | +{ |
| 34 | + std::clog << "pfpc: command line interface for pfp!" << std::endl; |
| 35 | + std::clog << "build: " << __DATE__ << " (" << __TIME__ << ") of pfp version " << consts::version << " (c) Wavii,Inc. 2010" << std::endl; |
| 36 | + std::clog << "usage: " << argv[0] << " <max sentence length=45> <data dir=/usr/share/pfp/>" << std::endl; |
| 37 | + |
| 38 | + size_t sentence_length = argc < 2 ? 45 : lexical_cast<size_t>(argv[1]); |
| 39 | + std::string data_dir = argc < 3 ? "/usr/share/pfp/" : argv[2]; // make install copies files to /usr/share/pfp by default |
| 40 | + |
| 41 | + tokenizer tokenizer; |
| 42 | + state_list states; |
| 43 | + lexicon lexicon(states); |
| 44 | + unary_grammar ug(states); |
| 45 | + binary_grammar bg(states); |
| 46 | + pcfg_parser pcfg(states, ug, bg); |
| 47 | + |
| 48 | + std::clog << "loading lexicon and grammar" << std::endl; |
| 49 | + load(tokenizer, fs::path(data_dir) / "americanizations"); |
| 50 | + load(states, fs::path(data_dir) / "states"); |
| 51 | + { |
| 52 | + fs::path ps[] = { fs::path(data_dir) / "words", fs::path(data_dir) / "sigs", fs::path(data_dir) / "word_state", fs::path(data_dir) / "sig_state" }; |
| 53 | + std::ifstream ins[4]; |
| 54 | + for (int i = 0; i != 4; ++i) |
| 55 | + { |
| 56 | + if (!fs::exists(ps[i])) |
| 57 | + throw std::runtime_error("can't find " + ps[i].string()); |
| 58 | + ins[i].open(ps[i].string().c_str()); |
| 59 | + } |
| 60 | + lexicon.load(ins[0], ins[1], ins[2], ins[3]); |
| 61 | + } |
| 62 | + load(ug, fs::path(data_dir) / "unary_rules"); |
| 63 | + load(bg, fs::path(data_dir) / "binary_rules"); |
| 64 | + workspace w(sentence_length, states.size()); |
| 65 | + |
| 66 | + std::vector< std::string > words; |
| 67 | + std::clog << "ready! enter lines to parse:" << std::endl; |
| 68 | + for (std::string word; std::getline(std::cin, word); ) { |
| 69 | + boost::trim(word); |
| 70 | + if (word == "") |
| 71 | + break; |
| 72 | + words.push_back(word); |
| 73 | + } |
| 74 | + |
| 75 | + std::vector< std::pair< state_t, float > > state_weight; |
| 76 | + std::vector< std::vector< state_score_t > > sentence_f; |
| 77 | + node result; |
| 78 | + // tokenizer.tokenize(sentence, words); |
| 79 | + |
| 80 | + for (std::vector< std::string >::const_iterator it = words.begin(); it != words.end(); ++it) |
| 81 | + { |
| 82 | + state_weight.clear(); lexicon.score(*it, std::back_inserter(state_weight)); |
| 83 | + sentence_f.push_back(std::vector< state_score_t >(state_weight.size())); |
| 84 | + // scale by score_resolution in case we are downcasting our weights |
| 85 | + for (size_t i = 0; i != state_weight.size(); ++i) |
| 86 | + sentence_f.back()[i] = state_score_t(state_weight[i].first, state_weight[i].second * consts::score_resolution); |
| 87 | + } |
| 88 | + // add the boundary symbol |
| 89 | + sentence_f.push_back( std::vector< state_score_t >(1, state_score_t(consts::boundary_state, 0.0f))); |
| 90 | + // and parse! |
| 91 | + if (!pcfg.parse(sentence_f, w, result)) |
| 92 | + std::cout << "ERRROR!" << std::endl; |
| 93 | + // stitch together the results |
| 94 | + std::ostringstream oss; |
| 95 | + std::vector< std::string >::iterator word_it = words.begin(); |
| 96 | + stitch(oss, result, word_it, states); |
| 97 | + std::cout << oss.str() << std::endl; |
| 98 | +} |