diff --git a/cs236/Makefile b/cs236/Makefile new file mode 100644 index 0000000..4d23986 --- /dev/null +++ b/cs236/Makefile @@ -0,0 +1,63 @@ +CXXFLAGS= -Wall -g -std=c++0x -I . + +lexor_objs=labs/lab01.o \ + lexer/lexi.o \ + lexer/token.o + +parser_objs=labs/lab02.o \ + lexer/lexi.o \ + lexer/token.o \ + parser/parser.o \ + +rdbms_objs=labs/lab03.o \ + lexer/lexi.o \ + lexer/token.o \ + parser/parser.o \ + +lab01=bin/lab01 +lab02=bin/lab02 +lab03=bin/lab03 + +all: $(lab01) $(lab02) $(lab03) + +$(lab01): $(lexor_objs) + $(CXX) $(CXXFLAGS) $(lexor_objs) -o $@ + +$(lab02): $(parser_objs) + $(CXX) $(CXXFLAGS) $^ -o $@ + +$(lab03): $(rdbms_objs) + $(CXX) $(CXXFLAGS) $^ -o $@ + +labs/lab01.o: labs/lab01.cpp lexer/util.h lexer/lexi.h lexer/token.h +lexer/lexi.o: lexer/lexi.cpp lexer/lexi.h +lexer/token.o: lexer/token.h lexer/token.cpp + +labs/lab02.o: labs/lab02.cpp lexer/util.h lexer/lexi.h lexer/token.h \ + parser/scheme.h parser/fact.h parser/rule.h parser/query.h\ + parser/predicate.h parser/parameter.h parser/parser.o + +labs/lab03.o: labs/lab03.cpp lexer/util.h lexer/lexi.h lexer/token.h \ + parser/scheme.h parser/fact.h parser/rule.h parser/query.h\ + parser/predicate.h parser/parameter.h parser/parser.o rdbms/db.h \ + rdbms/relation.h rdbms/schema.h rdbms/Tuple.h +parser/parser.o: parser/parser.h parser/parser.cpp + +clean: + @rm -vf **/*.o + @rm -vf $(EXE) + @rm -vf **/*.1 + @rm -vf **/*.0 + @rm -vf test + @rm -rvf **/*.dSYM + @rm -vf output.txt + @rm -vf bin/* + +drun: main + gdb ./main + +valgrind: $(EXE) + valgrind --tool=memcheck --leak-check=yes ./$(EXE) input.txt output.txt + +pmc: **/*.h **/*.cpp + pmccabe **/*.h **/*.cpp diff --git a/cs236/bin/.hgdir b/cs236/bin/.hgdir new file mode 100644 index 0000000..e69de29 diff --git a/cs236/labs/lab01.cpp b/cs236/labs/lab01.cpp new file mode 100644 index 0000000..30cd696 --- /dev/null +++ b/cs236/labs/lab01.cpp @@ -0,0 +1,19 @@ +#include <iostream> +#include "lexer/lexi.h" +#include "lexer/util.h" +#include "lexer/token.h" + +const string usage = "usage: app <input file> <output file>"; + +int main(int argc, char* argv[]) { + if(argc != 3) { + cerr << usage << endl; + return 1; + } + get_file_name(argv[1]); + vector<string> data = open_file(argv[1]); + lexi l; + string temp = argv[2]; + l.lexical_analyzer(data, temp); + cout << "getting called here in lab 1" << endl; +} diff --git a/cs236/labs/lab02.cpp b/cs236/labs/lab02.cpp new file mode 100644 index 0000000..3ce3a04 --- /dev/null +++ b/cs236/labs/lab02.cpp @@ -0,0 +1,30 @@ +#include <iostream> +#include "lexer/lexi.h" +#include "lexer/util.h" +#include "lexer/token.h" +#include "parser/parser.h" + +const string usage = "usage: app <input file> <output file>"; + +int main(int argc, char* argv[]) { + if(argc != 3) { + cerr << usage << endl; + return 1; + } + get_file_name(argv[1]); + vector<string> data = open_file(argv[1]); + lexi l; + string temp = argv[2]; + vector<token> s = l.lexical_analyzer(data, temp); + parser p; + p.tokens = s; + try { + p.check_datalog(); + string out = p.out(); + write_file(out, argv[2]); + } catch(string str) { + stringstream s; + s << "Failure!\n " << str; + write_file(s.str(), argv[2]); + } +} diff --git a/cs236/labs/lab03.cpp b/cs236/labs/lab03.cpp new file mode 100644 index 0000000..d7267ef --- /dev/null +++ b/cs236/labs/lab03.cpp @@ -0,0 +1,32 @@ +#include <iostream> +#include "lexer/lexi.h" +#include "lexer/util.h" +#include "lexer/token.h" +#include "parser/parser.h" +#include "rdbms/db.h" + +const string usage = "usage: app <input file> <output file>"; + +int main(int argc, char* argv[]) { + if(argc != 3) { + cerr << usage << endl; + return 1; + } + get_file_name(argv[1]); + vector<string>
data = open_file(argv[1]); + lexi l; + string temp = argv[2]; + vector s = l.lexical_analyzer(data, temp); + parser p; + p.tokens = s; + try { + p.check_datalog(); + string out = p.out(); + write_file(out, argv[2]); + } catch(string str) { + stringstream s; + s << "Failure!\n " << str; + write_file(s.str(), argv[2]); + } + db database(p); +} diff --git a/cs236/lexer/lexi.cpp b/cs236/lexer/lexi.cpp new file mode 100644 index 0000000..fc3fd5f --- /dev/null +++ b/cs236/lexer/lexi.cpp @@ -0,0 +1,361 @@ +#include "lexi.h" + +vector lexi::lexical_analyzer(vector data, string file_name) { + string cur_string; + string next_character; + for(unsigned int i = 0; i < data.size(); i++) { + for(unsigned int j = 0; j < data[i].size(); j ++) { + cur_string = data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = ""; + } + string state = determiner(cur_string, next_character); + simple_state(data, state, cur_string, next_character, i, j); + simple_state_string(data, state, cur_string, next_character, i, j); + if(state == "start of string") { + string token_symbol = string_finder(data, i, j); + if(token_symbol != "error") { + token_symbol.erase(0,1); + token t("STRING", token_symbol, i + 1); + tokens.push_back(t); + } + else { + write_to_file(file_name, i + 1); + return tokens; + } + } + simple_comment(data, state, cur_string, next_character, i, j); + simple_id(data, state, cur_string, next_character, i, j); + if(state == "error") { + write_to_file(file_name, i + 1); + return tokens; + } + } + } + write_to_file(file_name); + return tokens; +} + +void lexi::write_to_file(string file_name, int line) { + ofstream myfile; + myfile.open(file_name.c_str()); + myfile << "Error on line " << line << endl; + myfile.close(); +} + +void lexi::write_to_file(string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + for(unsigned int i = 0; i < tokens.size(); i++) { + if(i < tokens.size()) { + myfile << tokens[i] << endl; + } + else { + myfile << tokens[i]; + } + } + myfile << "Total Tokens = " << tokens.size(); + myfile << endl; + myfile.close(); +} + + +bool lexi::simple_comment(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "comment") { + string token_symbol = comment_finder(data, i, j); + } + return true; +} + +bool lexi::simple_id(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "id") { + string token_symbol = id_finder(data, i, j); + if(token_symbol != "error") { + token t("ID", token_symbol, i + 1); + tokens.push_back(t); + } + } + return true; +} + +bool lexi::simple_state_string(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "simple_string") { + string token_symbol = det_type_simple_string(data, i, j); + if(token_symbol != "wrong") { + string token_type = type_simple_string(token_symbol); + token t(token_type, token_symbol, i + 1); + tokens.push_back(t); + } + else { + string token_symbol = id_finder(data, i, j); + if(token_symbol != "error") { + token t("ID", token_symbol, i + 1); + tokens.push_back(t); + } + } + } + return true; +} + +bool lexi::simple_state(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "simple") { + string token_symbol = type_simple(cur_string, next_character); + if(next_character == "-") { + data[i].replace(j,2, " "); + } + else { + data[i].replace(j,1, " "); + } + string token_id = 
type_simple_caps(cur_string, next_character); + token t(token_id, token_symbol, i + 1); + tokens.push_back(t); + } + return true; +} + +string lexi::determiner(string cur_string, string next_character) { + if(det_help_simple(cur_string, next_character)) { + return "simple"; + } + else if(det_help_simple_string(cur_string, next_character)) { + return "simple_string"; + } + else if(det_help_id(cur_string)) { + return "id"; + } + else if(cur_string == "'") { + return "start of string"; + } + else if(cur_string == "#") { + return "comment"; + } + else { + string temp = incorrect(cur_string); + return temp; + } + return ""; +} + +bool lexi::det_help_id(string cur_string) { + if(('A' <= cur_string[0] && cur_string[0] <= 'Z') || + ('a' <= cur_string[0] && cur_string[0] <= 'z')) { + return true; + } + return false; +} + +bool lexi::quick_help(string a, string b) { + if(a == "S" && b == "c") { + return true; + } + return false; +} + +bool lexi::det_help_simple_string(string cur_string, string next_character) { + if(quick_help(cur_string, next_character)) { + return true; + } + else if((cur_string == "Q" && next_character == "u")) { + return true; + } + else if((cur_string == "R" && next_character == "u")) { + return true; + } + else if((cur_string == "F" && next_character == "a")) { + return true; + } + return false; +} + +bool lexi::det_help_simple(string cur_string, string next_character) { + if(cur_string == "," || cur_string == "." || cur_string == "?" || + cur_string == "(" || cur_string == ")" || cur_string == ":") { + type_simple(cur_string, next_character); + return true; + } + return false; +} + +string lexi::incorrect(string cur_string) { + if(cur_string == " " || cur_string == "\t") { + return "fine"; + } + else if(!(('A' <= cur_string[0] && cur_string[0] <= 'Z') || + ('a' <= cur_string[0] && cur_string[0] <= 'z'))) { + return "error"; + } + return " "; +} + +string lexi::id_finder(vector & data, int a, int b) { + string cur_string; + string next_character; + for(unsigned int j = b; j < data[a].size(); j++) { + cur_string += data[a].at(j); + if(j < data[a].size() - 1) { + next_character = data[a].at(j + 1); + } + else { + next_character = "!"; + } + if(is_char_valid(next_character[0]) || next_character == "!") { + data[a].replace(data[a].begin() + b, data[a].begin() + j + 1, " "); + return cur_string; + } + } + return " "; +} + +string lexi::comment_finder(vector & data, int i, int b) { + string cur_string; + string next_character; + for(unsigned int j = b; j < data[i].size(); j++) { + cur_string += data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = "!"; + } + if((j > data[i].size()) - 1 && next_character != "!") { + data[i].replace(data[i].begin() + b, data[i].end(), " "); + return cur_string; + } + } + return "error"; +} + +string lexi::string_finder(vector & data, int a, int b) { + string cur_string; + string next_character; + b = data[a].find('\''); + for(unsigned int j = b; j < data[a].size(); j++) { + cur_string += data[a].at(j); + if(j < data[a].size() - 1) { + next_character = data[a].at(j + 1); + } + if(next_character == "'") { + data[a].replace(data[a].begin() + b, data[a].begin() + j + 2, " "); + data[a].insert(data[a].begin() + b, ' '); + return cur_string; + } + } + return "error"; +} + +string lexi::type_simple_caps(string symbol, string next_symbol) { + if(symbol == ",") { + return "COMMA"; + } + else if(symbol == ".") { + return "PERIOD"; + } + else if(symbol == "?") { + return "Q_MARK"; + } + else if(symbol 
== "(") { + return "LEFT_PAREN"; + } + else if(symbol == ")") { + return "RIGHT_PAREN"; + } + else if(symbol == ":") { + if(next_symbol == "-") { + return "COLON_DASH"; + } + return "COLON"; + } + return ""; +} + +string lexi::type_simple(string symbol, string next_symbol) { + if(symbol == ",") { + return ","; + } + else if(symbol == ".") { + return "."; + } + else if(symbol == "?") { + return "?"; + } + else if(symbol == "(") { + return "("; + } + else if(symbol == ")") { + return ")"; + } + else if(symbol == ":") { + if(next_symbol == "-") { + return ":-"; + } + return ":"; + } + return ""; +} + +string lexi::det_type_simple_string(vector & data, int i, int b) { + string cur_string; + string next_character; + string special_case; + if(b > 0) { + special_case = data[i].at(b -1); + } + for(unsigned int j = b; j < data[i].size(); j++) { + cur_string += data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = "!"; + } + if((is_simple_string(cur_string)) && (is_char_valid(next_character.at(0))) && (is_char_valid(special_case[0]))) { + data[i].replace(data[i].begin() + b, data[i].begin() + j + 1, " "); + return cur_string; + } + } + return "wrong"; +} + +bool lexi::is_char_valid(char next_character) { + if(!(('A' <= next_character && next_character <= 'Z') || + ('a' <= next_character && next_character <= 'z') || + ('0' <= next_character && next_character <= '9')) || (next_character == '\'')) { + return true; + } + return false; +} + +bool lexi::is_simple_string(string simple_com) { + if(simple_com == "Schemes") { + return true; + } + else if(simple_com == "Facts") { + return true; + } + else if(simple_com == "Rules") { + return true; + } + else if(simple_com == "Queries") { + return true; + } + return false; +} + +string lexi::type_simple_string(string simple_com) { + if(simple_com == "Schemes") { + return "SCHEMES"; + } + else if(simple_com == "Facts") { + return "FACTS"; + } + else if(simple_com == "Rules") { + return "RULES"; + } + else if(simple_com == "Queries") { + return "QUERIES"; + } + return ""; +} diff --git a/cs236/lexer/lexi.h b/cs236/lexer/lexi.h new file mode 100644 index 0000000..979b6e0 --- /dev/null +++ b/cs236/lexer/lexi.h @@ -0,0 +1,44 @@ +#ifndef __LEXI_H__ +#define __LEXI_H__ + +#include +#include +#include +#include "token.h" +#include +#include +#include + + +using namespace std; + +class lexi { + public: + lexi(){} + vector tokens; + string determiner(string, string); + vector lexical_analyzer(vector, string); + string type_simple(string, string); + string type_simple_string(string); + bool is_simple_string(string); + string det_type_simple_string(vector&, int, int); + bool is_char_valid(char); + string string_finder(vector&, int, int); + string comment_finder(vector&, int, int); + string id_finder(vector&, int, int); + string incorrect(string); + bool det_help_simple(string, string); + bool det_help_simple_string(string, string); + bool quick_help(string, string); + bool det_help_id(string); + string type_simple_caps(string, string); + bool simple_state(vector&, string, string, string, int, int); + bool simple_state_string(vector&, string, string, string, int, int); + bool simple_id(vector&, string, string, string, int, int); + bool simple_comment(vector&, string, string, string, int, int); + void write_to_file(string); + void write_to_file(string, int); +}; + +#endif + diff --git a/cs236/lexer/token.cpp b/cs236/lexer/token.cpp new file mode 100644 index 0000000..9a5cdb8 --- /dev/null +++ 
b/cs236/lexer/token.cpp @@ -0,0 +1,11 @@ +#include "token.h" + +token::token(string type, string character, int line_num) : + type(type), character(character), line_num(line_num) {} + +ostream & operator<<(ostream & os, token tk) { + os << "(" << tk.type + << ",\"" << tk.character + << "\"," << tk.line_num << ")"; + return os; +} diff --git a/cs236/lexer/token.h b/cs236/lexer/token.h new file mode 100644 index 0000000..d832eb3 --- /dev/null +++ b/cs236/lexer/token.h @@ -0,0 +1,16 @@ +#ifndef __TOKEN_H__ +#define __TOKEN_H__ + +#include <iostream> + +using namespace std; + +class token { + public: + token(string, string, int); + string type; + string character; + int line_num; + friend ostream & operator<<(ostream & os, token tk); +}; +#endif diff --git a/cs236/lexer/util.h b/cs236/lexer/util.h new file mode 100644 index 0000000..e17ff8b --- /dev/null +++ b/cs236/lexer/util.h @@ -0,0 +1,56 @@ +#ifndef __UTIL_H__ +#define __UTIL_H__ +#include <iostream> +#include <fstream> +#include <vector> + +vector<string> open_file(string file_name) { + ifstream myfile; + vector<string> data; + myfile.open(file_name.c_str()); + string temp; + while(!myfile.eof()) { + getline(myfile, temp); + data.push_back(temp); + } + myfile.close(); + return data; +} + +bool get_file_name(string input) { + bool file_correct = false; + string input_file_name; + while(!file_correct) { + ifstream inputs(input.c_str()); + if(inputs.good()) { + input_file_name = input; + file_correct = true; + open_file(input_file_name); + return true; + } + else { + cerr << "incorrect file name" << endl; + return false; + } + } + return false; +} + +void write_file(string output, string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + myfile << output << "\n"; +} + +void write_file(vector<string> output, string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + for(unsigned int i = 0; i < output.size(); i++) { + if(i != output.size() -1) { + myfile << output[i] << "\n"; + } + else myfile << output[i]; + } +} + +#endif diff --git a/cs236/parser/fact.h b/cs236/parser/fact.h new file mode 100644 index 0000000..2e5ea35 --- /dev/null +++ b/cs236/parser/fact.h @@ -0,0 +1,14 @@ +#ifndef __FACT_H__ +#define __FACT_H__ + +#include "predicate.h" +#include <string> +#include <vector> + +using namespace std; + +class fact { + public: + vector<predicate> pred_facts; +}; +#endif diff --git a/cs236/parser/parameter.h b/cs236/parser/parameter.h new file mode 100644 index 0000000..1e223b0 --- /dev/null +++ b/cs236/parser/parameter.h @@ -0,0 +1,13 @@ +#ifndef __PARAMETER_H__ +#define __PARAMETER_H__ + +#include <string> + +using namespace std; + +class parameter { + public: + string param; + string type; +}; +#endif diff --git a/cs236/parser/parser.cpp b/cs236/parser/parser.cpp new file mode 100644 index 0000000..04bd379 --- /dev/null +++ b/cs236/parser/parser.cpp @@ -0,0 +1,200 @@ +#include "parser.h" + +string parser::get_token() { + string type = tokens[0].type; + return type; +} + +void parser::check_datalog() { + match("SCHEMES"); + match("COLON"); + if(get_token() == "FACTS") { + error(); + } + check_schemelist(get_token()); + match("FACTS"); + match("COLON"); + check_factlist(get_token()); + match("RULES"); + match("COLON"); + check_rulelist(get_token()); + match("QUERIES"); + match("COLON"); + check_querylist(get_token()); + out(); +} + +string parser::out() { + stringstream s; + s << "Success!"
<< endl; + s << "Schemes(" << schemelist.size() << "):" << endl; + for(unsigned int i = 0; i < schemelist.size(); i++) { + s << " " << schemelist[i].toString(); + } + s << "Facts(" << factlist.size() << "):" << endl; + for(unsigned int i = 0; i < factlist.size(); i++) { + s << " " << factlist[i].toString(false); + } + s << "Rules(" << rulelist.size() << "):" << endl; + for(unsigned int i = 0; i < rulelist.size(); i++) { + s << " " << rulelist[i].toString(); + } + s << "Queries(" << querylist.size() << "):" << endl; + double a = 0; + for(unsigned int i = 0; i < querylist.size(); i++) { + s << " " << querylist[i].toString(a); + } + s << "Domain(" << domain.size() << "):" << endl; + for (auto it=domain.cbegin(); it != domain.cend(); ++it) { + s << " '" << *it << "'" << endl; + } + return s.str(); +} + +void parser::check_schemelist(string type) { + if(type == "FACTS") { + return; + } + else { + check_scheme(type); + check_schemelist(get_token()); + } +} + +void parser::check_scheme(string type) { + schemelist.push_back(check_predicate(type)); +} + +void parser::check_factlist(string type) { + if(type == "RULES") { + return; + } + else { + check_fact(type); + check_factlist(get_token()); + } +} + +void parser::check_fact(string type) { + factlist.push_back(check_predicate(type)); + match("PERIOD"); +} + +void parser::check_rulelist(string type) { + if(type == "QUERIES") { + return; + } + else { + check_rule(type); + check_rulelist(get_token()); + } +} + +void parser::check_rule(string type) { + rule r; + r.head = check_predicate(type); + match("COLON_DASH"); + check_predicate_list(get_token(), r); + match("PERIOD"); + rulelist.push_back(r); +} + +void parser::check_querylist(string type) { + check_query(type); + if(tokens.empty()) { + return; + } + else { + check_querylist(get_token()); + } +} + +void parser::check_query(string type) { + querylist.push_back(check_predicate(type)); + match("Q_MARK"); +} + +void parser::check_predicate_list(string type, rule& r) { + r.pred_rule.push_back(check_predicate(type)); + if(get_token() == "COMMA") { + match("COMMA"); + check_predicate_list(get_token(), r); + } + else { + return; + } +} + +predicate parser::check_predicate(string type) { + predicate pred; + pred.id = tokens[0].character; + match("ID"); + match("LEFT_PAREN"); + if(get_token() == "RIGHT_PAREN") { + error(); + } + check_parameterlist(get_token(), pred); + match("RIGHT_PAREN"); + return pred; +} + +void parser::check_parameterlist(string type, predicate& pred) { + if(type == "RIGHT_PAREN") { + return; + } + else { + check_parameter(type, pred); + if(get_token() == "COMMA") { + match("COMMA"); + if(get_token() == "RIGHT_PAREN") { + error(); + } + check_parameterlist(get_token(), pred); + } + else { + return; + } + } +} + +void parser::check_parameter(string type, predicate& pred) { + parameter para; + if(type == "STRING") { + domain.insert(tokens[0].character); + para.param = tokens[0].character; + para.type = tokens[0].type; + pred.pred_list.push_back(para); + match("STRING"); + return; + } + else if(type == "ID") { + para.param = tokens[0].character; + para.type = tokens[0].type; + pred.pred_list.push_back(para); + match("ID"); + return; + } + else { + error(); + } +} + +void parser::match(string type) { + if(get_token() == type) { + if(tokens.empty()) { + error(); + } + else { + tokens.erase(tokens.begin()); + } + } + else { + error(); + } +} + +void parser::error() { + stringstream oss; + oss << tokens[0] << endl; + throw oss.str(); +} diff --git a/cs236/parser/parser.h 
b/cs236/parser/parser.h new file mode 100644 index 0000000..58b10c1 --- /dev/null +++ b/cs236/parser/parser.h @@ -0,0 +1,49 @@ +#ifndef __PARSER_H__ +#define __PARSER_H__ + +#include <iostream> +#include <sstream> +#include <vector> +#include <set> + +#include "../lexer/token.h" +#include "scheme.h" +#include "fact.h" +#include "rule.h" +#include "query.h" +#include "predicate.h" +#include "parameter.h" + +using namespace std; + +class parser { + public: + parser() {} + vector<token> tokens; + set<string> domain; + vector<predicate> schemelist; + vector<predicate> factlist; + vector<predicate> querylist; + vector<predicate> predlist; + vector<rule> rulelist; + + string get_token(); + void check_datalog(); + void check_schemelist(string); + void check_scheme(string); + void check_factlist(string); + void check_fact(string); + void check_rulelist(string); + void check_rule(string); + void check_querylist(string); + void check_query(string); + void check_predicate_list(string, rule&); + predicate check_predicate(string); + void check_parameterlist(string type, predicate&); + void check_parameter(string, predicate&); + void match(string); + void error(); + string out(); + +}; +#endif diff --git a/cs236/parser/predicate.h b/cs236/parser/predicate.h new file mode 100644 index 0000000..cde0d7d --- /dev/null +++ b/cs236/parser/predicate.h @@ -0,0 +1,83 @@ +#ifndef __PREDICATE_H__ +#define __PREDICATE_H__ + +#include "parameter.h" +#include <vector> +#include <sstream> + +using namespace std; + +class predicate { + public: + string id; + vector<parameter> pred_list; + string toString() { + //schemes + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + s << ")\n"; + return s.str(); + } + string toString(bool a) { + //facts + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + s << ").\n"; + return s.str(); + } + string toString(double a) { + //query + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + if(pred_list[i].type == "STRING") { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + if(pred_list[i].type == "ID") { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + } + s << ")?\n"; + return s.str(); + } + string toString(int a) { + //rules + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + if(pred_list[i].type == "STRING") { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + if(pred_list[i].type == "ID") { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + } + s << ")"; + return s.str(); + } +}; +#endif diff --git a/cs236/parser/query.h b/cs236/parser/query.h new file mode 100644 index 0000000..159e407 --- /dev/null +++ b/cs236/parser/query.h @@ -0,0 +1,14 @@ +#ifndef __QUERY_H__ +#define __QUERY_H__ + +#include "predicate.h" +#include <string> +#include <vector> + +using namespace std; + +class query { + public: + vector<predicate> pred_queries; +}; +#endif diff --git a/cs236/parser/rule.h b/cs236/parser/rule.h new file mode 100644 index 0000000..f4945a1 --- /dev/null +++ b/cs236/parser/rule.h @@ -0,0 +1,27 @@ +#ifndef __RULE_H__ +#define __RULE_H__ + +#include "predicate.h" +#include <vector> +#include <sstream> + +using namespace std; + +class rule { + public: + predicate head; + vector<predicate> pred_rule; + string toString() { + stringstream s; + s << head.toString(1) << " :- "; + for(unsigned int i = 0; i <
pred_rule.size(); i++) { + s << pred_rule[i].toString(1); + if(i < pred_rule.size()-1) { + s << ","; + } + } + s << ".\n"; + return s.str(); + } +}; +#endif diff --git a/cs236/parser/scheme.h b/cs236/parser/scheme.h new file mode 100644 index 0000000..236a5c7 --- /dev/null +++ b/cs236/parser/scheme.h @@ -0,0 +1,14 @@ +#ifndef __SCHEME_H__ +#define __SCHEME_H__ + +#include "predicate.h" +#include <string> +#include <vector> + +using namespace std; + +class scheme { + public: + vector<predicate> pred_schemes; +}; +#endif diff --git a/cs236/rdbms/Tuple.h b/cs236/rdbms/Tuple.h new file mode 100644 index 0000000..5bbc3bd --- /dev/null +++ b/cs236/rdbms/Tuple.h @@ -0,0 +1,10 @@ +#ifndef __TUPLE_H__ +#define __TUPLE_H__ + +#include <string> +#include <vector> + +class Tuple: public vector<string> { +}; + +#endif diff --git a/cs236/rdbms/db.h b/cs236/rdbms/db.h new file mode 100644 index 0000000..7a62e1f --- /dev/null +++ b/cs236/rdbms/db.h @@ -0,0 +1,45 @@ +#ifndef __DB_H__ +#define __DB_H__ + +#include "parser/parser.h" +#include "relation.h" + +class db { + public: + db(parser incoming) { + p = incoming; + setup(); + } + parser p; + vector<relation> relations; + + + void setup() { + for(unsigned int i = 0; i < p.schemelist.size(); i++) { + relation r; + r.name = p.schemelist[i].id; + for(unsigned int j = 0; j < p.schemelist[i].pred_list.size(); j++) { + r.schemas.s.push_back(p.schemelist[i].pred_list[j].param); + } + for(unsigned int k = 0; k < p.factlist.size(); k++) { + if(r.name == p.factlist[k].id) { + Tuple t; + for(unsigned int l = 0; l < p.factlist[k].pred_list.size(); l++) { + t.push_back(p.factlist[k].pred_list[l].param); + } + r.tuples.insert(t); + } + } + } + } + + //set domain; + //vector schemelist; + //vector factlist; + //vector querylist; + //vector predlist; + //vector rulelist; + +}; + +#endif diff --git a/cs236/rdbms/relation.h b/cs236/rdbms/relation.h new file mode 100644 index 0000000..e6a7b2e --- /dev/null +++ b/cs236/rdbms/relation.h @@ -0,0 +1,15 @@ +#ifndef __RELATION_H__ +#define __RELATION_H__ + +#include "Tuple.h" +#include "schema.h" +#include <set> + +class relation { + public: + string name; + schema schemas; + set<Tuple> tuples; +}; + +#endif diff --git a/cs236/rdbms/schema.h b/cs236/rdbms/schema.h new file mode 100644 index 0000000..253735d --- /dev/null +++ b/cs236/rdbms/schema.h @@ -0,0 +1,11 @@ +#ifndef __SCHEMA_H__ +#define __SCHEMA_H__ + +#include "Tuple.h" + +class schema { + public: + Tuple s; +}; + +#endif diff --git a/cs236/submission/lab02/fact.h b/cs236/submission/lab02/fact.h new file mode 100644 index 0000000..2e5ea35 --- /dev/null +++ b/cs236/submission/lab02/fact.h @@ -0,0 +1,14 @@ +#ifndef __FACT_H__ +#define __FACT_H__ + +#include "predicate.h" +#include <string> +#include <vector> + +using namespace std; + +class fact { + public: + vector<predicate> pred_facts; +}; +#endif diff --git a/cs236/submission/lab02/lab02.cpp b/cs236/submission/lab02/lab02.cpp new file mode 100644 index 0000000..5f28eb8 --- /dev/null +++ b/cs236/submission/lab02/lab02.cpp @@ -0,0 +1,30 @@ +#include <iostream> +#include "lexi.h" +#include "util.h" +#include "token.h" +#include "parser.h" + +const string usage = "usage: app <input file> <output file>"; + +int main(int argc, char* argv[]) { + if(argc != 3) { + cerr << usage << endl; + return 1; + } + get_file_name(argv[1]); + vector<string> data = open_file(argv[1]); + lexi l; + string temp = argv[2]; + vector<token> s = l.lexical_analyzer(data, temp); + parser p; + p.tokens = s; + try { + p.check_datalog(); + string out = p.out(); + write_file(out, argv[2]); + } catch(string str) { + stringstream s; + s << "Failure!\n " << str; + write_file(s.str(), argv[2]); + } +} diff --git
a/cs236/submission/lab02/lexi.cpp b/cs236/submission/lab02/lexi.cpp new file mode 100644 index 0000000..fc3fd5f --- /dev/null +++ b/cs236/submission/lab02/lexi.cpp @@ -0,0 +1,361 @@ +#include "lexi.h" + +vector lexi::lexical_analyzer(vector data, string file_name) { + string cur_string; + string next_character; + for(unsigned int i = 0; i < data.size(); i++) { + for(unsigned int j = 0; j < data[i].size(); j ++) { + cur_string = data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = ""; + } + string state = determiner(cur_string, next_character); + simple_state(data, state, cur_string, next_character, i, j); + simple_state_string(data, state, cur_string, next_character, i, j); + if(state == "start of string") { + string token_symbol = string_finder(data, i, j); + if(token_symbol != "error") { + token_symbol.erase(0,1); + token t("STRING", token_symbol, i + 1); + tokens.push_back(t); + } + else { + write_to_file(file_name, i + 1); + return tokens; + } + } + simple_comment(data, state, cur_string, next_character, i, j); + simple_id(data, state, cur_string, next_character, i, j); + if(state == "error") { + write_to_file(file_name, i + 1); + return tokens; + } + } + } + write_to_file(file_name); + return tokens; +} + +void lexi::write_to_file(string file_name, int line) { + ofstream myfile; + myfile.open(file_name.c_str()); + myfile << "Error on line " << line << endl; + myfile.close(); +} + +void lexi::write_to_file(string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + for(unsigned int i = 0; i < tokens.size(); i++) { + if(i < tokens.size()) { + myfile << tokens[i] << endl; + } + else { + myfile << tokens[i]; + } + } + myfile << "Total Tokens = " << tokens.size(); + myfile << endl; + myfile.close(); +} + + +bool lexi::simple_comment(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "comment") { + string token_symbol = comment_finder(data, i, j); + } + return true; +} + +bool lexi::simple_id(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "id") { + string token_symbol = id_finder(data, i, j); + if(token_symbol != "error") { + token t("ID", token_symbol, i + 1); + tokens.push_back(t); + } + } + return true; +} + +bool lexi::simple_state_string(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "simple_string") { + string token_symbol = det_type_simple_string(data, i, j); + if(token_symbol != "wrong") { + string token_type = type_simple_string(token_symbol); + token t(token_type, token_symbol, i + 1); + tokens.push_back(t); + } + else { + string token_symbol = id_finder(data, i, j); + if(token_symbol != "error") { + token t("ID", token_symbol, i + 1); + tokens.push_back(t); + } + } + } + return true; +} + +bool lexi::simple_state(vector & data, string state, string cur_string, string next_character, int i, int j) { + if(state == "simple") { + string token_symbol = type_simple(cur_string, next_character); + if(next_character == "-") { + data[i].replace(j,2, " "); + } + else { + data[i].replace(j,1, " "); + } + string token_id = type_simple_caps(cur_string, next_character); + token t(token_id, token_symbol, i + 1); + tokens.push_back(t); + } + return true; +} + +string lexi::determiner(string cur_string, string next_character) { + if(det_help_simple(cur_string, next_character)) { + return "simple"; + } + else if(det_help_simple_string(cur_string, next_character)) { + 
return "simple_string"; + } + else if(det_help_id(cur_string)) { + return "id"; + } + else if(cur_string == "'") { + return "start of string"; + } + else if(cur_string == "#") { + return "comment"; + } + else { + string temp = incorrect(cur_string); + return temp; + } + return ""; +} + +bool lexi::det_help_id(string cur_string) { + if(('A' <= cur_string[0] && cur_string[0] <= 'Z') || + ('a' <= cur_string[0] && cur_string[0] <= 'z')) { + return true; + } + return false; +} + +bool lexi::quick_help(string a, string b) { + if(a == "S" && b == "c") { + return true; + } + return false; +} + +bool lexi::det_help_simple_string(string cur_string, string next_character) { + if(quick_help(cur_string, next_character)) { + return true; + } + else if((cur_string == "Q" && next_character == "u")) { + return true; + } + else if((cur_string == "R" && next_character == "u")) { + return true; + } + else if((cur_string == "F" && next_character == "a")) { + return true; + } + return false; +} + +bool lexi::det_help_simple(string cur_string, string next_character) { + if(cur_string == "," || cur_string == "." || cur_string == "?" || + cur_string == "(" || cur_string == ")" || cur_string == ":") { + type_simple(cur_string, next_character); + return true; + } + return false; +} + +string lexi::incorrect(string cur_string) { + if(cur_string == " " || cur_string == "\t") { + return "fine"; + } + else if(!(('A' <= cur_string[0] && cur_string[0] <= 'Z') || + ('a' <= cur_string[0] && cur_string[0] <= 'z'))) { + return "error"; + } + return " "; +} + +string lexi::id_finder(vector & data, int a, int b) { + string cur_string; + string next_character; + for(unsigned int j = b; j < data[a].size(); j++) { + cur_string += data[a].at(j); + if(j < data[a].size() - 1) { + next_character = data[a].at(j + 1); + } + else { + next_character = "!"; + } + if(is_char_valid(next_character[0]) || next_character == "!") { + data[a].replace(data[a].begin() + b, data[a].begin() + j + 1, " "); + return cur_string; + } + } + return " "; +} + +string lexi::comment_finder(vector & data, int i, int b) { + string cur_string; + string next_character; + for(unsigned int j = b; j < data[i].size(); j++) { + cur_string += data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = "!"; + } + if((j > data[i].size()) - 1 && next_character != "!") { + data[i].replace(data[i].begin() + b, data[i].end(), " "); + return cur_string; + } + } + return "error"; +} + +string lexi::string_finder(vector & data, int a, int b) { + string cur_string; + string next_character; + b = data[a].find('\''); + for(unsigned int j = b; j < data[a].size(); j++) { + cur_string += data[a].at(j); + if(j < data[a].size() - 1) { + next_character = data[a].at(j + 1); + } + if(next_character == "'") { + data[a].replace(data[a].begin() + b, data[a].begin() + j + 2, " "); + data[a].insert(data[a].begin() + b, ' '); + return cur_string; + } + } + return "error"; +} + +string lexi::type_simple_caps(string symbol, string next_symbol) { + if(symbol == ",") { + return "COMMA"; + } + else if(symbol == ".") { + return "PERIOD"; + } + else if(symbol == "?") { + return "Q_MARK"; + } + else if(symbol == "(") { + return "LEFT_PAREN"; + } + else if(symbol == ")") { + return "RIGHT_PAREN"; + } + else if(symbol == ":") { + if(next_symbol == "-") { + return "COLON_DASH"; + } + return "COLON"; + } + return ""; +} + +string lexi::type_simple(string symbol, string next_symbol) { + if(symbol == ",") { + return ","; + } + else if(symbol == ".") { + 
return "."; + } + else if(symbol == "?") { + return "?"; + } + else if(symbol == "(") { + return "("; + } + else if(symbol == ")") { + return ")"; + } + else if(symbol == ":") { + if(next_symbol == "-") { + return ":-"; + } + return ":"; + } + return ""; +} + +string lexi::det_type_simple_string(vector & data, int i, int b) { + string cur_string; + string next_character; + string special_case; + if(b > 0) { + special_case = data[i].at(b -1); + } + for(unsigned int j = b; j < data[i].size(); j++) { + cur_string += data[i].at(j); + if(j < data[i].size() - 1) { + next_character = data[i].at(j + 1); + } + else { + next_character = "!"; + } + if((is_simple_string(cur_string)) && (is_char_valid(next_character.at(0))) && (is_char_valid(special_case[0]))) { + data[i].replace(data[i].begin() + b, data[i].begin() + j + 1, " "); + return cur_string; + } + } + return "wrong"; +} + +bool lexi::is_char_valid(char next_character) { + if(!(('A' <= next_character && next_character <= 'Z') || + ('a' <= next_character && next_character <= 'z') || + ('0' <= next_character && next_character <= '9')) || (next_character == '\'')) { + return true; + } + return false; +} + +bool lexi::is_simple_string(string simple_com) { + if(simple_com == "Schemes") { + return true; + } + else if(simple_com == "Facts") { + return true; + } + else if(simple_com == "Rules") { + return true; + } + else if(simple_com == "Queries") { + return true; + } + return false; +} + +string lexi::type_simple_string(string simple_com) { + if(simple_com == "Schemes") { + return "SCHEMES"; + } + else if(simple_com == "Facts") { + return "FACTS"; + } + else if(simple_com == "Rules") { + return "RULES"; + } + else if(simple_com == "Queries") { + return "QUERIES"; + } + return ""; +} diff --git a/cs236/submission/lab02/lexi.h b/cs236/submission/lab02/lexi.h new file mode 100644 index 0000000..979b6e0 --- /dev/null +++ b/cs236/submission/lab02/lexi.h @@ -0,0 +1,44 @@ +#ifndef __LEXI_H__ +#define __LEXI_H__ + +#include +#include +#include +#include "token.h" +#include +#include +#include + + +using namespace std; + +class lexi { + public: + lexi(){} + vector tokens; + string determiner(string, string); + vector lexical_analyzer(vector, string); + string type_simple(string, string); + string type_simple_string(string); + bool is_simple_string(string); + string det_type_simple_string(vector&, int, int); + bool is_char_valid(char); + string string_finder(vector&, int, int); + string comment_finder(vector&, int, int); + string id_finder(vector&, int, int); + string incorrect(string); + bool det_help_simple(string, string); + bool det_help_simple_string(string, string); + bool quick_help(string, string); + bool det_help_id(string); + string type_simple_caps(string, string); + bool simple_state(vector&, string, string, string, int, int); + bool simple_state_string(vector&, string, string, string, int, int); + bool simple_id(vector&, string, string, string, int, int); + bool simple_comment(vector&, string, string, string, int, int); + void write_to_file(string); + void write_to_file(string, int); +}; + +#endif + diff --git a/cs236/submission/lab02/parameter.h b/cs236/submission/lab02/parameter.h new file mode 100644 index 0000000..1e223b0 --- /dev/null +++ b/cs236/submission/lab02/parameter.h @@ -0,0 +1,13 @@ +#ifndef __PARAMETER_H__ +#define __PARAMETER_H__ + +#include + +using namespace std; + +class parameter { + public: + string param; + string type; +}; +#endif diff --git a/cs236/submission/lab02/parser.cpp b/cs236/submission/lab02/parser.cpp new file 
mode 100644 index 0000000..04bd379 --- /dev/null +++ b/cs236/submission/lab02/parser.cpp @@ -0,0 +1,200 @@ +#include "parser.h" + +string parser::get_token() { + string type = tokens[0].type; + return type; +} + +void parser::check_datalog() { + match("SCHEMES"); + match("COLON"); + if(get_token() == "FACTS") { + error(); + } + check_schemelist(get_token()); + match("FACTS"); + match("COLON"); + check_factlist(get_token()); + match("RULES"); + match("COLON"); + check_rulelist(get_token()); + match("QUERIES"); + match("COLON"); + check_querylist(get_token()); + out(); +} + +string parser::out() { + stringstream s; + s << "Success!" << endl; + s << "Schemes(" << schemelist.size() << "):" << endl; + for(unsigned int i = 0; i < schemelist.size(); i++) { + s << " " << schemelist[i].toString(); + } + s << "Facts(" << factlist.size() << "):" << endl; + for(unsigned int i = 0; i < factlist.size(); i++) { + s << " " << factlist[i].toString(false); + } + s << "Rules(" << rulelist.size() << "):" << endl; + for(unsigned int i = 0; i < rulelist.size(); i++) { + s << " " << rulelist[i].toString(); + } + s << "Queries(" << querylist.size() << "):" << endl; + double a = 0; + for(unsigned int i = 0; i < querylist.size(); i++) { + s << " " << querylist[i].toString(a); + } + s << "Domain(" << domain.size() << "):" << endl; + for (auto it=domain.cbegin(); it != domain.cend(); ++it) { + s << " '" << *it << "'" << endl; + } + return s.str(); +} + +void parser::check_schemelist(string type) { + if(type == "FACTS") { + return; + } + else { + check_scheme(type); + check_schemelist(get_token()); + } +} + +void parser::check_scheme(string type) { + schemelist.push_back(check_predicate(type)); +} + +void parser::check_factlist(string type) { + if(type == "RULES") { + return; + } + else { + check_fact(type); + check_factlist(get_token()); + } +} + +void parser::check_fact(string type) { + factlist.push_back(check_predicate(type)); + match("PERIOD"); +} + +void parser::check_rulelist(string type) { + if(type == "QUERIES") { + return; + } + else { + check_rule(type); + check_rulelist(get_token()); + } +} + +void parser::check_rule(string type) { + rule r; + r.head = check_predicate(type); + match("COLON_DASH"); + check_predicate_list(get_token(), r); + match("PERIOD"); + rulelist.push_back(r); +} + +void parser::check_querylist(string type) { + check_query(type); + if(tokens.empty()) { + return; + } + else { + check_querylist(get_token()); + } +} + +void parser::check_query(string type) { + querylist.push_back(check_predicate(type)); + match("Q_MARK"); +} + +void parser::check_predicate_list(string type, rule& r) { + r.pred_rule.push_back(check_predicate(type)); + if(get_token() == "COMMA") { + match("COMMA"); + check_predicate_list(get_token(), r); + } + else { + return; + } +} + +predicate parser::check_predicate(string type) { + predicate pred; + pred.id = tokens[0].character; + match("ID"); + match("LEFT_PAREN"); + if(get_token() == "RIGHT_PAREN") { + error(); + } + check_parameterlist(get_token(), pred); + match("RIGHT_PAREN"); + return pred; +} + +void parser::check_parameterlist(string type, predicate& pred) { + if(type == "RIGHT_PAREN") { + return; + } + else { + check_parameter(type, pred); + if(get_token() == "COMMA") { + match("COMMA"); + if(get_token() == "RIGHT_PAREN") { + error(); + } + check_parameterlist(get_token(), pred); + } + else { + return; + } + } +} + +void parser::check_parameter(string type, predicate& pred) { + parameter para; + if(type == "STRING") { + domain.insert(tokens[0].character); + 
para.param = tokens[0].character; + para.type = tokens[0].type; + pred.pred_list.push_back(para); + match("STRING"); + return; + } + else if(type == "ID") { + para.param = tokens[0].character; + para.type = tokens[0].type; + pred.pred_list.push_back(para); + match("ID"); + return; + } + else { + error(); + } +} + +void parser::match(string type) { + if(get_token() == type) { + if(tokens.empty()) { + error(); + } + else { + tokens.erase(tokens.begin()); + } + } + else { + error(); + } +} + +void parser::error() { + stringstream oss; + oss << tokens[0] << endl; + throw oss.str(); +} diff --git a/cs236/submission/lab02/parser.h b/cs236/submission/lab02/parser.h new file mode 100644 index 0000000..fa1a820 --- /dev/null +++ b/cs236/submission/lab02/parser.h @@ -0,0 +1,49 @@ +#ifndef __PARSER_H__ +#define __PARSER_H__ + +#include +#include +#include +#include + +#include "token.h" +#include "scheme.h" +#include "fact.h" +#include "rule.h" +#include "query.h" +#include "predicate.h" +#include "parameter.h" + +using namespace std; + +class parser { + public: + parser() {} + vector tokens; + set domain; + vector schemelist; + vector factlist; + vector querylist; + vector predlist; + vector rulelist; + + string get_token(); + void check_datalog(); + void check_schemelist(string); + void check_scheme(string); + void check_factlist(string); + void check_fact(string); + void check_rulelist(string); + void check_rule(string); + void check_querylist(string); + void check_query(string); + void check_predicate_list(string, rule&); + predicate check_predicate(string); + void check_parameterlist(string type, predicate&); + void check_parameter(string, predicate&); + void match(string); + void error(); + string out(); + +}; +#endif diff --git a/cs236/submission/lab02/predicate.h b/cs236/submission/lab02/predicate.h new file mode 100644 index 0000000..cde0d7d --- /dev/null +++ b/cs236/submission/lab02/predicate.h @@ -0,0 +1,83 @@ +#ifndef __PREDICATE_H__ +#define __PREDICATE_H__ + +#include "parameter.h" +#include +#include + +using namespace std; + +class predicate { + public: + string id; + vector pred_list; + string toString() { + //schemes + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + s << ")\n"; + return s.str(); + } + string toString(bool a) { + //facts + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + s << ").\n"; + return s.str(); + } + string toString(double a) { + //query + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + if(pred_list[i].type == "STRING") { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + if(pred_list[i].type == "ID") { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + } + s << ")?\n"; + return s.str(); + } + string toString(int a) { + //rules + stringstream s; + s << id << "("; + for(unsigned int i = 0; i < pred_list.size(); i++) { + if(pred_list[i].type == "STRING") { + s << "'" << pred_list[i].param << "'"; + if(i < pred_list.size()-1) { + s << ","; + } + } + if(pred_list[i].type == "ID") { + s << pred_list[i].param; + if(i < pred_list.size()-1) { + s << ","; + } + } + } + s << ")"; + return s.str(); + } +}; +#endif diff --git a/cs236/submission/lab02/query.h b/cs236/submission/lab02/query.h new file mode 
100644 index 0000000..159e407 --- /dev/null +++ b/cs236/submission/lab02/query.h @@ -0,0 +1,14 @@ +#ifndef __QUERY_H__ +#define __QUERY_H__ + +#include "predicate.h" +#include +#include + +using namespace std; + +class query { + public: + vector pred_queries; +}; +#endif diff --git a/cs236/submission/lab02/rule.h b/cs236/submission/lab02/rule.h new file mode 100644 index 0000000..f4945a1 --- /dev/null +++ b/cs236/submission/lab02/rule.h @@ -0,0 +1,27 @@ +#ifndef __RULE_H__ +#define __RULE_H__ + +#include "predicate.h" +#include +#include + +using namespace std; + +class rule { + public: + predicate head; + vector pred_rule; + string toString() { + stringstream s; + s << head.toString(1) << " :- "; + for(unsigned int i = 0; i < pred_rule.size(); i++) { + s << pred_rule[i].toString(1); + if(i < pred_rule.size()-1) { + s << ","; + } + } + s << ".\n"; + return s.str(); + } +}; +#endif diff --git a/cs236/submission/lab02/scheme.h b/cs236/submission/lab02/scheme.h new file mode 100644 index 0000000..236a5c7 --- /dev/null +++ b/cs236/submission/lab02/scheme.h @@ -0,0 +1,14 @@ +#ifndef __SCHEME_H__ +#define __SCHEME_H__ + +#include "predicate.h" +#include +#include + +using namespace std; + +class scheme { + public: + vector pred_schemes; +}; +#endif diff --git a/cs236/submission/lab02/token.cpp b/cs236/submission/lab02/token.cpp new file mode 100644 index 0000000..9a5cdb8 --- /dev/null +++ b/cs236/submission/lab02/token.cpp @@ -0,0 +1,11 @@ +#include "token.h" + +token::token(string type, string character, int line_num) : + type(type), character(character), line_num(line_num) {} + +ostream & operator<<(ostream & os, token tk) { + os << "(" << tk.type + << ",\"" << tk.character + << "\"," << tk.line_num << ")"; + return os; +} diff --git a/cs236/submission/lab02/token.h b/cs236/submission/lab02/token.h new file mode 100644 index 0000000..d832eb3 --- /dev/null +++ b/cs236/submission/lab02/token.h @@ -0,0 +1,16 @@ +#ifndef __TOKEN_H__ +#define __TOKEN_H__ + +#include + +using namespace std; + +class token { + public: + token(string, string, int); + string type; + string character; + int line_num; + friend ostream & operator<<(ostream & os, token tk); +}; +#endif diff --git a/cs236/submission/lab02/util.h b/cs236/submission/lab02/util.h new file mode 100644 index 0000000..e17ff8b --- /dev/null +++ b/cs236/submission/lab02/util.h @@ -0,0 +1,56 @@ +#ifndef __UTIL_H__ +#define __UTIL_H__ +#include +#include +#include + +vector open_file(string file_name) { + ifstream myfile; + vector data; + myfile.open(file_name.c_str()); + string temp; + while(!myfile.eof()) { + getline(myfile, temp); + data.push_back(temp); + } + myfile.close(); + return data; +} + +bool get_file_name(string input) { + bool file_correct = false; + string input_file_name; + while(!file_correct) { + ifstream inputs(input.c_str()); + if(inputs.good()) { + input_file_name = input; + file_correct = true; + open_file(input_file_name); + return true; + } + else { + cerr << "incorrect file name" << endl; + return false; + } + } + return false; +} + +void write_file(string output, string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + myfile << output << "\n"; +} + +void write_file(vector output, string file_name) { + ofstream myfile; + myfile.open(file_name.c_str()); + for(unsigned int i = 0; i < output.size(); i++) { + if(i != output.size() -1) { + myfile << output[i] << "\n"; + } + myfile << output[i]; + } +} + +#endif diff --git a/cs236/tests/lab01/input.txt b/cs236/tests/lab01/input.txt new file mode 100644 index 
0000000..78d3e12 --- /dev/null +++ b/cs236/tests/lab01/input.txt @@ -0,0 +1,21 @@ + + +Schemes: + student(Name, ID, Address, Major) + +Facts: + Student('North', '51', '10 Main', 'CS'). +# |student('Reagan', '52', '11 Maple', 'CS').| + student('Reagan', '52', '11 Maple', 'CS'). + student('Clinton', '53', '12 Ashton', 'MATH'). + +Rules: + major(Name,Major):-student(Name,ID,Address,Major). + +Queries: + major('North',Major)? + major(Name,'MATH')? +#| major(Name,'MATH')? +# major(Name,'MATH')? |# +# |major(Name,'MATH')?| + diff --git a/cs236/tests/lab01/test1.txt b/cs236/tests/lab01/test1.txt new file mode 100644 index 0000000..78d3e12 --- /dev/null +++ b/cs236/tests/lab01/test1.txt @@ -0,0 +1,21 @@ + + +Schemes: + student(Name, ID, Address, Major) + +Facts: + Student('North', '51', '10 Main', 'CS'). +# |student('Reagan', '52', '11 Maple', 'CS').| + student('Reagan', '52', '11 Maple', 'CS'). + student('Clinton', '53', '12 Ashton', 'MATH'). + +Rules: + major(Name,Major):-student(Name,ID,Address,Major). + +Queries: + major('North',Major)? + major(Name,'MATH')? +#| major(Name,'MATH')? +# major(Name,'MATH')? |# +# |major(Name,'MATH')?| + diff --git a/cs236/tests/lab02/in/in21.txt b/cs236/tests/lab02/in/in21.txt new file mode 100644 index 0000000..2c7a841 --- /dev/null +++ b/cs236/tests/lab02/in/in21.txt @@ -0,0 +1,17 @@ + + +Schemes: + snap(S,N,A,P) + HasSameAddress(X,Y) + +Facts: + snap('12345','C. Brown','12 Apple','555-1234'). + snap('33333','Snoopy','12 Apple','555-1234'). + +Rules: + HasSameAddress(X,'Y') :- snap(A,X,B,C),snap(D,Y,'B',E). + +Queries: + HasSameAddress('Snoopy',Who)? + + diff --git a/cs236/tests/lab02/in/in22.txt b/cs236/tests/lab02/in/in22.txt new file mode 100644 index 0000000..4c0ea91 --- /dev/null +++ b/cs236/tests/lab02/in/in22.txt @@ -0,0 +1,16 @@ +Schemes: + snap(S,N,A,P) + NameHasID(N,S) + +Facts: + snap('12345','C. Brown','12 Apple','555-1234'). + snap('67890','L. Van Pelt','34 Pear','555-5678'). + +Rules: + NameHasID(N,S) :- snap(S,N,A,P)? + +Queries: + NameHasID('Snoopy',Id)? + + + diff --git a/cs236/tests/lab02/in/in23.txt b/cs236/tests/lab02/in/in23.txt new file mode 100644 index 0000000..d0af012 --- /dev/null +++ b/cs236/tests/lab02/in/in23.txt @@ -0,0 +1,19 @@ + + +Schemes: + bob(A,B) + +Facts: + bob('4','2'). + +Rules: + bob(A,B) :- bob(B,A). + +Queries: + bob(X,Y)? + + +# extra tokens at end of file + +???? + diff --git a/cs236/tests/lab02/in/in24.txt b/cs236/tests/lab02/in/in24.txt new file mode 100644 index 0000000..65c3f20 --- /dev/null +++ b/cs236/tests/lab02/in/in24.txt @@ -0,0 +1,19 @@ + + +Schemes: + bob(A,B) + +Facts: + bob('4','2'). + +Rules: + bob(A,B) :- bob(B,A). + + +# wrong punctuation for query + +Queries: + bob(X,Y). + + + diff --git a/cs236/tests/lab02/in/in25.txt b/cs236/tests/lab02/in/in25.txt new file mode 100644 index 0000000..1f10822 --- /dev/null +++ b/cs236/tests/lab02/in/in25.txt @@ -0,0 +1,18 @@ + + +Schemes: + bob(A,B) + +# missing punctuation on fact + +Facts: + bob('4','2') + +Rules: + bob(A,B) :- bob(B,A). + +Queries: + bob(X,Y)? + + + diff --git a/cs236/tests/lab02/in/in26.txt b/cs236/tests/lab02/in/in26.txt new file mode 100644 index 0000000..8aafe58 --- /dev/null +++ b/cs236/tests/lab02/in/in26.txt @@ -0,0 +1,16 @@ + + +Schemes: + bob(A,B) + +# fact/rule lists should be optional + +Facts: + +Rules: + +Queries: + bob(X,Y)? 
+ + + diff --git a/cs236/tests/lab02/in/in27.txt b/cs236/tests/lab02/in/in27.txt new file mode 100644 index 0000000..8f63895 --- /dev/null +++ b/cs236/tests/lab02/in/in27.txt @@ -0,0 +1,14 @@ + + +# scheme/query lists should be required + +Schemes: + +Facts: + +Rules: + +Queries: + + + diff --git a/cs236/tests/lab02/in/in28.txt b/cs236/tests/lab02/in/in28.txt new file mode 100644 index 0000000..0128278 --- /dev/null +++ b/cs236/tests/lab02/in/in28.txt @@ -0,0 +1,21 @@ + + +Schemes: + bob(A,B) + +Facts: + bob('4','2'). + +# rules section should exist +# queries should come after rules + +Queries: + bob(X,Y)? + +# facts can't be repeated + +Facts: + bob('4','4'). + + + diff --git a/cs236/tests/lab02/out/out21.txt b/cs236/tests/lab02/out/out21.txt new file mode 100644 index 0000000..260a46b --- /dev/null +++ b/cs236/tests/lab02/out/out21.txt @@ -0,0 +1,19 @@ +Success! +Schemes(2): + snap(S,N,A,P) + HasSameAddress(X,Y) +Facts(2): + snap('12345','C. Brown','12 Apple','555-1234'). + snap('33333','Snoopy','12 Apple','555-1234'). +Rules(1): + HasSameAddress(X,Y) :- snap(A,X,B,C),snap(D,Y,B,E). +Queries(1): + HasSameAddress('Snoopy',Who)? +Domain(6): + '12 Apple' + '12345' + '33333' + '555-1234' + 'C. Brown' + 'Snoopy' + diff --git a/cs236/tests/lab02/out/out22.txt b/cs236/tests/lab02/out/out22.txt new file mode 100644 index 0000000..42e9ee4 --- /dev/null +++ b/cs236/tests/lab02/out/out22.txt @@ -0,0 +1,3 @@ +Failure! + (Q_MARK,"?",10) + diff --git a/cs236/tests/lab02/out/out23.txt b/cs236/tests/lab02/out/out23.txt new file mode 100644 index 0000000..e495019 --- /dev/null +++ b/cs236/tests/lab02/out/out23.txt @@ -0,0 +1,3 @@ +Failure! + (Q_MARK,"?",18) + diff --git a/cs236/tests/lab02/out/out24.txt b/cs236/tests/lab02/out/out24.txt new file mode 100644 index 0000000..7dad9a7 --- /dev/null +++ b/cs236/tests/lab02/out/out24.txt @@ -0,0 +1,3 @@ +Failure! + (PERIOD,".",16) + diff --git a/cs236/tests/lab02/out/out25.txt b/cs236/tests/lab02/out/out25.txt new file mode 100644 index 0000000..d82a4ba --- /dev/null +++ b/cs236/tests/lab02/out/out25.txt @@ -0,0 +1,3 @@ +Failure! + (RULES,"Rules",11) + diff --git a/cs236/tests/lab02/out/out26.txt b/cs236/tests/lab02/out/out26.txt new file mode 100644 index 0000000..d4f5647 --- /dev/null +++ b/cs236/tests/lab02/out/out26.txt @@ -0,0 +1,9 @@ +Success! +Schemes(1): + bob(A,B) +Facts(0): +Rules(0): +Queries(1): + bob(X,Y)? +Domain(0): + diff --git a/cs236/tests/lab02/out/out27.txt b/cs236/tests/lab02/out/out27.txt new file mode 100644 index 0000000..7ac5084 --- /dev/null +++ b/cs236/tests/lab02/out/out27.txt @@ -0,0 +1,3 @@ +Failure! + (FACTS,"Facts",7) + diff --git a/cs236/tests/lab02/out/out28.txt b/cs236/tests/lab02/out/out28.txt new file mode 100644 index 0000000..6e74b2f --- /dev/null +++ b/cs236/tests/lab02/out/out28.txt @@ -0,0 +1,3 @@ +Failure! + (QUERIES,"Queries",12) + diff --git a/cs236/tests/lab03/in/in40.txt b/cs236/tests/lab03/in/in40.txt new file mode 100644 index 0000000..52147d3 --- /dev/null +++ b/cs236/tests/lab03/in/in40.txt @@ -0,0 +1,15 @@ +Schemes: + SK(A,B) +Facts: + SK('a','c'). + SK('b','c'). + SK('b','b'). + SK('b','c'). +Rules: + DoNothing(Z) :- Stuff(Z). +Queries: + SK(A,'c')? + SK('b','c')? + SK(X,X)? + SK(A,B)? + diff --git a/cs236/tests/lab03/out/out40.txt b/cs236/tests/lab03/out/out40.txt new file mode 100644 index 0000000..c8cf4f7 --- /dev/null +++ b/cs236/tests/lab03/out/out40.txt @@ -0,0 +1,11 @@ +SK(A,'c')? Yes(2) + A='a' + A='b' +SK('b','c')? Yes(1) +SK(X,X)? Yes(1) + X='b' +SK(A,B)? 
Yes(3) + A='a', B='c' + A='b', B='b' + A='b', B='c' +