From 83ad3c045dd92f4322f3925d72ff9872aa617351 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Fri, 30 Oct 2020 16:06:24 -0700 Subject: [PATCH 01/61] Add in linalg parser that still uses IndexExpr notation --- .../index_notation/index_notation_nodes.h | 7 + include/taco/parser/lexer.h | 1 + include/taco/parser/linalg_parser.h | 103 +++++ include/taco/parser/parser.h | 18 +- src/CMakeLists.txt | 2 +- src/lower/expr_tools.cpp | 4 + src/parser/lexer.cpp | 10 + src/parser/linalg_parser.cpp | 411 ++++++++++++++++++ tools/taco.cpp | 37 +- 9 files changed, 575 insertions(+), 18 deletions(-) create mode 100644 include/taco/parser/linalg_parser.h create mode 100644 src/parser/linalg_parser.cpp diff --git a/include/taco/index_notation/index_notation_nodes.h b/include/taco/index_notation/index_notation_nodes.h index 95439cd6b..e7374a4b2 100644 --- a/include/taco/index_notation/index_notation_nodes.h +++ b/include/taco/index_notation/index_notation_nodes.h @@ -68,6 +68,13 @@ struct NegNode : public UnaryExprNode { } }; +struct TransposeNode : public UnaryExprNode { + TransposeNode(IndexExpr operand) : UnaryExprNode(operand) {} + + void accept (IndexExprVisitorStrict* v) const { + v->visit(this); + } +}; struct BinaryExprNode : public IndexExprNode { virtual std::string getOperatorString() const = 0; diff --git a/include/taco/parser/lexer.h b/include/taco/parser/lexer.h index 55dc74410..30b2d05f8 100644 --- a/include/taco/parser/lexer.h +++ b/include/taco/parser/lexer.h @@ -23,6 +23,7 @@ enum class Token { mul, div, eq, + caretT, eot, // End of tokens error }; diff --git a/include/taco/parser/linalg_parser.h b/include/taco/parser/linalg_parser.h new file mode 100644 index 000000000..8875ba723 --- /dev/null +++ b/include/taco/parser/linalg_parser.h @@ -0,0 +1,103 @@ +#ifndef TACO_LINALG_PARSER_H +#define TACO_LINALG_PARSER_H + +#include +#include +#include +#include +#include + +#include "taco/tensor.h" +#include "taco/util/uncopyable.h" +#include "taco/type.h" +#include "taco/parser/parser.h" + +namespace taco { +class TensorBase; +class Format; +class IndexVar; +class IndexExpr; +class Access; + +namespace parser { +enum class Token; + +class LinalgParser : public AbstractParser { + +public: + + /// Create a parser object from linalg notation + LinalgParser(std::string expression, const std::map& formats, + const std::map& dataTypes, + const std::map>& tensorDimensions, + const std::map& tensors, + int defaultDimension=5); + + /// Parses the linalg expression and sets the result tensor to the result of that expression + /// @throws ParserError if there is an error with parsing the linalg string + void parse() override; + + /// Gets the result tensor after parsing is complete. 
+ const TensorBase& getResultTensor() const override; + + /// Gets all tensors + const std::map& getTensors() const override; + + /// Retrieve the tensor with the given name + const TensorBase& getTensor(std::string name) const override; + + /// Returns true if the tensor appeared in the expression + bool hasTensor(std::string name) const; + + /// Returns true if the index variable appeared in the expression + bool hasIndexVar(std::string name) const; + + /// Retrieve the index variable with the given name + IndexVar getIndexVar(std::string name) const; + +private: + Datatype outType; + Format format; + + struct Content; + std::shared_ptr content; + std::vector names; + + /// assign ::= var '=' expr + TensorBase parseAssign(); + + /// expr ::= term {('+' | '-') term} + IndexExpr parseExpr(); + + /// term ::= factor {('*' | '/') factor} + IndexExpr parseTerm(); + + /// factor ::= final + /// | '(' expr ')' + /// | '-' factor + /// | factor '^T' + IndexExpr parseFactor(); + + /// final ::= var + /// | scalar + IndexExpr parseFinal(); + + /// var ::= identifier + Access parseVar(); + + std::string currentTokenString(); + + void consume(Token expected); + + /// Retrieve the next token from the lexer + void nextToken(); + + /// FIXME: REMOVE LATER, temporary workaround to use Tensor API and TensorBase + std::vector getUniqueIndices(size_t order); + + int idxcount; +}; + + +}} +#endif //TACO_LINALG_PARSER_H diff --git a/include/taco/parser/parser.h b/include/taco/parser/parser.h index 9a3c4cfff..a2b0d94d9 100644 --- a/include/taco/parser/parser.h +++ b/include/taco/parser/parser.h @@ -20,11 +20,19 @@ class Access; namespace parser { enum class Token; +class AbstractParser : public util::Uncopyable { +public: + virtual void parse() = 0; + virtual const TensorBase& getResultTensor() const = 0; + virtual const std::map& getTensors() const = 0; + virtual const TensorBase& getTensor(std::string name) const = 0; +}; + /// A simple index expression parser. The parser can parse an index expression /// string, where tensor access expressions are in the form (e.g.) `A(i,j)`, /// A_{i,j} or A_i. A variable is taken to be free if it is used to index the /// lhs, and taken to be a summation variable otherwise. -class Parser : public util::Uncopyable { +class Parser : public AbstractParser { public: Parser(std::string expression, const std::map& formats, const std::map& dataTypes, @@ -34,10 +42,10 @@ class Parser : public util::Uncopyable { /// Parse the expression. /// @throws ParseError if there's a parser error - void parse(); + void parse() override; /// Returns the result (lhs) tensor of the index expression. - const TensorBase& getResultTensor() const; + const TensorBase& getResultTensor() const override; /// Returns true if the index variable appeared in the expression bool hasIndexVar(std::string name) const; @@ -49,10 +57,10 @@ class Parser : public util::Uncopyable { bool hasTensor(std::string name) const; /// Retrieve the tensor with the given name - const TensorBase& getTensor(std::string name) const; + const TensorBase& getTensor(std::string name) const override; /// Retrieve a map from tensor names to tensors. 
- const std::map& getTensors() const; + const std::map& getTensors() const override; /// Retrieve a list of names in the order they occurred in the expression const std::vector getNames() const; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f68d4e4c7..f63016d84 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -6,7 +6,7 @@ else() message("-- Static library") endif() -set(TACO_SRC_DIRS . parser index_notation lower ir codegen storage error util) +set(TACO_SRC_DIRS . parser index_notation lower ir codegen storage error util linalg_notation) foreach(dir ${TACO_SRC_DIRS}) file(GLOB TACO_HEADERS ${TACO_HEADERS} ${dir}/*.h) diff --git a/src/lower/expr_tools.cpp b/src/lower/expr_tools.cpp index ded5c53dd..e6a5a9b8c 100644 --- a/src/lower/expr_tools.cpp +++ b/src/lower/expr_tools.cpp @@ -215,6 +215,10 @@ class SubExprVisitor : public IndexExprVisitorStrict { subExpr = unarySubExpr(op); } + void visit(const TransposeNode* op) { + subExpr = unarySubExpr(op); + } + void visit(const SqrtNode* op) { subExpr = unarySubExpr(op); } diff --git a/src/parser/lexer.cpp b/src/parser/lexer.cpp index a490840a8..e462c4023 100644 --- a/src/parser/lexer.cpp +++ b/src/parser/lexer.cpp @@ -90,6 +90,13 @@ Token Lexer::getToken() { case EOF: token = Token::eot; break; + case '^': + lastChar = getNextChar(); + if (lastChar == 'T') + token = Token::caretT; + else + token = Token::error; + break; default: token = Token::error; break; @@ -161,6 +168,9 @@ std::string Lexer::tokenString(const Token& token) { case Token::error: str = "error"; break; + case Token::caretT: + str = "^T"; + break; case Token::eot: default: taco_ierror; diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp new file mode 100644 index 000000000..e73b9d28a --- /dev/null +++ b/src/parser/linalg_parser.cpp @@ -0,0 +1,411 @@ +#include "taco/parser/linalg_parser.h" +#include "taco/parser/parser.h" + +#include + +#include "taco/parser/lexer.h" +#include "taco/tensor.h" +#include "taco/format.h" + +#include "taco/index_notation/index_notation.h" +#include "taco/index_notation/index_notation_nodes.h" +#include "taco/index_notation/index_notation_rewriter.h" + +#include "taco/util/collections.h" + +using namespace std; + +namespace taco { +namespace parser { + +struct LinalgParser::Content { + /// Tensor formats + map formats; + map dataTypes; + + /// Tensor dimensions + map> tensorDimensions; + map indexVarDimensions; + + int defaultDimension; + + /// Track which modes have default values, so that we can change them + /// to values inferred from other tensors (read from files). 
+ set> modesWithDefaults; + + Lexer lexer; + Token currentToken; + bool parsingLhs = false; + + map indexVars; + + TensorBase resultTensor; + map tensors; +}; + + LinalgParser::LinalgParser(string expression, const map& formats, + const map& dataTypes, + const map>& tensorDimensions, + const std::map& tensors, + int defaultDimension) + : content(new LinalgParser::Content) { + content->lexer = Lexer(expression); + content->formats = formats; + content->tensorDimensions = tensorDimensions; + content->defaultDimension = defaultDimension; + content->tensors = tensors; + content->dataTypes = dataTypes; + + idxcount = 0; + + nextToken(); + } + +void LinalgParser::parse() { + content->resultTensor = parseAssign(); +} + +const TensorBase& LinalgParser::getResultTensor() const { + return content->resultTensor; +} + +TensorBase LinalgParser::parseAssign() { + content->parsingLhs = true; + cout << "parsing lhs" << endl; + Access lhs = parseVar(); + cout << lhs << endl; + content->parsingLhs = false; + + cout << "parsing rhs" << endl; + consume(Token::eq); + IndexExpr rhs = parseExpr(); + + // Collect all index var dimensions + struct Visitor : IndexNotationVisitor { + using IndexNotationVisitor::visit; + set> modesWithDefaults; + map *indexVarDimensions; + + void visit(const AccessNode *op) { + for (size_t i = 0; i < op->indexVars.size(); i++) { + IndexVar indexVar = op->indexVars[i]; + if (!util::contains(modesWithDefaults, {op->tensorVar, i})) { + auto dimension = op->tensorVar.getType().getShape().getDimension(i); + if (util::contains(*indexVarDimensions, indexVar)) { + taco_uassert(indexVarDimensions->at(indexVar) == dimension) << + "Incompatible dimensions"; + } else { + indexVarDimensions->insert({indexVar, dimension.getSize()}); + } + } + } + } + }; + Visitor visitor; + visitor.indexVarDimensions = &content->indexVarDimensions; + visitor.modesWithDefaults = content->modesWithDefaults; + rhs.accept(&visitor); + + // Rewrite expression to new index dimensions + struct Rewriter : IndexNotationRewriter { + using IndexNotationRewriter::visit; + map *indexVarDimensions; + map tensors; + + void visit(const AccessNode *op) { + bool dimensionChanged = false; + Shape shape = op->tensorVar.getType().getShape(); + vector dimensions; + for (auto &dimension : shape) { + taco_iassert(dimension.isFixed()); + dimensions.push_back((int) dimension.getSize()); + } + + taco_uassert(op->indexVars.size() == dimensions.size()) << + "The order of " << op->tensorVar.getName() + << " is inconsistent " << + "between tensor accesses or options. 
Is it order " << + dimensions.size() << " or " << op->indexVars.size() + << "?"; + + for (size_t i = 0; i < dimensions.size(); i++) { + IndexVar indexVar = op->indexVars[i]; + if (util::contains(*indexVarDimensions, indexVar)) { + int dimension = indexVarDimensions->at(indexVar); + if (dimension != dimensions[i]) { + dimensions[i] = dimension; + dimensionChanged = true; + } + } + } + if (dimensionChanged) { + TensorBase tensor; + if (util::contains(tensors, op->tensorVar.getName())) { + tensor = tensors.at(op->tensorVar.getName()); + } else { + tensor = TensorBase(op->tensorVar.getName(), + op->tensorVar.getType().getDataType(), dimensions, + op->tensorVar.getFormat()); + tensors.insert({tensor.getName(), tensor}); + } + expr = tensor(op->indexVars); + } else { + expr = op; + } + } + }; + Rewriter rewriter; + rewriter.indexVarDimensions = visitor.indexVarDimensions; + rhs = rewriter.rewrite(rhs); + + IndexExpr rewrittenLhs = rewriter.rewrite(lhs); + + for (auto &tensor : rewriter.tensors) { + content->tensors.at(tensor.first) = tensor.second; + } + content->resultTensor = content->tensors.at(lhs.getTensorVar().getName()); + + content->resultTensor(lhs.getIndexVars()) = rhs; + return content->resultTensor; +} + +IndexExpr LinalgParser::parseExpr() { + IndexExpr expr = parseTerm(); + while (content->currentToken == Token::add || + content->currentToken == Token::sub) { + switch (content->currentToken) { + case Token::add: + consume(Token::add); + expr = expr + parseTerm(); + break; + case Token::sub: + consume(Token::sub); + expr = expr - parseTerm(); + break; + default: + taco_unreachable; + } + } + return expr; +} + +IndexExpr LinalgParser::parseTerm() { + IndexExpr term = parseFactor(); + while (content->currentToken == Token::mul || + content->currentToken == Token::div) { + switch (content->currentToken) { + case Token::mul: { + consume(Token::mul); + term = term * parseFactor(); + break; + } + case Token::div: { + consume(Token::div); + term = term / parseFactor(); + break; + } + default: + taco_unreachable; + } + } + return term; +} + +IndexExpr LinalgParser::parseFactor() { + switch (content->currentToken) { + case Token::lparen: { + consume(Token::lparen); + IndexExpr factor = parseExpr(); + consume(Token::rparen); + return factor; + } + case Token::sub: + consume(Token::sub); + return new NegNode(parseFactor()); + default: + break; + } + + IndexExpr final = parseFinal(); + + if (content->currentToken == Token::caretT) { + consume(Token::caretT); + return new TransposeNode(final); + } + return final; +} + +IndexExpr LinalgParser::parseFinal() { + std::istringstream value (content->lexer.getIdentifier()); + switch (content->currentToken) { + case Token::complex_scalar: + { + consume(Token::complex_scalar); + std::complex complex_value; + value >> complex_value; + return IndexExpr(complex_value); + } + case Token::int_scalar: + { + consume(Token::int_scalar); + int64_t int_value; + value >> int_value; + return IndexExpr(int_value); + } + case Token::uint_scalar: + { + consume(Token::uint_scalar); + uint64_t uint_value; + value >> uint_value; + return IndexExpr(uint_value); + } + case Token::float_scalar: + { + consume(Token::float_scalar); + double float_value; + value >> float_value; + return IndexExpr(float_value); + } + default: + return parseVar(); + } +} + +Access LinalgParser::parseVar() { + if(content->currentToken != Token::identifier) { + throw ParseError("Expected linalg name"); + } + string tensorName = content->lexer.getIdentifier(); + cout << tensorName << endl; + 
consume(Token::identifier); + names.push_back(tensorName); + + size_t order = 0; + // LinalgParser: By default assume capital variables are Matrices and lower case variables are vectors + if (isupper(tensorName.at(0))) { + order = 2; + } else { + order = 1; + } + cout << order << endl; + + Format format; + if (util::contains(content->formats, tensorName)) { + format = content->formats.at(tensorName); + } + else { + format = Format(std::vector(order, Dense)); + } + cout << format << endl; + + TensorBase tensor; + if (util::contains(content->tensors, tensorName)) { + tensor = content->tensors.at(tensorName); + } + else { + vector tensorDimensions(order); + vector modesWithDefaults(order, false); + for (size_t i = 0; i < tensorDimensions.size(); i++) { + cout << i << endl; + if (util::contains(content->tensorDimensions, tensorName)) { + tensorDimensions[i] = content->tensorDimensions.at(tensorName)[i]; + } + else { + cout << "default" << endl; + tensorDimensions[i] = content->defaultDimension; + modesWithDefaults[i] = true; + } + } + Datatype dataType = Float(); + if (util::contains(content->dataTypes, tensorName)) { + dataType = content->dataTypes.at(tensorName); + } + tensor = TensorBase(tensorName,dataType,tensorDimensions,format); + cout << tensor << endl; + for (size_t i = 0; i < tensorDimensions.size(); i++) { + if (modesWithDefaults[i]) { + content->modesWithDefaults.insert({tensor.getTensorVar(), i}); + } + } + + content->tensors.insert({tensorName,tensor}); + } + + cout << order << endl; + vector idxlist = getUniqueIndices(order); + for (auto i : idxlist) + cout << i << ", "; + + return tensor(idxlist); +} + +vector LinalgParser::getUniqueIndices(size_t order) { + vector result; + for (int i = idxcount; i < (idxcount + (int)order); i++) { + cout << i << ": "; + string name = "i" + to_string(i); + cout << name << " "; + IndexVar indexVar = getIndexVar(name); + cout << indexVar << endl; + result.push_back(indexVar); + } + idxcount += order; + return result; +} + +IndexVar LinalgParser::getIndexVar(string name) const { + taco_iassert(name != ""); + if (!hasIndexVar(name)) { + IndexVar var(name); + content->indexVars.insert({name, var}); + + // tensorDimensions can also store index var dimensions + if (util::contains(content->tensorDimensions, name)) { + content->indexVarDimensions.insert({var, content->tensorDimensions.at(name)[0]}); + } + } + return content->indexVars.at(name); +} + +bool LinalgParser::hasIndexVar(std::string name) const { + return util::contains(content->indexVars, name); +} + +void LinalgParser::consume(Token expected) { + if(content->currentToken != expected) { + string error = "Expected \'" + content->lexer.tokenString(expected) + + "\' but got \'" + currentTokenString() + "\'"; + throw ParseError(error); + } + nextToken(); +} + +const std::map& LinalgParser::getTensors() const { + return content->tensors; +} + +// FIXME: Remove this redundancy and try to add it to abstract parser class... +void LinalgParser::nextToken() { + content->currentToken = content->lexer.getToken(); +} + +string LinalgParser::currentTokenString() { + return (content->currentToken == Token::identifier) + ? 
content->lexer.getIdentifier() + : content->lexer.tokenString(content->currentToken); +} + +const TensorBase& LinalgParser::getTensor(string name) const { + taco_iassert(name != ""); + if (!hasTensor(name)) { + taco_uerror << "Parser error: Tensor name " << name << + " not found in expression" << endl; + } + return content->tensors.at(name); +} + +bool LinalgParser::hasTensor(std::string name) const { + return util::contains(content->tensors, name); +} +} // namespace parser +} // namespace taco diff --git a/tools/taco.cpp b/tools/taco.cpp index d2d2a02c6..2dd5748b8 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -10,6 +10,7 @@ #include "taco/error.h" #include "taco/parser/parser.h" +#include "taco/parser/linalg_parser.h" #include "taco/storage/storage.h" #include "taco/ir/ir.h" #include "taco/ir/ir_printer.h" @@ -189,6 +190,8 @@ static void printUsageInfo() { printFlag("schedule", "Specify parallel execution schedule"); cout << endl; printFlag("nthreads", "Specify number of threads for parallel execution"); + cout << endl; + printFlag("linalg", "Specify if the input should be in Linear Algebra (not index) Notation"); } static int reportError(string errorMessage, int errorCode) { @@ -208,7 +211,7 @@ static void printCommandLine(ostream& os, int argc, char* argv[]) { } } -static bool setSchedulingCommands(istream& in, parser::Parser& parser, IndexStmt& stmt) { +static bool setSchedulingCommands(istream& in, parser::AbstractParser& parser, IndexStmt& stmt) { auto findVar = [&stmt](string name) { ProvenanceGraph graph(stmt); for (auto v : graph.getAllIndexVars()) { @@ -490,7 +493,8 @@ int main(int argc, char* argv[]) { bool readKernels = false; bool cuda = false; - bool setSchedule = false; + bool setSchedule = false; + bool linalg = false; ParallelSchedule sched = ParallelSchedule::Static; int chunkSize = 0; @@ -833,6 +837,9 @@ int main(int argc, char* argv[]) { }, ' '); scheduleCommands.push_back(argValue); } + else if ("-linalg" == argName) { + linalg = true; + } else { if (exprStr.size() != 0) { printUsageInfo(); @@ -882,17 +889,23 @@ int main(int argc, char* argv[]) { } TensorBase tensor; - parser::Parser parser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, 42); + parser::AbstractParser *parser; + if (linalg) + parser = new parser::LinalgParser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, 42); + else + parser = new parser::Parser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, 42); + try { - parser.parse(); - tensor = parser.getResultTensor(); + parser->parse(); + tensor = parser->getResultTensor(); + cout << tensor; } catch (parser::ParseError& e) { return reportError(e.getMessage(), 6); } // Generate tensors for (auto& fills : tensorsFill) { - TensorBase tensor = parser.getTensor(fills.first); + TensorBase tensor = parser->getTensor(fills.first); util::fillTensor(tensor,fills.second); loadedTensors.insert({fills.first, tensor}); @@ -904,8 +917,8 @@ int main(int argc, char* argv[]) { // If all input tensors have been initialized then we should evaluate bool benchmark = true; - for (auto& tensor : parser.getTensors()) { - if (tensor.second == parser.getResultTensor()) { + for (auto& tensor : parser->getTensors()) { + if (tensor.second == parser->getResultTensor()) { continue; } if (!util::contains(loadedTensors, tensor.second.getName())) { @@ -932,7 +945,7 @@ int main(int argc, char* argv[]) { scheduleStream << command << endl; } - cuda |= setSchedulingCommands(scheduleStream, parser, stmt); + cuda |= 
setSchedulingCommands(scheduleStream, *parser, stmt); } if (cuda) { @@ -993,8 +1006,8 @@ int main(int argc, char* argv[]) { // TODO: Replace this redundant parsing with just a call to set the expr try { - auto operands = parser.getTensors(); - operands.erase(parser.getResultTensor().getName()); + auto operands = parser->getTensors(); + operands.erase(parser->getResultTensor().getName()); parser::Parser parser2(exprStr, formats, dataTypes, tensorsDimensions, operands, 42); parser2.parse(); @@ -1265,7 +1278,7 @@ int main(int argc, char* argv[]) { write(outputFileName, FileType::tns, tensor); TensorBase paramTensor; for (const auto &fills : tensorsFill ) { - paramTensor = parser.getTensor(fills.first); + paramTensor = parser->getTensor(fills.first); outputFileName = outputDirectory + "/" + paramTensor.getName() + ".tns"; write(outputFileName, FileType::tns, paramTensor); } From 4630aed955973676fc3c3c2eab0ba6c0e0913d4c Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Mon, 2 Nov 2020 18:10:39 -0800 Subject: [PATCH 02/61] Add in linalg_notation files for future use --- .../taco/linalg_notation/linalg_notation.h | 145 +++++++++++++++ .../linalg_notation/linalg_notation_nodes.h | 176 ++++++++++++++++++ .../linalg_notation_nodes_abstract.h | 39 ++++ .../linalg_notation/linalg_notation_printer.h | 50 +++++ .../linalg_notation/linalg_notation_visitor.h | 51 +++++ src/linalg_notation/linalg_notation.cpp | 112 +++++++++++ .../linalg_notation_nodes_abstract.cpp | 20 ++ .../linalg_notation_printer.cpp | 149 +++++++++++++++ .../linalg_notation_visitor.cpp | 12 ++ 9 files changed, 754 insertions(+) create mode 100644 include/taco/linalg_notation/linalg_notation.h create mode 100644 include/taco/linalg_notation/linalg_notation_nodes.h create mode 100644 include/taco/linalg_notation/linalg_notation_nodes_abstract.h create mode 100644 include/taco/linalg_notation/linalg_notation_printer.h create mode 100644 include/taco/linalg_notation/linalg_notation_visitor.h create mode 100644 src/linalg_notation/linalg_notation.cpp create mode 100644 src/linalg_notation/linalg_notation_nodes_abstract.cpp create mode 100644 src/linalg_notation/linalg_notation_printer.cpp create mode 100644 src/linalg_notation/linalg_notation_visitor.cpp diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h new file mode 100644 index 000000000..d052e212a --- /dev/null +++ b/include/taco/linalg_notation/linalg_notation.h @@ -0,0 +1,145 @@ +// +// Created by Olivia Hsu on 10/30/20. 
+// + +#ifndef TACO_LINALG_NOTATION_H +#define TACO_LINALG_NOTATION_H +#include +#include +#include +#include +#include +#include +#include + +#include "taco/format.h" +#include "taco/error.h" +#include "taco/util/intrusive_ptr.h" +#include "taco/util/comparable.h" +#include "taco/type.h" +#include "taco/ir/ir.h" +#include "taco/codegen/module.h" + +#include "taco/ir_tags.h" +#include "taco/lower/iterator.h" +#include "taco/index_notation/provenance_graph.h" + +#include "taco/linalg_notation/linalg_notation_nodes_abstract.h" + +namespace taco { + +class Type; + +class Dimension; + +class Format; + +class Schedule; + +class TensorVar; + +class LinalgExpr; + +class Assignment; + +class Access; + +struct VarNode; +struct LiteralNode; +struct NegNode; +struct TransposeNode; +struct AddNode; +struct SubNode; +struct MatMulNode; +struct ElemMulNode; +struct DivNode; +struct UnaryExprNode; +struct BinaryExprNode; + +class LinalgExprVisitorStrict; + + +class LinalgExpr : public util::IntrusivePtr { +public: + LinalgExpr() : util::IntrusivePtr(nullptr) {} + + LinalgExpr(const LinalgExprNode *n) : util::IntrusivePtr(n) {} + + /// Construct a scalar tensor access. + /// ``` + /// A(i,j) = b; + /// ``` + LinalgExpr(TensorVar); + + /// Consturct an integer literal. + /// ``` + /// A(i,j) = 1; + /// ``` + LinalgExpr(char); + + LinalgExpr(int8_t); + + LinalgExpr(int16_t); + + LinalgExpr(int32_t); + + LinalgExpr(int64_t); + + /// Consturct an unsigned integer literal. + /// ``` + /// A(i,j) = 1u; + /// ``` + LinalgExpr(uint8_t); + + LinalgExpr(uint16_t); + + LinalgExpr(uint32_t); + + LinalgExpr(uint64_t); + + /// Consturct double literal. + /// ``` + /// A(i,j) = 1.0; + /// ``` + LinalgExpr(float); + + LinalgExpr(double); + + /// Construct complex literal. + /// ``` + /// A(i,j) = complex(1.0, 1.0); + /// ``` + LinalgExpr(std::complex); + + LinalgExpr(std::complex); + + Datatype getDataType() const; + + /// Visit the linalg expression's sub-expressions. + void accept(LinalgExprVisitorStrict *) const; + + /// Print the index expression. + friend std::ostream &operator<<(std::ostream &, const LinalgExpr &); +}; + +/// Compare two index expressions by value. +bool equals(LinalgExpr, LinalgExpr); + +/// Construct and returns an expression that negates this expression. +LinalgExpr operator-(const LinalgExpr&); + +/// Add two linear algebra expressions. +LinalgExpr operator+(const LinalgExpr&, const LinalgExpr&); + +/// Subtract a linear algebra expressions from another. +LinalgExpr operator-(const LinalgExpr&, const LinalgExpr&); + +/// Multiply two linear algebra expressions. +LinalgExpr operator*(const LinalgExpr&, const LinalgExpr&); + +/// Divide a linear expression by another. 
+LinalgExpr operator/(const LinalgExpr&, const LinalgExpr&); + +} + +#endif //TACO_LINALG_NOTATION_H diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h new file mode 100644 index 000000000..1ad72c566 --- /dev/null +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -0,0 +1,176 @@ +#ifndef TACO_LINALG_NOTATION_NODES_H +#define TACO_LINALG_NOTATION_NODES_H + +#include +#include + +#include "taco/type.h" +#include "taco/index_notation/index_notation.h" +#include "taco/index_notation/index_notation_nodes_abstract.h" +#include "taco/index_notation/index_notation_visitor.h" +#include "taco/index_notation/intrinsic.h" +#include "taco/util/strings.h" +#include "taco/linalg_notation/linalg_notation.h" +#include "taco/linalg_notation/linalg_notation_nodes_abstract.h" +#include "taco/linalg_notation/linalg_notation_visitor.h" + +namespace taco { + + + struct VarNode : public LinalgExprNode { + VarNode(TensorVar tensorVar) + : LinalgExprNode(tensorVar.getType().getDataType()), tensorVar(tensorVar) {} + + void accept(LinalgExprVisitorStrict* v) const override { + v->visit(this); + } + + virtual void setAssignment(const Assignment& assignment) {} + + TensorVar tensorVar; + }; + + struct LiteralNode : public LinalgExprNode { + template LiteralNode(T val) : LinalgExprNode(type()) { + this->val = malloc(sizeof(T)); + *static_cast(this->val) = val; + } + + ~LiteralNode() { + free(val); + } + + void accept(LinalgExprVisitorStrict* v) const override { + v->visit(this); + } + + template T getVal() const { + taco_iassert(getDataType() == type()) + << "Attempting to get data of wrong type"; + return *static_cast(val); + } + + void* val; + }; + + + struct UnaryExprNode : public LinalgExprNode { + LinalgExpr a; + + protected: + UnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType()), a(a) {} + }; + + + struct NegNode : public UnaryExprNode { + NegNode(LinalgExpr operand) : UnaryExprNode(operand) {} + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + + struct TransposeNode : public UnaryExprNode { + TransposeNode(LinalgExpr operand) : UnaryExprNode(operand) {} + + void accept (LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + + struct BinaryExprNode : public LinalgExprNode { + virtual std::string getOperatorString() const = 0; + + LinalgExpr a; + LinalgExpr b; + + protected: + BinaryExprNode() : LinalgExprNode() {} + BinaryExprNode(LinalgExpr a, LinalgExpr b) + : LinalgExprNode(max_type(a.getDataType(), b.getDataType())), a(a), b(b) {} + }; + + + struct AddNode : public BinaryExprNode { + AddNode() : BinaryExprNode() {} + AddNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + + std::string getOperatorString() const override{ + return "+"; + } + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + + + struct SubNode : public BinaryExprNode { + SubNode() : BinaryExprNode() {} + SubNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + + std::string getOperatorString() const override{ + return "-"; + } + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + + + struct MatMulNode : public BinaryExprNode { + MatMulNode() : BinaryExprNode() {} + MatMulNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + + std::string getOperatorString() const override{ + return "*"; + } + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + +struct ElemMulNode : public 
BinaryExprNode { + ElemMulNode() : BinaryExprNode() {} + ElemMulNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + + std::string getOperatorString() const override{ + return "elemMul"; + } + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } +}; + + struct DivNode : public BinaryExprNode { + DivNode() : BinaryExprNode() {} + DivNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + + std::string getOperatorString() const override{ + return "/"; + } + + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } + }; + +/// Returns true if expression e is of type E. + template + inline bool isa(const LinalgExprNode* e) { + return e != nullptr && dynamic_cast(e) != nullptr; + } + +/// Casts the expression e to type E. + template + inline const E* to(const LinalgExprNode* e) { + taco_iassert(isa(e)) << + "Cannot convert " << typeid(e).name() << " to " << typeid(E).name(); + return static_cast(e); + } + +} +#endif //TACO_LINALG_NOTATION_NODES_H diff --git a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h new file mode 100644 index 000000000..a088d55c8 --- /dev/null +++ b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h @@ -0,0 +1,39 @@ +// +// Created by Olivia Hsu on 10/30/20. +// + +#ifndef TACO_LINALG_NOTATION_NODES_ABSTRACT_H +#define TACO_LINALG_NOTATION_NODES_ABSTRACT_H + +#include +#include + +#include "taco/type.h" +#include "taco/util/uncopyable.h" +#include "taco/util/intrusive_ptr.h" +#include "taco/linalg_notation/linalg_notation_visitor.h" +namespace taco { + +class TensorVar; +class LinalgExprVisitorStrict; +class Precompute; + +/// A node of a scalar index expression tree. +struct LinalgExprNode : public util::Manageable, + private util::Uncopyable { +public: + LinalgExprNode() = default; + LinalgExprNode(Datatype type); + virtual ~LinalgExprNode() = default; + virtual void accept(LinalgExprVisitorStrict*) const = 0; + + /// Return the scalar data type of the index expression. 
+ Datatype getDataType() const; + +private: + Datatype dataType; +}; + +} + +#endif //TACO_LINALG_NOTATION_NODES_ABSTRACT_H diff --git a/include/taco/linalg_notation/linalg_notation_printer.h b/include/taco/linalg_notation/linalg_notation_printer.h new file mode 100644 index 000000000..fadbeb546 --- /dev/null +++ b/include/taco/linalg_notation/linalg_notation_printer.h @@ -0,0 +1,50 @@ +#ifndef TACO_LINALG_NOTATION_PRINTER_H +#define TACO_LINALG_NOTATION_PRINTER_H + +#include +#include "taco/linalg_notation/linalg_notation_visitor.h" + +namespace taco { + +class LinalgNotationPrinter : public LinalgExprVisitorStrict { +public: + LinalgNotationPrinter(std::ostream& os); + + void print(const LinalgExpr& expr); + + using LinalgExprVisitorStrict::visit; + + // Scalar Expressions + void visit(const VarNode*); + void visit(const LiteralNode*); + void visit(const NegNode*); + void visit(const AddNode*); + void visit(const SubNode*); + void visit(const MatMulNode*); + void visit(const ElemMulNode*); + void visit(const DivNode*); + void visit(const TransposeNode*); + +private: + std::ostream& os; + + enum class Precedence { + ACCESS = 2, + VAR = 2, + FUNC = 2, + NEG = 3, + TRANSPOSE = 3, + MATMUL = 5, + ELEMMUL = 5, + DIV = 5, + ADD = 6, + SUB = 6, + TOP = 20 + }; + Precedence parentPrecedence; + + template void visitBinary(Node op, Precedence p); +}; + +} +#endif //TACO_LINALG_NOTATION_PRINTER_H diff --git a/include/taco/linalg_notation/linalg_notation_visitor.h b/include/taco/linalg_notation/linalg_notation_visitor.h new file mode 100644 index 000000000..5812754a3 --- /dev/null +++ b/include/taco/linalg_notation/linalg_notation_visitor.h @@ -0,0 +1,51 @@ +#ifndef TACO_LINALG_NOTATION_VISITOR_H +#define TACO_LINALG_NOTATION_VISITOR_H +namespace taco { + +class LinalgExpr; + +class TensorVar; + +struct VarNode; +struct LiteralNode; +struct NegNode; +struct TransposeNode; +struct AddNode; +struct SubNode; +struct MatMulNode; +struct ElemMulNode; +struct DivNode; +struct UnaryExprNode; +struct BinaryExprNode; + + + +/// Visit the nodes in an expression. This visitor provides some type safety +/// by requiring all visit methods to be overridden. 
+class LinalgExprVisitorStrict { +public: + virtual ~LinalgExprVisitorStrict() = default; + + void visit(const LinalgExpr &); + + virtual void visit(const VarNode *) = 0; + + virtual void visit(const LiteralNode *) = 0; + + virtual void visit(const NegNode *) = 0; + + virtual void visit(const AddNode *) = 0; + + virtual void visit(const SubNode *) = 0; + + virtual void visit(const MatMulNode *) = 0; + + virtual void visit(const ElemMulNode *) = 0; + + virtual void visit(const DivNode *) = 0; + + virtual void visit(const TransposeNode *) = 0; +}; + +} +#endif //TACO_LINALG_NOTATION_VISITOR_H diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp new file mode 100644 index 000000000..646f91113 --- /dev/null +++ b/src/linalg_notation/linalg_notation.cpp @@ -0,0 +1,112 @@ +#include "taco/index_notation/index_notation.h" + +#include +#include +#include +#include +#include +#include +#include +#include "lower/mode_access.h" + +#include "error/error_checks.h" +#include "taco/error/error_messages.h" +#include "taco/type.h" +#include "taco/format.h" + +#include "taco/index_notation/intrinsic.h" +#include "taco/index_notation/schedule.h" +#include "taco/index_notation/transformations.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" +#include "taco/index_notation/index_notation_rewriter.h" +#include "taco/linalg_notation/linalg_notation_printer.h" +#include "taco/ir/ir.h" +#include "taco/lower/lower.h" +#include "taco/codegen/module.h" + +#include "taco/util/name_generator.h" +#include "taco/util/scopedmap.h" +#include "taco/util/strings.h" +#include "taco/util/collections.h" + +using namespace std; + +namespace taco { + +LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new VarNode(var)) { +} + +LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(int8_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(int16_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(int32_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(int64_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(uint8_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(uint16_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(uint32_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(uint64_t val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(float val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(double val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LiteralNode(val)) { +} + +LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LiteralNode(val)) { +} + +Datatype LinalgExpr::getDataType() const { + return const_cast(this->ptr)->getDataType(); +} + +void LinalgExpr::accept(LinalgExprVisitorStrict *v) const { + ptr->accept(v); +} + +std::ostream& operator<<(std::ostream& os, const LinalgExpr& expr) { + if (!expr.defined()) return os << "LinalgExpr()"; + LinalgNotationPrinter printer(os); + printer.print(expr); + return os; +} + +LinalgExpr operator-(const LinalgExpr &expr) { + return new NegNode(expr.ptr); +} + +LinalgExpr operator+(const LinalgExpr &lhs, const LinalgExpr &rhs) { + return new AddNode(lhs, rhs); +} + +LinalgExpr operator-(const LinalgExpr &lhs, const LinalgExpr &rhs) { + return new SubNode(lhs, rhs); +} + +LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { + 
return new MatMulNode(lhs, rhs); +} + +LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { + return new DivNode(lhs, rhs); +} +} // namespace taco \ No newline at end of file diff --git a/src/linalg_notation/linalg_notation_nodes_abstract.cpp b/src/linalg_notation/linalg_notation_nodes_abstract.cpp new file mode 100644 index 000000000..62dcb747e --- /dev/null +++ b/src/linalg_notation/linalg_notation_nodes_abstract.cpp @@ -0,0 +1,20 @@ +#include "taco/linalg_notation/linalg_notation_nodes_abstract.h" + +#include "taco/linalg_notation/linalg_notation.h" +#include "taco/index_notation/schedule.h" +#include "taco/index_notation/transformations.h" + +#include + +using namespace std; + +namespace taco { + +LinalgExprNode::LinalgExprNode(Datatype type) + : dataType(type) { +} + +Datatype LinalgExprNode::getDataType() const { + return dataType; +} +} diff --git a/src/linalg_notation/linalg_notation_printer.cpp b/src/linalg_notation/linalg_notation_printer.cpp new file mode 100644 index 000000000..d682c0975 --- /dev/null +++ b/src/linalg_notation/linalg_notation_printer.cpp @@ -0,0 +1,149 @@ +#include "taco/linalg_notation/linalg_notation_printer.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" + +using namespace std; + +namespace taco { + +LinalgNotationPrinter::LinalgNotationPrinter(std::ostream& os) : os(os) { +} + +void LinalgNotationPrinter::print(const LinalgExpr& expr) { + parentPrecedence = Precedence::TOP; + expr.accept(this); +} + +void LinalgNotationPrinter::visit(const VarNode* op) { + os << op->tensorVar.getName(); +} + +void LinalgNotationPrinter::visit(const LiteralNode* op) { + switch (op->getDataType().getKind()) { + case Datatype::Bool: + os << op->getVal(); + break; + case Datatype::UInt8: + os << op->getVal(); + break; + case Datatype::UInt16: + os << op->getVal(); + break; + case Datatype::UInt32: + os << op->getVal(); + break; + case Datatype::UInt64: + os << op->getVal(); + break; + case Datatype::UInt128: + taco_not_supported_yet; + break; + case Datatype::Int8: + os << op->getVal(); + break; + case Datatype::Int16: + os << op->getVal(); + break; + case Datatype::Int32: + os << op->getVal(); + break; + case Datatype::Int64: + os << op->getVal(); + break; + case Datatype::Int128: + taco_not_supported_yet; + break; + case Datatype::Float32: + os << op->getVal(); + break; + case Datatype::Float64: + os << op->getVal(); + break; + case Datatype::Complex64: + os << op->getVal>(); + break; + case Datatype::Complex128: + os << op->getVal>(); + break; + case Datatype::Undefined: + break; + } +} + +void LinalgNotationPrinter::visit(const NegNode* op) { + Precedence precedence = Precedence::NEG; + bool parenthesize = precedence > parentPrecedence; + parentPrecedence = precedence; + os << "-"; + if (parenthesize) { + os << "("; + } + op->a.accept(this); + if (parenthesize) { + os << ")"; + } +} + +void LinalgNotationPrinter::visit(const TransposeNode* op) { + Precedence precedence = Precedence::TRANSPOSE; + bool parenthesize = precedence > parentPrecedence; + parentPrecedence = precedence; + if (parenthesize) { + os << "("; + } + op->a.accept(this); + if (parenthesize) { + os << ")"; + } + os << "^T"; +} + +template +void LinalgNotationPrinter::visitBinary(Node op, Precedence precedence) { + bool parenthesize = precedence > parentPrecedence; + if (parenthesize) { + os << "("; + } + parentPrecedence = precedence; + op->a.accept(this); + os << " " << op->getOperatorString() << " "; + parentPrecedence = precedence; + op->b.accept(this); + if (parenthesize) { + 
os << ")"; + } +} + +void LinalgNotationPrinter::visit(const AddNode* op) { + visitBinary(op, Precedence::ADD); +} + +void LinalgNotationPrinter::visit(const SubNode* op) { + visitBinary(op, Precedence::SUB); +} + +void LinalgNotationPrinter::visit(const MatMulNode* op) { + visitBinary(op, Precedence::MATMUL); +} + +void LinalgNotationPrinter::visit(const ElemMulNode* op) { + visitBinary(op, Precedence::ELEMMUL); +} + +void LinalgNotationPrinter::visit(const DivNode* op) { + visitBinary(op, Precedence::DIV); +} + +template +static inline void acceptJoin(LinalgNotationPrinter* printer, + std::ostream& stream, const std::vector& nodes, + std::string sep) { + if (nodes.size() > 0) { + nodes[0].accept(printer); + } + for (size_t i = 1; i < nodes.size(); ++i) { + stream << sep; + nodes[i].accept(printer); + } +} + +} diff --git a/src/linalg_notation/linalg_notation_visitor.cpp b/src/linalg_notation/linalg_notation_visitor.cpp new file mode 100644 index 000000000..35c7e6932 --- /dev/null +++ b/src/linalg_notation/linalg_notation_visitor.cpp @@ -0,0 +1,12 @@ + +#include "taco/linalg_notation/linalg_notation_visitor.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" + +using namespace std; + +namespace taco { + +void LinalgExprVisitorStrict::visit(const LinalgExpr &expr) { + expr.accept(this); +} +} \ No newline at end of file From 8811edad983b130cd341ffa36d0bee4c706bad06 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 2 Nov 2020 22:29:59 -0800 Subject: [PATCH 03/61] some debugging couts --- src/parser/linalg_parser.cpp | 4 ++++ tools/taco.cpp | 1 + 2 files changed, 5 insertions(+) diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp index e73b9d28a..409328f43 100644 --- a/src/parser/linalg_parser.cpp +++ b/src/parser/linalg_parser.cpp @@ -73,12 +73,15 @@ TensorBase LinalgParser::parseAssign() { content->parsingLhs = true; cout << "parsing lhs" << endl; Access lhs = parseVar(); + cout << "Result of parsing LHS" << endl; cout << lhs << endl; content->parsingLhs = false; cout << "parsing rhs" << endl; consume(Token::eq); IndexExpr rhs = parseExpr(); + cout << "Result of parsing RHS" << endl; + cout << rhs << endl; // Collect all index var dimensions struct Visitor : IndexNotationVisitor { @@ -333,6 +336,7 @@ Access LinalgParser::parseVar() { cout << order << endl; vector idxlist = getUniqueIndices(order); + cout << "Idxlist"; for (auto i : idxlist) cout << i << ", "; diff --git a/tools/taco.cpp b/tools/taco.cpp index 2dd5748b8..926309784 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -898,6 +898,7 @@ int main(int argc, char* argv[]) { try { parser->parse(); tensor = parser->getResultTensor(); + cout << "getResultTensor!!" 
<< endl; cout << tensor; } catch (parser::ParseError& e) { return reportError(e.getMessage(), 6); From 561aa51be8fa124e491e2dcb894d00957efb639b Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 3 Nov 2020 15:25:31 -0800 Subject: [PATCH 04/61] pair programming - LinalgBase, Matrix, Vector dummies --- include/taco/linalg.h | 58 +++++++++++++++++++++++++++++++++++++++++++ src/linalg.cpp | 19 ++++++++++++++ test/tests-linalg.cpp | 39 +++++++++++++++++++++++++++++ 3 files changed, 116 insertions(+) create mode 100644 include/taco/linalg.h create mode 100644 src/linalg.cpp create mode 100644 test/tests-linalg.cpp diff --git a/include/taco/linalg.h b/include/taco/linalg.h new file mode 100644 index 000000000..2cd17adfe --- /dev/null +++ b/include/taco/linalg.h @@ -0,0 +1,58 @@ +#ifndef TACO_LINALG_H +#define TACO_LINALG_H + +#include "taco/type.h" +#include "taco/tensor.h" +#include "taco/format.h" + +#include "taco/linalg_notation/linalg_notation.h" + +namespace taco { + +class LinalgBase : public LinalgExpr { + std::string name; + Datatype ctype; +public: + LinalgBase(std::string name, Datatype ctype); + + /* LinalgBase operator=(LinalgExpr) { */ + /* return (LinalgBase)LinalgExpr; */ + /* } */ + +}; + +// ------------------------------------------------------------ +// Matrix class +// ------------------------------------------------------------ + +template +class Matrix : public LinalgBase { + public: + explicit Matrix(std::string name); +}; + +// ------------------------------------------------------------ +// Matrix template method implementations +// ------------------------------------------------------------ + +template +Matrix::Matrix(std::string name) : LinalgBase(name, type()) {} + +// ------------------------------------------------------------ +// Vector class +// ------------------------------------------------------------ + +template +class Vector : public LinalgBase { + public: + explicit Vector(std::string name); +}; + +// ------------------------------------------------------------ +// Vector template method implementations +// ------------------------------------------------------------ + +template +Vector::Vector(std::string name) : LinalgBase(name, type()) {} +} +#endif diff --git a/src/linalg.cpp b/src/linalg.cpp new file mode 100644 index 000000000..6b8875054 --- /dev/null +++ b/src/linalg.cpp @@ -0,0 +1,19 @@ +#include "taco/linalg.h" + +using namespace std; + +namespace taco { + +// Just trying this out. Need to accept dimensions and format too. 
+/* LinalgBase::LinalgBase(Datatype ctype) */ +/* : LinalgBase(/1* get a unique name *1/, ctype) { */ +/* } */ + +LinalgBase::LinalgBase(string name, Datatype ctype) : name(name), ctype(ctype), LinalgExpr(TensorVar(name, Type(ctype, {42,42}))) { + +} + + + + +} diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp new file mode 100644 index 000000000..f5842e3c4 --- /dev/null +++ b/test/tests-linalg.cpp @@ -0,0 +1,39 @@ +#include "test.h" + +#include "taco/linalg.h" + +using namespace taco; + +TEST(linalg, simplest) { + Matrix B("B"); + Matrix C("C"); + Matrix A("A"); + + /* Vector c("c"); */ + + /* Vector a("a"); */ + + /* for(int i=0;i<42;i++) { */ + /* B.insert({i,i}, 1.0); */ + /* } */ + + /* for(int i=0;i<42;i++) { */ + /* c.insert({i}, (double) i); */ + /* } */ + + /* B.pack(); */ + /* c.pack(); */ + + /* IndexVar i("i"), j("j"); */ + + /* a(i) = B(i,j) * c(j); */ + + /* A = B*C; */ + + B * C; + + /* cout << a << endl; */ + + ASSERT_TRUE(1); +} + From 00ed53d1090f382769fc404b880a65c6224ba5b3 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 4 Nov 2020 22:48:27 -0800 Subject: [PATCH 05/61] Add in shapes and format for matrix and vector API and also assignment stmts --- include/taco/index_notation/index_notation.h | 5 ++ include/taco/linalg.h | 77 ++++++++++++++++--- .../taco/linalg_notation/linalg_notation.h | 30 +++++++- .../linalg_notation/linalg_notation_nodes.h | 60 ++++++++++----- .../linalg_notation_nodes_abstract.h | 14 ++++ .../linalg_notation/linalg_notation_printer.h | 7 +- .../linalg_notation/linalg_notation_visitor.h | 46 ++++++++++- src/linalg.cpp | 15 +++- src/linalg_notation/linalg_notation.cpp | 30 ++++++++ .../linalg_notation_printer.cpp | 10 +++ .../linalg_notation_visitor.cpp | 5 ++ test/tests-linalg.cpp | 6 +- 12 files changed, 264 insertions(+), 41 deletions(-) diff --git a/include/taco/index_notation/index_notation.h b/include/taco/index_notation/index_notation.h index ebc710e28..193bd4831 100644 --- a/include/taco/index_notation/index_notation.h +++ b/include/taco/index_notation/index_notation.h @@ -22,6 +22,8 @@ #include "taco/lower/iterator.h" #include "taco/index_notation/provenance_graph.h" +#include "taco/linalg_notation/linalg_notation_nodes_abstract.h" + namespace taco { class Type; @@ -59,6 +61,9 @@ struct SuchThatNode; class IndexExprVisitorStrict; class IndexStmtVisitorStrict; +struct VarNode; +class LinalgAssignment; + /// A tensor index expression describes a tensor computation as a scalar /// expression where tensors are indexed by index variables (`IndexVar`). 
The /// index variables range over the tensor dimensions they index, and the scalar diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 2cd17adfe..9430a6981 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -6,14 +6,23 @@ #include "taco/format.h" #include "taco/linalg_notation/linalg_notation.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" namespace taco { class LinalgBase : public LinalgExpr { std::string name; - Datatype ctype; + Type tensorType; + + LinalgAssignment assignment; + + typedef VarNode Node; public: - LinalgBase(std::string name, Datatype ctype); + LinalgBase(std::string name, Type tensorType); + LinalgBase(std::string name, Type tensorType, Format format); + /// [LINALG NOTATION] + LinalgAssignment operator=(const LinalgExpr& expr); + /* LinalgBase operator=(LinalgExpr) { */ /* return (LinalgBase)LinalgExpr; */ @@ -27,8 +36,19 @@ class LinalgBase : public LinalgExpr { template class Matrix : public LinalgBase { - public: - explicit Matrix(std::string name); +public: + explicit Matrix(std::string name); + Matrix(std::string name, int dim1, int dim2); + Matrix(std::string name, std::vector dimensions); + Matrix(std::string name, int dim1, int dim2, Format format); + Matrix(std::string name, std::vector dimensions, Format format); + Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2); + Matrix(std::string name, Type tensorType); + Matrix(std::string name, Type tensorType, Format format); + LinalgAssignment operator=(const LinalgExpr& expr) { + return LinalgBase::operator=(expr); + } + }; // ------------------------------------------------------------ @@ -36,16 +56,42 @@ class Matrix : public LinalgBase { // ------------------------------------------------------------ template -Matrix::Matrix(std::string name) : LinalgBase(name, type()) {} - +Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} +template +Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} +template +Matrix::Matrix(std::string name, int dim1, int dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} +template +Matrix::Matrix(std::string name, int dim1, int dim2, Format format) : + LinalgBase(name, Type(type(), {dim1, dim2}), format) {} +template +Matrix::Matrix(std::string name, std::vector dimensions, Format format) : + LinalgBase(name, Type(type(), dimensions), format) {} +template +Matrix::Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2) : + LinalgBase(name, Type(type(), {dim1, dim2}), Format({format1, format2})) {} +template +Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} +template +Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ template class Vector : public LinalgBase { - public: - explicit Vector(std::string name); + std::string name; + Datatype ctype; +public: + explicit Vector(std::string name); + Vector(std::string name, int dim); + Vector(std::string name, int dim, Format format); + Vector(std::string name, int dim, ModeFormat format); + Vector(std::string name, Type type, Format format); + Vector(std::string name, Type type, ModeFormat format); + LinalgAssignment operator=(const LinalgExpr& expr) { + return LinalgBase::operator=(expr); + } }; // 
------------------------------------------------------------ @@ -53,6 +99,19 @@ class Vector : public LinalgBase { // ------------------------------------------------------------ template -Vector::Vector(std::string name) : LinalgBase(name, type()) {} +Vector::Vector(std::string name) : LinalgBase(name, Type(type(), {42})) {} +template +Vector::Vector(std::string name, int dim) : LinalgBase(name, Type(type(), {dim})) {} +template +Vector::Vector(std::string name, int dim, Format format) : LinalgBase(name, Type(type(), {dim}), format) {} +template +Vector::Vector(std::string name, int dim, ModeFormat format) : + LinalgBase(name, Type(type(), {dim}), Format(format)) {} +template +Vector::Vector(std::string name, Type type, Format format) : + LinalgBase(name, type, format) {} +template +Vector::Vector(std::string name, Type type, ModeFormat format) : + LinalgBase(name, type, Format(format)) {} } #endif diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index d052e212a..63c998434 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -40,7 +40,7 @@ class TensorVar; class LinalgExpr; -class Assignment; +class LinalgAssignment; class Access; @@ -140,6 +140,34 @@ LinalgExpr operator*(const LinalgExpr&, const LinalgExpr&); /// Divide a linear expression by another. LinalgExpr operator/(const LinalgExpr&, const LinalgExpr&); +/// A an index statement computes a tensor. The index statements are +/// assignment, forall, where, multi, and sequence. +class LinalgStmt : public util::IntrusivePtr { +public: + LinalgStmt(); + LinalgStmt(const LinalgStmtNode* n); + + /// Visit the tensor expression + void accept(LinalgStmtVisitorStrict *) const; +}; + +class LinalgAssignment : public LinalgStmt { +public: + LinalgAssignment() = default; + LinalgAssignment(const LinalgAssignmentNode*); + + /// Create an assignment. + LinalgAssignment(TensorVar lhs, LinalgExpr rhs); + + /// Return the assignment's left-hand side. + TensorVar getLhs() const; + + /// Return the assignment's right-hand side. + LinalgExpr getRhs() const; + + typedef LinalgAssignmentNode Node; +}; + } #endif //TACO_LINALG_NOTATION_H diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 1ad72c566..07f2047d2 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -25,7 +25,7 @@ namespace taco { v->visit(this); } - virtual void setAssignment(const Assignment& assignment) {} + virtual void setAssignment(const LinalgAssignment& assignment) {} TensorVar tensorVar; }; @@ -145,32 +145,50 @@ struct ElemMulNode : public BinaryExprNode { } }; - struct DivNode : public BinaryExprNode { - DivNode() : BinaryExprNode() {} - DivNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} +struct DivNode : public BinaryExprNode { + DivNode() : BinaryExprNode() {} + DivNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} - std::string getOperatorString() const override{ - return "/"; - } + std::string getOperatorString() const override{ + return "/"; + } - void accept(LinalgExprVisitorStrict* v) const override{ - v->visit(this); - } - }; + void accept(LinalgExprVisitorStrict* v) const override{ + v->visit(this); + } +}; -/// Returns true if expression e is of type E. 
- template - inline bool isa(const LinalgExprNode* e) { - return e != nullptr && dynamic_cast(e) != nullptr; +// Linalg Statements +struct LinalgAssignmentNode : public LinalgStmtNode { + LinalgAssignmentNode(const TensorVar& lhs, const LinalgExpr& rhs) + : lhs(lhs), rhs(rhs) {} + + void accept(LinalgStmtVisitorStrict* v) const { + v->visit(this); } + TensorVar lhs; + LinalgExpr rhs; +}; + +/// Returns true if expression e is of type E. +template +inline bool isa(const LinalgExprNode* e) { + return e != nullptr && dynamic_cast(e) != nullptr; +} + /// Casts the expression e to type E. - template - inline const E* to(const LinalgExprNode* e) { - taco_iassert(isa(e)) << - "Cannot convert " << typeid(e).name() << " to " << typeid(E).name(); - return static_cast(e); - } +template +inline const E* to(const LinalgExprNode* e) { + taco_iassert(isa(e)) << + "Cannot convert " << typeid(e).name() << " to " << typeid(E).name(); + return static_cast(e); +} +template +inline const typename I::Node* getNode(const I& stmt) { + taco_iassert(isa(stmt.ptr)); + return static_cast(stmt.ptr); +} } #endif //TACO_LINALG_NOTATION_NODES_H diff --git a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h index a088d55c8..86353ab73 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h +++ b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h @@ -34,6 +34,20 @@ struct LinalgExprNode : public util::Manageable, Datatype dataType; }; +struct LinalgStmtNode : public util::Manageable, + private util::Uncopyable { +public: + LinalgStmtNode() = default; + LinalgStmtNode(Type type); + virtual ~LinalgStmtNode() = default; + virtual void accept(LinalgStmtVisitorStrict*) const = 0; + + Type getType() const; + +private: + Type type; +}; + } #endif //TACO_LINALG_NOTATION_NODES_ABSTRACT_H diff --git a/include/taco/linalg_notation/linalg_notation_printer.h b/include/taco/linalg_notation/linalg_notation_printer.h index fadbeb546..35c0fd7b2 100644 --- a/include/taco/linalg_notation/linalg_notation_printer.h +++ b/include/taco/linalg_notation/linalg_notation_printer.h @@ -6,11 +6,12 @@ namespace taco { -class LinalgNotationPrinter : public LinalgExprVisitorStrict { +class LinalgNotationPrinter : public LinalgNotationVisitorStrict { public: - LinalgNotationPrinter(std::ostream& os); + explicit LinalgNotationPrinter(std::ostream& os); void print(const LinalgExpr& expr); + void print(const LinalgStmt& expr); using LinalgExprVisitorStrict::visit; @@ -25,6 +26,8 @@ class LinalgNotationPrinter : public LinalgExprVisitorStrict { void visit(const DivNode*); void visit(const TransposeNode*); + void visit(const LinalgAssignmentNode*); + private: std::ostream& os; diff --git a/include/taco/linalg_notation/linalg_notation_visitor.h b/include/taco/linalg_notation/linalg_notation_visitor.h index 5812754a3..1db5e3cce 100644 --- a/include/taco/linalg_notation/linalg_notation_visitor.h +++ b/include/taco/linalg_notation/linalg_notation_visitor.h @@ -3,6 +3,7 @@ namespace taco { class LinalgExpr; +class LinalgStmt; class TensorVar; @@ -18,7 +19,7 @@ struct DivNode; struct UnaryExprNode; struct BinaryExprNode; - +struct LinalgAssignmentNode; /// Visit the nodes in an expression. This visitor provides some type safety /// by requiring all visit methods to be overridden. 
@@ -47,5 +48,48 @@ class LinalgExprVisitorStrict { virtual void visit(const TransposeNode *) = 0; }; +class LinalgStmtVisitorStrict { +public: + virtual ~LinalgStmtVisitorStrict() = default; + + void visit(const LinalgStmt&); + + virtual void visit(const LinalgAssignmentNode*) = 0; +}; + +/// Visit nodes in linalg notation +class LinalgNotationVisitorStrict : public LinalgExprVisitorStrict, + public LinalgStmtVisitorStrict { +public: + virtual ~LinalgNotationVisitorStrict() = default; + + using LinalgExprVisitorStrict::visit; + using LinalgStmtVisitorStrict::visit; +}; + +/// Visit nodes in an expression. +class LinalgNotationVisitor : public LinalgNotationVisitorStrict { +public: + virtual ~LinalgNotationVisitor() = default; + + using LinalgNotationVisitorStrict::visit; + + // Index Expressions + virtual void visit(const VarNode* node); + virtual void visit(const LiteralNode* node); + virtual void visit(const NegNode* node); + virtual void visit(const AddNode* node); + virtual void visit(const SubNode* node); + virtual void visit(const MatMulNode* node); + virtual void visit(const ElemMulNode* node); + virtual void visit(const DivNode* node); + virtual void visit(const UnaryExprNode* node); + virtual void visit(const BinaryExprNode* node); + virtual void visit(const TransposeNode* node); + + // Index Statments + virtual void visit(const LinalgAssignmentNode* node); +}; + } #endif //TACO_LINALG_NOTATION_VISITOR_H diff --git a/src/linalg.cpp b/src/linalg.cpp index 6b8875054..b5ad5ebe4 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -9,11 +9,18 @@ namespace taco { /* : LinalgBase(/1* get a unique name *1/, ctype) { */ /* } */ -LinalgBase::LinalgBase(string name, Datatype ctype) : name(name), ctype(ctype), LinalgExpr(TensorVar(name, Type(ctype, {42,42}))) { - +LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), + LinalgExpr(TensorVar(name, tensorType)) { +} +LinalgBase::LinalgBase(string name, Type tensorType, Format format) : name(name), tensorType(tensorType), + LinalgExpr(TensorVar(name, tensorType, format)) { } - - +LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { + taco_iassert(isa(this->ptr)); + LinalgAssignment assignment = LinalgAssignment(dynamic_cast(this->ptr)->tensorVar, expr); + this->assignment = assignment; + return assignment; +} } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 646f91113..568298ced 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -109,4 +109,34 @@ LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { return new DivNode(lhs, rhs); } + +// class LinalgStmt +LinalgStmt::LinalgStmt() : util::IntrusivePtr(nullptr) { +} + +LinalgStmt::LinalgStmt(const LinalgStmtNode* n) + : util::IntrusivePtr(n) { +} + +void LinalgStmt::accept(LinalgStmtVisitorStrict *v) const { + ptr->accept(v); +} + + +// class LinalgAssignment +LinalgAssignment::LinalgAssignment(const LinalgAssignmentNode* n) : LinalgStmt(n) { +} + +LinalgAssignment::LinalgAssignment(TensorVar lhs, LinalgExpr rhs) + : LinalgAssignment(new LinalgAssignmentNode(lhs, rhs)) { +} + +TensorVar LinalgAssignment::getLhs() const { + return getNode(*this)->lhs; +} + +LinalgExpr LinalgAssignment::getRhs() const { + return getNode(*this)->rhs; +} + } // namespace taco \ No newline at end of file diff --git a/src/linalg_notation/linalg_notation_printer.cpp 
b/src/linalg_notation/linalg_notation_printer.cpp index d682c0975..fa617ef49 100644 --- a/src/linalg_notation/linalg_notation_printer.cpp +++ b/src/linalg_notation/linalg_notation_printer.cpp @@ -13,6 +13,11 @@ void LinalgNotationPrinter::print(const LinalgExpr& expr) { expr.accept(this); } +void LinalgNotationPrinter::print(const LinalgStmt& expr) { + parentPrecedence = Precedence::TOP; + expr.accept(this); +} + void LinalgNotationPrinter::visit(const VarNode* op) { os << op->tensorVar.getName(); } @@ -146,4 +151,9 @@ static inline void acceptJoin(LinalgNotationPrinter* printer, } } +void LinalgNotationPrinter::visit(const LinalgAssignmentNode* op) { + os << op->lhs.getName() << " " << "= "; + op->rhs.accept(this); +} + } diff --git a/src/linalg_notation/linalg_notation_visitor.cpp b/src/linalg_notation/linalg_notation_visitor.cpp index 35c7e6932..0d1925780 100644 --- a/src/linalg_notation/linalg_notation_visitor.cpp +++ b/src/linalg_notation/linalg_notation_visitor.cpp @@ -9,4 +9,9 @@ namespace taco { void LinalgExprVisitorStrict::visit(const LinalgExpr &expr) { expr.accept(this); } + +void LinalgStmtVisitorStrict::visit(const LinalgStmt& stmt) { + stmt.accept(this); +} + } \ No newline at end of file diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index f5842e3c4..d97c3dcdb 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -5,7 +5,7 @@ using namespace taco; TEST(linalg, simplest) { - Matrix B("B"); + Matrix B("B", 2, 2, dense, dense); Matrix C("C"); Matrix A("A"); @@ -30,9 +30,9 @@ TEST(linalg, simplest) { /* A = B*C; */ - B * C; + A = B * C; - /* cout << a << endl; */ + cout << A << endl; ASSERT_TRUE(1); } From 0a5a547f13efb45958966a59ac4c80cc890e423f Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Fri, 6 Nov 2020 11:39:36 -0800 Subject: [PATCH 06/61] Add in code to start rewriting from linalg notation to index notation --- include/taco/linalg.h | 17 +++- .../taco/linalg_notation/linalg_notation.h | 22 ++--- .../linalg_notation/linalg_notation_nodes.h | 72 +++++++------- .../linalg_notation/linalg_notation_printer.h | 18 ++-- .../linalg_notation/linalg_notation_visitor.h | 62 ++++++------ src/linalg.cpp | 94 ++++++++++++++++++- src/linalg_notation/linalg_notation.cpp | 39 ++++---- .../linalg_notation_printer.cpp | 18 ++-- test/tests-linalg.cpp | 3 + 9 files changed, 229 insertions(+), 116 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 9430a6981..8c0705d75 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -7,6 +7,8 @@ #include "taco/linalg_notation/linalg_notation.h" #include "taco/linalg_notation/linalg_notation_nodes.h" +#include "taco/linalg_notation/linalg_notation_printer.h" + namespace taco { @@ -15,21 +17,34 @@ class LinalgBase : public LinalgExpr { Type tensorType; LinalgAssignment assignment; + IndexStmt indexAssignment; + + int idxcount; - typedef VarNode Node; + IndexExpr rewrite(LinalgExpr linalg, std::vector indices); + IndexVar getUniqueIndex(); + std::vector getUniqueIndices(size_t order); public: LinalgBase(std::string name, Type tensorType); LinalgBase(std::string name, Type tensorType, Format format); /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr& expr); + const LinalgAssignment getAssignment() const; + const IndexStmt getIndexAssignment() const; + IndexStmt rewrite(); + typedef LinalgVarNode Node; /* LinalgBase operator=(LinalgExpr) { */ /* return (LinalgBase)LinalgExpr; */ /* } */ }; +std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg); +IndexExpr 
rewrite(LinalgExpr linalg, std::vector); +IndexStmt rewrite(LinalgStmt linalg); + // ------------------------------------------------------------ // Matrix class // ------------------------------------------------------------ diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 63c998434..2f1d855fc 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -44,17 +44,17 @@ class LinalgAssignment; class Access; -struct VarNode; -struct LiteralNode; -struct NegNode; -struct TransposeNode; -struct AddNode; -struct SubNode; -struct MatMulNode; -struct ElemMulNode; -struct DivNode; -struct UnaryExprNode; -struct BinaryExprNode; +struct LinalgVarNode; +struct LinalgLiteralNode; +struct LinalgNegNode; +struct LinalgTransposeNode; +struct LinalgAddNode; +struct LinalgSubNode; +struct LinalgMatMulNode; +struct LinalgElemMulNode; +struct LinalgDivNode; +struct LinalgUnaryExprNode; +struct LinalgBinaryExprNode; class LinalgExprVisitorStrict; diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 07f2047d2..413dabbe7 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -17,8 +17,8 @@ namespace taco { - struct VarNode : public LinalgExprNode { - VarNode(TensorVar tensorVar) + struct LinalgVarNode : public LinalgExprNode { + LinalgVarNode(TensorVar tensorVar) : LinalgExprNode(tensorVar.getType().getDataType()), tensorVar(tensorVar) {} void accept(LinalgExprVisitorStrict* v) const override { @@ -30,13 +30,13 @@ namespace taco { TensorVar tensorVar; }; - struct LiteralNode : public LinalgExprNode { - template LiteralNode(T val) : LinalgExprNode(type()) { + struct LinalgLiteralNode : public LinalgExprNode { + template LinalgLiteralNode(T val) : LinalgExprNode(type()) { this->val = malloc(sizeof(T)); *static_cast(this->val) = val; } - ~LiteralNode() { + ~LinalgLiteralNode() { free(val); } @@ -54,46 +54,46 @@ namespace taco { }; - struct UnaryExprNode : public LinalgExprNode { + struct LinalgUnaryExprNode : public LinalgExprNode { LinalgExpr a; protected: - UnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType()), a(a) {} + LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType()), a(a) {} }; - struct NegNode : public UnaryExprNode { - NegNode(LinalgExpr operand) : UnaryExprNode(operand) {} + struct LinalgNegNode : public LinalgUnaryExprNode { + LinalgNegNode(LinalgExpr operand) : LinalgUnaryExprNode(operand) {} void accept(LinalgExprVisitorStrict* v) const override{ v->visit(this); } }; - struct TransposeNode : public UnaryExprNode { - TransposeNode(LinalgExpr operand) : UnaryExprNode(operand) {} + struct LinalgTransposeNode : public LinalgUnaryExprNode { + LinalgTransposeNode(LinalgExpr operand) : LinalgUnaryExprNode(operand) {} void accept (LinalgExprVisitorStrict* v) const override{ v->visit(this); } }; - struct BinaryExprNode : public LinalgExprNode { + struct LinalgBinaryExprNode : public LinalgExprNode { virtual std::string getOperatorString() const = 0; LinalgExpr a; LinalgExpr b; protected: - BinaryExprNode() : LinalgExprNode() {} - BinaryExprNode(LinalgExpr a, LinalgExpr b) + LinalgBinaryExprNode() : LinalgExprNode() {} + LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b) : LinalgExprNode(max_type(a.getDataType(), b.getDataType())), a(a), b(b) {} }; - struct AddNode : public BinaryExprNode { - AddNode() : BinaryExprNode() {} 
- AddNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + struct LinalgAddNode : public LinalgBinaryExprNode { + LinalgAddNode() : LinalgBinaryExprNode() {} + LinalgAddNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} std::string getOperatorString() const override{ return "+"; @@ -105,9 +105,9 @@ namespace taco { }; - struct SubNode : public BinaryExprNode { - SubNode() : BinaryExprNode() {} - SubNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + struct LinalgSubNode : public LinalgBinaryExprNode { + LinalgSubNode() : LinalgBinaryExprNode() {} + LinalgSubNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} std::string getOperatorString() const override{ return "-"; @@ -119,9 +119,9 @@ namespace taco { }; - struct MatMulNode : public BinaryExprNode { - MatMulNode() : BinaryExprNode() {} - MatMulNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} + struct LinalgMatMulNode : public LinalgBinaryExprNode { + LinalgMatMulNode() : LinalgBinaryExprNode() {} + LinalgMatMulNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} std::string getOperatorString() const override{ return "*"; @@ -132,9 +132,9 @@ namespace taco { } }; -struct ElemMulNode : public BinaryExprNode { - ElemMulNode() : BinaryExprNode() {} - ElemMulNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} +struct LinalgElemMulNode : public LinalgBinaryExprNode { + LinalgElemMulNode() : LinalgBinaryExprNode() {} + LinalgElemMulNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} std::string getOperatorString() const override{ return "elemMul"; @@ -145,9 +145,9 @@ struct ElemMulNode : public BinaryExprNode { } }; -struct DivNode : public BinaryExprNode { - DivNode() : BinaryExprNode() {} - DivNode(LinalgExpr a, LinalgExpr b) : BinaryExprNode(a, b) {} +struct LinalgDivNode : public LinalgBinaryExprNode { + LinalgDivNode() : LinalgBinaryExprNode() {} + LinalgDivNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} std::string getOperatorString() const override{ return "/"; @@ -185,10 +185,16 @@ inline const E* to(const LinalgExprNode* e) { return static_cast(e); } -template -inline const typename I::Node* getNode(const I& stmt) { - taco_iassert(isa(stmt.ptr)); - return static_cast(stmt.ptr); +/// Returns true if statement e is of type S. 
+template +inline bool isa(const LinalgStmtNode* s) { + return s != nullptr && dynamic_cast(s) != nullptr; } + +//template +//inline const typename I::Node* getNode(const I& stmt) { +// taco_iassert(isa(stmt.ptr)); +// return static_cast(stmt.ptr); +//} } #endif //TACO_LINALG_NOTATION_NODES_H diff --git a/include/taco/linalg_notation/linalg_notation_printer.h b/include/taco/linalg_notation/linalg_notation_printer.h index 35c0fd7b2..6c32bfa92 100644 --- a/include/taco/linalg_notation/linalg_notation_printer.h +++ b/include/taco/linalg_notation/linalg_notation_printer.h @@ -16,15 +16,15 @@ class LinalgNotationPrinter : public LinalgNotationVisitorStrict { using LinalgExprVisitorStrict::visit; // Scalar Expressions - void visit(const VarNode*); - void visit(const LiteralNode*); - void visit(const NegNode*); - void visit(const AddNode*); - void visit(const SubNode*); - void visit(const MatMulNode*); - void visit(const ElemMulNode*); - void visit(const DivNode*); - void visit(const TransposeNode*); + void visit(const LinalgVarNode*); + void visit(const LinalgLiteralNode*); + void visit(const LinalgNegNode*); + void visit(const LinalgAddNode*); + void visit(const LinalgSubNode*); + void visit(const LinalgMatMulNode*); + void visit(const LinalgElemMulNode*); + void visit(const LinalgDivNode*); + void visit(const LinalgTransposeNode*); void visit(const LinalgAssignmentNode*); diff --git a/include/taco/linalg_notation/linalg_notation_visitor.h b/include/taco/linalg_notation/linalg_notation_visitor.h index 1db5e3cce..a53178fca 100644 --- a/include/taco/linalg_notation/linalg_notation_visitor.h +++ b/include/taco/linalg_notation/linalg_notation_visitor.h @@ -7,17 +7,17 @@ class LinalgStmt; class TensorVar; -struct VarNode; -struct LiteralNode; -struct NegNode; -struct TransposeNode; -struct AddNode; -struct SubNode; -struct MatMulNode; -struct ElemMulNode; -struct DivNode; -struct UnaryExprNode; -struct BinaryExprNode; +struct LinalgVarNode; +struct LinalgLiteralNode; +struct LinalgNegNode; +struct LinalgTransposeNode; +struct LinalgAddNode; +struct LinalgSubNode; +struct LinalgMatMulNode; +struct LinalgElemMulNode; +struct LinalgDivNode; +struct LinalgUnaryExprNode; +struct LinalgBinaryExprNode; struct LinalgAssignmentNode; @@ -29,23 +29,23 @@ class LinalgExprVisitorStrict { void visit(const LinalgExpr &); - virtual void visit(const VarNode *) = 0; + virtual void visit(const LinalgVarNode *) = 0; - virtual void visit(const LiteralNode *) = 0; + virtual void visit(const LinalgLiteralNode *) = 0; - virtual void visit(const NegNode *) = 0; + virtual void visit(const LinalgNegNode *) = 0; - virtual void visit(const AddNode *) = 0; + virtual void visit(const LinalgAddNode *) = 0; - virtual void visit(const SubNode *) = 0; + virtual void visit(const LinalgSubNode *) = 0; - virtual void visit(const MatMulNode *) = 0; + virtual void visit(const LinalgMatMulNode *) = 0; - virtual void visit(const ElemMulNode *) = 0; + virtual void visit(const LinalgElemMulNode *) = 0; - virtual void visit(const DivNode *) = 0; + virtual void visit(const LinalgDivNode *) = 0; - virtual void visit(const TransposeNode *) = 0; + virtual void visit(const LinalgTransposeNode *) = 0; }; class LinalgStmtVisitorStrict { @@ -75,17 +75,17 @@ class LinalgNotationVisitor : public LinalgNotationVisitorStrict { using LinalgNotationVisitorStrict::visit; // Index Expressions - virtual void visit(const VarNode* node); - virtual void visit(const LiteralNode* node); - virtual void visit(const NegNode* node); - virtual void visit(const AddNode* 
node); - virtual void visit(const SubNode* node); - virtual void visit(const MatMulNode* node); - virtual void visit(const ElemMulNode* node); - virtual void visit(const DivNode* node); - virtual void visit(const UnaryExprNode* node); - virtual void visit(const BinaryExprNode* node); - virtual void visit(const TransposeNode* node); + virtual void visit(const LinalgVarNode* node); + virtual void visit(const LinalgLiteralNode* node); + virtual void visit(const LinalgNegNode* node); + virtual void visit(const LinalgAddNode* node); + virtual void visit(const LinalgSubNode* node); + virtual void visit(const LinalgMatMulNode* node); + virtual void visit(const LinalgElemMulNode* node); + virtual void visit(const LinalgDivNode* node); + virtual void visit(const LinalgUnaryExprNode* node); + virtual void visit(const LinalgBinaryExprNode* node); + virtual void visit(const LinalgTransposeNode* node); // Index Statments virtual void visit(const LinalgAssignmentNode* node); diff --git a/src/linalg.cpp b/src/linalg.cpp index b5ad5ebe4..f84a34d56 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -1,5 +1,9 @@ #include "taco/linalg.h" +#include "taco/index_notation/index_notation.h" +#include "taco/index_notation/index_notation_nodes.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" + using namespace std; namespace taco { @@ -9,18 +13,102 @@ namespace taco { /* : LinalgBase(/1* get a unique name *1/, ctype) { */ /* } */ -LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), +LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), idxcount(0), LinalgExpr(TensorVar(name, tensorType)) { } -LinalgBase::LinalgBase(string name, Type tensorType, Format format) : name(name), tensorType(tensorType), +LinalgBase::LinalgBase(string name, Type tensorType, Format format) : name(name), tensorType(tensorType), idxcount(0), LinalgExpr(TensorVar(name, tensorType, format)) { } LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); - LinalgAssignment assignment = LinalgAssignment(dynamic_cast(this->ptr)->tensorVar, expr); + LinalgAssignment assignment = LinalgAssignment(to(this->get())->tensorVar, expr); this->assignment = assignment; return assignment; } +const LinalgAssignment LinalgBase::getAssignment() const{ + return this->assignment; +} +const IndexStmt LinalgBase::getIndexAssignment() const { + if (this->indexAssignment.defined()) { + return this->indexAssignment; + } + return IndexStmt(); +} + +vector LinalgBase::getUniqueIndices(size_t order) { + vector result; + for (int i = idxcount; i < (idxcount + (int)order); i++) { + cout << i << ": "; + string name = "i" + to_string(i); + IndexVar indexVar(name); + result.push_back(indexVar); + } + idxcount += order; + return result; +} + +IndexVar LinalgBase::getUniqueIndex() { + string name = "i" + to_string(idxcount); + idxcount += 1; + IndexVar result(name); + return result; +} + +IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { + if (isa(linalg.get())) { + const LinalgSubNode* sub = to(linalg.get()); + IndexExpr indexA = rewrite(sub->a, indices); + IndexExpr indexB = rewrite(sub->b, indices); + return new SubNode(indexA, indexB); + } else if (isa(linalg.get())) { + const LinalgMatMulNode* mul = to(linalg.get()); + IndexVar index = getUniqueIndex(); + IndexExpr indexA = rewrite(mul->a, {indices[0], index}); + IndexExpr indexB = rewrite(mul->b, {index, indices[1]}); + return new MulNode(indexA, indexB); + } else if 
(isa(linalg.get())) { + const LinalgVarNode* var = to(linalg.get()); + return new AccessNode(var->tensorVar, indices); + } + return IndexExpr(); +} + +IndexStmt rewrite(LinalgStmt linalg) { + return IndexStmt(); +} + +IndexStmt LinalgBase::rewrite() { + if (this->assignment.defined()) { + + TensorVar tensor = this->assignment.getLhs(); + + vector indices; + if (tensor.getOrder() == 1) { + indices.push_back(IndexVar("i")); + } else if (tensor.getOrder() == 2) { + indices.push_back(IndexVar("i")); + indices.push_back(IndexVar("j")); + } + Access lhs = Access(tensor, indices); + IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); + + Assignment indexAssign = Assignment(lhs, rhs); + this->indexAssignment = indexAssign; + return indexAssign; + } + return IndexStmt(); +} + + + +std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) { + LinalgAssignment assignment = linalg.getAssignment(); + if (!assignment.defined()) return os << getNode(linalg)->tensorVar.getName(); + LinalgNotationPrinter printer(os); + printer.print(assignment); + return os; +} + } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 568298ced..3e4e00526 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -17,6 +17,7 @@ #include "taco/index_notation/intrinsic.h" #include "taco/index_notation/schedule.h" #include "taco/index_notation/transformations.h" +#include "taco/index_notation/index_notation_nodes.h" #include "taco/linalg_notation/linalg_notation_nodes.h" #include "taco/index_notation/index_notation_rewriter.h" #include "taco/linalg_notation/linalg_notation_printer.h" @@ -33,46 +34,46 @@ using namespace std; namespace taco { -LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new VarNode(var)) { +LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new LinalgVarNode(var)) { } -LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(int8_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(int8_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(int16_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(int16_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(int32_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(int32_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(int64_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(int64_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(uint8_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(uint8_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(uint16_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(uint16_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(uint32_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(uint32_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(uint64_t val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(uint64_t val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(float val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(float val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(double val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(double 
val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LinalgLiteralNode(val)) { } -LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LiteralNode(val)) { +LinalgExpr::LinalgExpr(std::complex val) : LinalgExpr(new LinalgLiteralNode(val)) { } Datatype LinalgExpr::getDataType() const { @@ -91,23 +92,23 @@ std::ostream& operator<<(std::ostream& os, const LinalgExpr& expr) { } LinalgExpr operator-(const LinalgExpr &expr) { - return new NegNode(expr.ptr); + return new LinalgNegNode(expr.ptr); } LinalgExpr operator+(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new AddNode(lhs, rhs); + return new LinalgAddNode(lhs, rhs); } LinalgExpr operator-(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new SubNode(lhs, rhs); + return new LinalgSubNode(lhs, rhs); } LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new MatMulNode(lhs, rhs); + return new LinalgMatMulNode(lhs, rhs); } LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new DivNode(lhs, rhs); + return new LinalgDivNode(lhs, rhs); } // class LinalgStmt diff --git a/src/linalg_notation/linalg_notation_printer.cpp b/src/linalg_notation/linalg_notation_printer.cpp index fa617ef49..7dcb246e9 100644 --- a/src/linalg_notation/linalg_notation_printer.cpp +++ b/src/linalg_notation/linalg_notation_printer.cpp @@ -18,11 +18,11 @@ void LinalgNotationPrinter::print(const LinalgStmt& expr) { expr.accept(this); } -void LinalgNotationPrinter::visit(const VarNode* op) { +void LinalgNotationPrinter::visit(const LinalgVarNode* op) { os << op->tensorVar.getName(); } -void LinalgNotationPrinter::visit(const LiteralNode* op) { +void LinalgNotationPrinter::visit(const LinalgLiteralNode* op) { switch (op->getDataType().getKind()) { case Datatype::Bool: os << op->getVal(); @@ -74,7 +74,7 @@ void LinalgNotationPrinter::visit(const LiteralNode* op) { } } -void LinalgNotationPrinter::visit(const NegNode* op) { +void LinalgNotationPrinter::visit(const LinalgNegNode* op) { Precedence precedence = Precedence::NEG; bool parenthesize = precedence > parentPrecedence; parentPrecedence = precedence; @@ -88,7 +88,7 @@ void LinalgNotationPrinter::visit(const NegNode* op) { } } -void LinalgNotationPrinter::visit(const TransposeNode* op) { +void LinalgNotationPrinter::visit(const LinalgTransposeNode* op) { Precedence precedence = Precedence::TRANSPOSE; bool parenthesize = precedence > parentPrecedence; parentPrecedence = precedence; @@ -118,23 +118,23 @@ void LinalgNotationPrinter::visitBinary(Node op, Precedence precedence) { } } -void LinalgNotationPrinter::visit(const AddNode* op) { +void LinalgNotationPrinter::visit(const LinalgAddNode* op) { visitBinary(op, Precedence::ADD); } -void LinalgNotationPrinter::visit(const SubNode* op) { +void LinalgNotationPrinter::visit(const LinalgSubNode* op) { visitBinary(op, Precedence::SUB); } -void LinalgNotationPrinter::visit(const MatMulNode* op) { +void LinalgNotationPrinter::visit(const LinalgMatMulNode* op) { visitBinary(op, Precedence::MATMUL); } -void LinalgNotationPrinter::visit(const ElemMulNode* op) { +void LinalgNotationPrinter::visit(const LinalgElemMulNode* op) { visitBinary(op, Precedence::ELEMMUL); } -void LinalgNotationPrinter::visit(const DivNode* op) { +void LinalgNotationPrinter::visit(const LinalgDivNode* op) { visitBinary(op, Precedence::DIV); } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 
d97c3dcdb..2552c6108 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -34,6 +34,9 @@ TEST(linalg, simplest) { cout << A << endl; + A.rewrite(); + cout << A.getIndexAssignment(); + ASSERT_TRUE(1); } From d3a6d836b7e63283d9c845e28b09f9906bdf3e86 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Fri, 6 Nov 2020 16:58:23 -0800 Subject: [PATCH 07/61] squash init order warning, add dummy at method for Matrix, use size_t dims --- .gitignore | 1 + include/taco/linalg.h | 51 ++++++++++++++++++++++++++----------------- src/linalg.cpp | 9 ++++---- test/tests-linalg.cpp | 4 ++++ 4 files changed, 41 insertions(+), 24 deletions(-) diff --git a/.gitignore b/.gitignore index 16389f34e..5511edc86 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ CMakeCache.txt doc apps/tensor_times_vector/tensor_times_vector +tags diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 9430a6981..bdbc43566 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -14,6 +14,9 @@ class LinalgBase : public LinalgExpr { std::string name; Type tensorType; + // The associated tensor + TensorBase tbase; + LinalgAssignment assignment; typedef VarNode Node; @@ -23,10 +26,9 @@ class LinalgBase : public LinalgExpr { /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr& expr); - - /* LinalgBase operator=(LinalgExpr) { */ - /* return (LinalgBase)LinalgExpr; */ - /* } */ + void ping() { + std::cout << "ping" << std::endl; + } }; @@ -38,17 +40,20 @@ template class Matrix : public LinalgBase { public: explicit Matrix(std::string name); - Matrix(std::string name, int dim1, int dim2); - Matrix(std::string name, std::vector dimensions); - Matrix(std::string name, int dim1, int dim2, Format format); - Matrix(std::string name, std::vector dimensions, Format format); - Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2); + Matrix(std::string name, size_t dim1, size_t dim2); + Matrix(std::string name, std::vector dimensions); + Matrix(std::string name, size_t dim1, size_t dim2, Format format); + Matrix(std::string name, std::vector dimensions, Format format); + Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2); Matrix(std::string name, Type tensorType); Matrix(std::string name, Type tensorType, Format format); LinalgAssignment operator=(const LinalgExpr& expr) { return LinalgBase::operator=(expr); } + // Support some Read methods + CType at(const size_t coord_x, const size_t coord_y); + }; // ------------------------------------------------------------ @@ -58,22 +63,28 @@ class Matrix : public LinalgBase { template Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} template -Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} +Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} template -Matrix::Matrix(std::string name, int dim1, int dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} +Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} template -Matrix::Matrix(std::string name, int dim1, int dim2, Format format) : +Matrix::Matrix(std::string name, size_t dim1, size_t dim2, Format format) : LinalgBase(name, Type(type(), {dim1, dim2}), format) {} template -Matrix::Matrix(std::string name, std::vector dimensions, Format format) : +Matrix::Matrix(std::string name, std::vector dimensions, Format format) : LinalgBase(name, 
Type(type(), dimensions), format) {} template -Matrix::Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2) : +Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2) : LinalgBase(name, Type(type(), {dim1, dim2}), Format({format1, format2})) {} template Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} template Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} + +// Definition of Read methods +template +CType Matrix::at(const size_t coord_x, const size_t coord_y) { + return 0; +} // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ @@ -84,9 +95,9 @@ class Vector : public LinalgBase { Datatype ctype; public: explicit Vector(std::string name); - Vector(std::string name, int dim); - Vector(std::string name, int dim, Format format); - Vector(std::string name, int dim, ModeFormat format); + Vector(std::string name, size_t dim); + Vector(std::string name, size_t dim, Format format); + Vector(std::string name, size_t dim, ModeFormat format); Vector(std::string name, Type type, Format format); Vector(std::string name, Type type, ModeFormat format); LinalgAssignment operator=(const LinalgExpr& expr) { @@ -101,11 +112,11 @@ class Vector : public LinalgBase { template Vector::Vector(std::string name) : LinalgBase(name, Type(type(), {42})) {} template -Vector::Vector(std::string name, int dim) : LinalgBase(name, Type(type(), {dim})) {} +Vector::Vector(std::string name, size_t dim) : LinalgBase(name, Type(type(), {dim})) {} template -Vector::Vector(std::string name, int dim, Format format) : LinalgBase(name, Type(type(), {dim}), format) {} +Vector::Vector(std::string name, size_t dim, Format format) : LinalgBase(name, Type(type(), {dim}), format) {} template -Vector::Vector(std::string name, int dim, ModeFormat format) : +Vector::Vector(std::string name, size_t dim, ModeFormat format) : LinalgBase(name, Type(type(), {dim}), Format(format)) {} template Vector::Vector(std::string name, Type type, Format format) : diff --git a/src/linalg.cpp b/src/linalg.cpp index b5ad5ebe4..5e0f44508 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -9,15 +9,16 @@ namespace taco { /* : LinalgBase(/1* get a unique name *1/, ctype) { */ /* } */ -LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), - LinalgExpr(TensorVar(name, tensorType)) { +LinalgBase::LinalgBase(string name, Type tensorType) : LinalgExpr(TensorVar(name, tensorType)), name(name), tensorType(tensorType) + { } -LinalgBase::LinalgBase(string name, Type tensorType, Format format) : name(name), tensorType(tensorType), - LinalgExpr(TensorVar(name, tensorType, format)) { +LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) + { } LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); + cout << "LinalgBase operator=" << endl; LinalgAssignment assignment = LinalgAssignment(dynamic_cast(this->ptr)->tensorVar, expr); this->assignment = assignment; return assignment; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index d97c3dcdb..c4260a3f0 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -30,8 +30,12 @@ TEST(linalg, simplest) { /* A = B*C; */ + B.ping(); + A = B * C; + cout << "A(0,0): " << A.at(0,0) << endl; 
+ cout << A << endl; ASSERT_TRUE(1); From da5dbc27e04efa34e46224581a1c32985c6e18d8 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Sun, 8 Nov 2020 11:38:26 -0800 Subject: [PATCH 08/61] init-ed a tensorbase inside the linalgbase --- include/taco/linalg.h | 22 +++++++++++++++++----- src/linalg.cpp | 24 +++++++++++++++++++++++- test/tests-linalg.cpp | 6 +++--- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index bdbc43566..1c97957cf 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -11,11 +11,12 @@ namespace taco { class LinalgBase : public LinalgExpr { +protected: std::string name; Type tensorType; // The associated tensor - TensorBase tbase; + TensorBase *tbase; LinalgAssignment assignment; @@ -27,7 +28,7 @@ class LinalgBase : public LinalgExpr { LinalgAssignment operator=(const LinalgExpr& expr); void ping() { - std::cout << "ping" << std::endl; + std::cout << name << ".ping()" << std::endl; } }; @@ -52,7 +53,7 @@ class Matrix : public LinalgBase { } // Support some Read methods - CType at(const size_t coord_x, const size_t coord_y); + CType at(int coord_x, int coord_y); }; @@ -82,8 +83,19 @@ Matrix::Matrix(std::string name, Type tensorType, Format format) : Linalg // Definition of Read methods template -CType Matrix::at(const size_t coord_x, const size_t coord_y) { - return 0; +CType Matrix::at(int coord_x, int coord_y) { + std::cout << "Name: " << name << std::endl; + std::cout << tbase << std::endl; + std::cout << "Matrix found a TBase " << tbase->getName() << std::endl; + std::cout << "Will print a coordinate" << std::endl; + + + // Check if this LinalgBase holds an assignment + if (this->assignment.ptr != NULL) { + std::cout << "This matrix is the result of an assignment" << std::endl; + } + + return tbase->at({coord_x, coord_y}); } // ------------------------------------------------------------ // Vector class diff --git a/src/linalg.cpp b/src/linalg.cpp index 5e0f44508..c8fe8d58d 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -14,13 +14,35 @@ LinalgBase::LinalgBase(string name, Type tensorType) : LinalgExpr(TensorVar(name } LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { + // Unpack the type and shape + Datatype type = tensorType.getDataType(); + Shape shape = tensorType.getShape(); + vector dimensions(shape.begin(), shape.end()); + vector dims; + for(const Dimension& d : dimensions) { + dims.push_back((int)d.getSize()); + } + + // Init a TensorBase + tbase = new TensorBase(name, type, dims, format); + + cout << "Created TensorBase " << tbase->getName() << endl; + cout << tbase << endl; } + LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); - cout << "LinalgBase operator=" << endl; + cout << "LinalgBase operator= on " << name << endl; LinalgAssignment assignment = LinalgAssignment(dynamic_cast(this->ptr)->tensorVar, expr); + /* cout << "this assignment ptr: " << this->assignment.ptr << endl; */ this->assignment = assignment; + /* cout << "this assignment ptr: " << this->assignment.ptr << endl; */ + + // Now that the assignment is made we should run the index-assignment algorithm + + // Start by trying to print out the whole expression tree + return assignment; } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index c4260a3f0..508847df6 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -6,8 +6,8 @@ using namespace taco; 
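The test hunk below gives C and A concrete dimensions as well, because reads now go through real storage: the Format-taking LinalgBase constructor above sizes a TensorBase from the Type's shape, and Matrix<CType>::at() forwards to it. A minimal sketch of that path with illustrative values:

  Matrix<double> B("B", 2, 2, dense, dense);
  // The LinalgBase constructor effectively does:
  //   std::vector<int> dims = {2, 2};   // extracted from tensorType.getShape()
  //   tbase = new TensorBase("B", type<double>(), dims, Format({dense, dense}));
  double v = B.at(0, 0);                 // reads back tbase->at<double>({0, 0})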
TEST(linalg, simplest) { Matrix B("B", 2, 2, dense, dense); - Matrix C("C"); - Matrix A("A"); + Matrix C("C", 2, 2, dense, dense); + Matrix A("A", 2, 2, dense, dense); /* Vector c("c"); */ @@ -34,7 +34,7 @@ TEST(linalg, simplest) { A = B * C; - cout << "A(0,0): " << A.at(0,0) << endl; + cout << "B(0,0): " << B.at(0,0) << endl; cout << A << endl; From ef59d4beae0d028dbbda870e8ba5e8ca8e49c538 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Sun, 8 Nov 2020 16:35:11 -0800 Subject: [PATCH 09/61] poking around at the Tensor API --- src/index_notation/index_notation.cpp | 3 +++ test/tests-linalg.cpp | 37 +++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/src/index_notation/index_notation.cpp b/src/index_notation/index_notation.cpp index 1f857a5fc..1d7f2300f 100644 --- a/src/index_notation/index_notation.cpp +++ b/src/index_notation/index_notation.cpp @@ -769,6 +769,7 @@ static void check(Assignment assignment) { } Assignment Access::operator=(const IndexExpr& expr) { + cout << "Main Access::operator= called" << endl; TensorVar result = getTensorVar(); Assignment assignment = Assignment(*this, expr); check(assignment); @@ -777,10 +778,12 @@ Assignment Access::operator=(const IndexExpr& expr) { } Assignment Access::operator=(const Access& expr) { + cout << "accessexpr Access::operator= called" << endl; return operator=(static_cast(expr)); } Assignment Access::operator=(const TensorVar& var) { + cout << "tensorvaraccess Access::operator= called" << endl; return operator=(Access(var)); } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 508847df6..fdedc6af0 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -41,3 +41,40 @@ TEST(linalg, simplest) { ASSERT_TRUE(1); } +TEST(linalg, tensorapi) { + cout << "--- Beginning of TensorAPI test ---" << endl; + Tensor a({2,2}, dense); + Tensor b({2,3}, dense); + Tensor c({3,2}, dense); + + cout << "--- Initialized Tensors ---" << endl; + + b(0,0) = 2; + b(1,1) = 1; + b(0,1) = 2; + + cout << "--- Initializing c ---" << endl; + + c(0,0) = 2; + c(1,1) = 2; + + cout << "--- Declaring IndexVars ---" << endl; + + IndexVar i,j,k; + + // The original + /* a(i,j) = b(i,k) * c(k,j); */ + + // The broken-up version + cout << "--- Creating operand IndexExprs ---" << endl; + + IndexExpr tc = c(k,j); + IndexExpr tb = b(i,k); + + cout << "Pre-assignment" << endl; + a(i,j) = tb * tc; + cout << "Post-assignment" << endl; + + /* cout << a << endl; */ +} + From e03995252bdbbdca3177f5136210d93bad2f550a Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Sun, 8 Nov 2020 21:17:22 -0800 Subject: [PATCH 10/61] declarations for passing in a TensorBase ptr to the LinalgExpr --- include/taco/linalg.h | 1 + include/taco/linalg_notation/linalg_notation.h | 4 ++++ .../taco/linalg_notation/linalg_notation_nodes.h | 16 ++++++++++++++++ src/linalg.cpp | 9 +++++++++ src/linalg_notation/linalg_notation.cpp | 5 ++++- 5 files changed, 34 insertions(+), 1 deletion(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 7e4f3ee85..caaa36832 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -31,6 +31,7 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType); LinalgBase(std::string name, Type tensorType, Format format); + LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format); /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr& expr); const LinalgAssignment getAssignment() const; diff --git 
a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 2f1d855fc..f5d87c80b 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -26,6 +26,8 @@ #include "taco/linalg_notation/linalg_notation_nodes_abstract.h" +#include "taco/tensor.h" + namespace taco { class Type; @@ -71,6 +73,8 @@ class LinalgExpr : public util::IntrusivePtr { /// ``` LinalgExpr(TensorVar); + LinalgExpr(TensorVar, TensorBase* tensorBase); + /// Consturct an integer literal. /// ``` /// A(i,j) = 1; diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 413dabbe7..6bd21a4d3 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -14,6 +14,8 @@ #include "taco/linalg_notation/linalg_notation_nodes_abstract.h" #include "taco/linalg_notation/linalg_notation_visitor.h" +#include "taco/tensor.h" + namespace taco { @@ -30,6 +32,20 @@ namespace taco { TensorVar tensorVar; }; + struct LinalgTensorBaseNode : public LinalgExprNode { + LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase) + : LinalgExprNode(tensorVar.getType().getDataType()), tensorVar(tensorVar), tensorBase(tensorBase) {} + + void accept(LinalgExprVisitorStrict* v) const override { + v->visit(this); + } + + virtual void setAssignment(const LinalgAssignment& assignment) {} + + TensorVar tensorVar; + TensorBase* tensorBase; + }; + struct LinalgLiteralNode : public LinalgExprNode { template LinalgLiteralNode(T val) : LinalgExprNode(type()) { this->val = malloc(sizeof(T)); diff --git a/src/linalg.cpp b/src/linalg.cpp index ab6af5126..29fbc35f6 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -16,6 +16,10 @@ namespace taco { LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), idxcount(0), LinalgExpr(TensorVar(name, tensorType)) { } + +LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { + +} LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { // Unpack the type and shape Datatype type = tensorType.getDataType(); @@ -31,6 +35,11 @@ LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr cout << "Created TensorBase " << tbase->getName() << endl; cout << tbase << endl; + + // Attach this TensorBase to the node + /* dynamic_cast(this->ptr)->setTensorBase(tbase); */ + /* dynamic_cast(this->ptr)->setTensorBase(tbase); */ + /* to(this->get())->setTensorBase(tbase); */ } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 3e4e00526..21757eb91 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -37,6 +37,9 @@ namespace taco { LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new LinalgVarNode(var)) { } +LinalgExpr::LinalgExpr(TensorVar var, TensorBase* tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, tensorBase)) { +} + LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LinalgLiteralNode(val)) { } @@ -140,4 +143,4 @@ LinalgExpr LinalgAssignment::getRhs() const { return getNode(*this)->rhs; } -} // namespace taco \ No newline at end of file +} // namespace taco From 3e1ed50adb9a8c0ca3c9b469a9745735e30e72f2 Mon Sep 
17 00:00:00 2001 From: Matthew Lee Date: Mon, 9 Nov 2020 22:41:27 -0800 Subject: [PATCH 11/61] hopeful signs - getting tensorBase --- include/taco/linalg.h | 4 ++-- include/taco/linalg_notation/linalg_notation.h | 2 ++ src/linalg.cpp | 15 ++++++++++++++- src/linalg_notation/linalg_notation.cpp | 3 ++- test/tests-linalg.cpp | 8 +++++++- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index caaa36832..72662782b 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -31,7 +31,7 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType); LinalgBase(std::string name, Type tensorType, Format format); - LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format); + LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format); /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr& expr); const LinalgAssignment getAssignment() const; @@ -95,7 +95,7 @@ Matrix::Matrix(std::string name, std::vector dimensions, Format f LinalgBase(name, Type(type(), dimensions), format) {} template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2) : - LinalgBase(name, Type(type(), {dim1, dim2}), Format({format1, format2})) {} + LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int)dim1, (int)dim2}, Format({format1, format2})) {} template Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} template diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index f5d87c80b..ba32ace24 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -124,6 +124,8 @@ class LinalgExpr : public util::IntrusivePtr { /// Print the index expression. friend std::ostream &operator<<(std::ostream &, const LinalgExpr &); + + TensorBase *tensorBase; }; /// Compare two index expressions by value. 
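With tensorBase stored on LinalgExpr itself, every linear algebra operand now carries two handles: the TensorVar that drives notation and type checking, and the TensorBase that owns the actual storage the rewriter will read and write. A sketch of how the dims-based constructor above wires the two together (names and dimensions are illustrative):

  void operandSketch() {
    using namespace taco;
    Format      fmt({dense, dense});
    Type        mtype(type<double>(), {2, 2});
    TensorVar   var("B", mtype, fmt);                      // notation-level handle
    TensorBase* storage =
        new TensorBase("B", type<double>(), {2, 2}, fmt);  // storage-level handle
    LinalgExpr  B(var, storage);  // wraps a LinalgTensorBaseNode; B.tensorBase == storage
  }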
diff --git a/src/linalg.cpp b/src/linalg.cpp index 29fbc35f6..2bc5011bb 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -17,7 +17,20 @@ LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(te LinalgExpr(TensorVar(name, tensorType)) { } -LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { +LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format) : LinalgExpr(TensorVar(name, tensorType, format), new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType) { + + cout << "Called constructor that uses dims dims" << endl; + + + if(isa(ptr)) { + cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; + // This is problematic because of const correctness + /* LinalgTensorBaseNode* tnode = to(ptr); */ + cout << this->tensorBase->getName() << endl; + } + else { + cout << "LinalgBase constructor - Not a LinalgVarNode" << endl; + } } LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 21757eb91..7a76e10d3 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -37,7 +37,8 @@ namespace taco { LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new LinalgVarNode(var)) { } -LinalgExpr::LinalgExpr(TensorVar var, TensorBase* tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, tensorBase)) { +LinalgExpr::LinalgExpr(TensorVar var, TensorBase* _tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, _tensorBase)) { + tensorBase = _tensorBase; } LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LinalgLiteralNode(val)) { diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 8a948af1a..ecf8c50ac 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -30,16 +30,22 @@ TEST(linalg, simplest) { /* A = B*C; */ + cout << "--- Before Ping ---" << endl; B.ping(); + cout << "--- Post-Ping ---" << endl; + cout << "--- Before Expression ---" << endl; A = B * C; + cout << "--- After Expression ---" << endl; + cout << "--- Before At ---" << endl; cout << "B(0,0): " << B.at(0,0) << endl; + cout << "--- After At ---" << endl; cout << A << endl; A.rewrite(); - cout << A.getIndexAssignment(); + cout << A.getIndexAssignment() << endl; ASSERT_TRUE(1); } From d5336fc02730af28904f5341c9a11914722b4845 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 9 Nov 2020 23:07:06 -0800 Subject: [PATCH 12/61] can reference the TensorBase inside the rewrite --- include/taco/linalg.h | 6 +++--- src/linalg.cpp | 4 ++++ test/tests-linalg.cpp | 10 +++++++++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 72662782b..f5aecc8ea 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -105,8 +105,8 @@ Matrix::Matrix(std::string name, Type tensorType, Format format) : Linalg template CType Matrix::at(int coord_x, int coord_y) { std::cout << "Name: " << name << std::endl; - std::cout << tbase << std::endl; - std::cout << "Matrix found a TBase " << tbase->getName() << std::endl; + std::cout << tensorBase << std::endl; + std::cout << "Matrix found a TBase " << tensorBase->getName() << std::endl; std::cout << "Will print a coordinate" << std::endl; @@ -115,7 +115,7 @@ CType 
Matrix::at(int coord_x, int coord_y) { std::cout << "This matrix is the result of an assignment" << std::endl; } - return tbase->at({coord_x, coord_y}); + return tensorBase->at({coord_x, coord_y}); } // ------------------------------------------------------------ // Vector class diff --git a/src/linalg.cpp b/src/linalg.cpp index 2bc5011bb..6c5aa4343 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -114,6 +114,10 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { } else if (isa(linalg.get())) { const LinalgVarNode* var = to(linalg.get()); return new AccessNode(var->tensorVar, indices); + } else if (isa(linalg.get())) { + cout << "LinalgBase::rewrite -- got a tensorbasenode" << endl; + cout << "Eet ees " << linalg.tensorBase->getName() << endl; + return linalg.tensorBase->operator()(indices); } return IndexExpr(); } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index ecf8c50ac..130bcdd25 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -42,10 +42,18 @@ TEST(linalg, simplest) { cout << "B(0,0): " << B.at(0,0) << endl; cout << "--- After At ---" << endl; - cout << A << endl; + cout << "--- Before cout of A ---" << endl; + /* cout << A << endl; */ + cout << "--- After cout of A ---" << endl; + + cout << "--- Before Rewrite of A ---" << endl; A.rewrite(); + cout << "--- After Rewrite of A ---" << endl; + + cout << "--- Before getIndexAssignment on A ---" << endl; cout << A.getIndexAssignment() << endl; + cout << "--- After getIndexAssignment on A ---" << endl; ASSERT_TRUE(1); } From 8d512fe5fd7886c305e0c44a9a3d992d07acefd5 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 9 Nov 2020 23:44:43 -0800 Subject: [PATCH 13/61] experiment with doing the assignment after the rewrite of the RHS --- src/linalg.cpp | 10 ++++++++-- test/tests-linalg.cpp | 5 +++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/linalg.cpp b/src/linalg.cpp index 6c5aa4343..4b77c9049 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -115,8 +115,7 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { const LinalgVarNode* var = to(linalg.get()); return new AccessNode(var->tensorVar, indices); } else if (isa(linalg.get())) { - cout << "LinalgBase::rewrite -- got a tensorbasenode" << endl; - cout << "Eet ees " << linalg.tensorBase->getName() << endl; + cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; return linalg.tensorBase->operator()(indices); } return IndexExpr(); @@ -141,6 +140,13 @@ IndexStmt LinalgBase::rewrite() { Access lhs = Access(tensor, indices); IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); + // TODO: instead of doing it here, do it at the point of read-method + // by grabbing the RHS from the indexAssignment (need state to know if assigned before) + + cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; + this->tensorBase->operator()(indices) = rhs; + cout << "--- Done assigning RHS to Tensor API ---" << endl; + Assignment indexAssign = Assignment(lhs, rhs); this->indexAssignment = indexAssign; return indexAssign; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 130bcdd25..20a2831b3 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -40,6 +40,7 @@ TEST(linalg, simplest) { cout << "--- Before At ---" << endl; cout << "B(0,0): " << B.at(0,0) << endl; + cout << "A(0,0): " << A.at(0,0) << endl; cout << "--- After At ---" << endl; @@ -51,6 +52,10 @@ TEST(linalg, simplest) { A.rewrite(); cout << "--- After Rewrite of A 
---" << endl; + cout << "--- Before At (A) ---" << endl; + /* cout << "A(0,0): " << A.at(0,0) << endl; */ + cout << "--- After At (A) ---" << endl; + cout << "--- Before getIndexAssignment on A ---" << endl; cout << A.getIndexAssignment() << endl; cout << "--- After getIndexAssignment on A ---" << endl; From 4576e3c6b20f5fa01b6d3264077fcedc0a88f9c6 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 10 Nov 2020 14:31:37 -0800 Subject: [PATCH 14/61] set idxcount(0) and print tensorBase in << --- src/linalg.cpp | 31 ++++++++++++++++--------------- test/tests-linalg.cpp | 16 ++++++++-------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/linalg.cpp b/src/linalg.cpp index 4b77c9049..40f96a993 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -17,7 +17,7 @@ LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(te LinalgExpr(TensorVar(name, tensorType)) { } -LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format) : LinalgExpr(TensorVar(name, tensorType, format), new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType) { +LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format) : LinalgExpr(TensorVar(name, tensorType, format), new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0) { cout << "Called constructor that uses dims dims" << endl; @@ -33,7 +33,7 @@ LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector } } -LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType) { +LinalgBase::LinalgBase(string name, Type tensorType, Format format) : LinalgExpr(TensorVar(name, tensorType, format)), name(name), tensorType(tensorType), idxcount(0) { // Unpack the type and shape Datatype type = tensorType.getDataType(); Shape shape = tensorType.getShape(); @@ -61,11 +61,9 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { cout << "LinalgBase operator= on " << name << endl; LinalgAssignment assignment = LinalgAssignment(to(this->get())->tensorVar, expr); this->assignment = assignment; - /* cout << "this assignment ptr: " << this->assignment.ptr << endl; */ - - // Now that the assignment is made we should run the index-assignment algorithm - - // Start by trying to print out the whole expression tree + + // TODO: May need to invoke the rewrite at this point to get the interleaving of inserts and + // expressions correct return assignment; } @@ -140,12 +138,11 @@ IndexStmt LinalgBase::rewrite() { Access lhs = Access(tensor, indices); IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); - // TODO: instead of doing it here, do it at the point of read-method - // by grabbing the RHS from the indexAssignment (need state to know if assigned before) - - cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; - this->tensorBase->operator()(indices) = rhs; - cout << "--- Done assigning RHS to Tensor API ---" << endl; + if(this->tensorBase != NULL) { + cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; + this->tensorBase->operator()(indices) = rhs; + cout << "--- Done assigning RHS to Tensor API ---" << endl; + } Assignment indexAssign = Assignment(lhs, rhs); this->indexAssignment = indexAssign; @@ -154,10 +151,14 @@ IndexStmt LinalgBase::rewrite() { return IndexStmt(); } - - std::ostream& operator<<(std::ostream& os, const LinalgBase& 
linalg) { LinalgAssignment assignment = linalg.getAssignment(); + + // If TensorBase exists, print the storage + if (linalg.tensorBase != NULL) { + return os << *(linalg.tensorBase) << endl; + } + if (!assignment.defined()) return os << getNode(linalg)->tensorVar.getName(); LinalgNotationPrinter printer(os); printer.print(assignment); diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 20a2831b3..1f27e769b 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -30,10 +30,6 @@ TEST(linalg, simplest) { /* A = B*C; */ - cout << "--- Before Ping ---" << endl; - B.ping(); - cout << "--- Post-Ping ---" << endl; - cout << "--- Before Expression ---" << endl; A = B * C; cout << "--- After Expression ---" << endl; @@ -44,18 +40,22 @@ TEST(linalg, simplest) { cout << "--- After At ---" << endl; - cout << "--- Before cout of A ---" << endl; - /* cout << A << endl; */ - cout << "--- After cout of A ---" << endl; + /* cout << "--- before cout of a ---" << endl; */ + /* cout << a << endl; */ + /* cout << "--- after cout of a ---" << endl; */ cout << "--- Before Rewrite of A ---" << endl; A.rewrite(); cout << "--- After Rewrite of A ---" << endl; cout << "--- Before At (A) ---" << endl; - /* cout << "A(0,0): " << A.at(0,0) << endl; */ + cout << "A(0,0): " << A.at(0,0) << endl; cout << "--- After At (A) ---" << endl; + cout << "--- before cout of a ---" << endl; + cout << A << endl; + cout << "--- after cout of a ---" << endl; + cout << "--- Before getIndexAssignment on A ---" << endl; cout << A.getIndexAssignment() << endl; cout << "--- After getIndexAssignment on A ---" << endl; From 703e62e776d77b40149a480b4a81e232bd84a6c0 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Tue, 10 Nov 2020 17:49:38 -0800 Subject: [PATCH 15/61] Add in transpose() and elemMul() for linalg expr and also add more nodes in rewrite --- .../taco/linalg_notation/linalg_notation.h | 11 ++++++++- src/linalg.cpp | 23 ++++++++++++++++--- src/linalg_notation/linalg_notation.cpp | 8 +++++++ 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 2f1d855fc..9620595f3 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -134,12 +134,21 @@ LinalgExpr operator+(const LinalgExpr&, const LinalgExpr&); /// Subtract a linear algebra expressions from another. LinalgExpr operator-(const LinalgExpr&, const LinalgExpr&); -/// Multiply two linear algebra expressions. +/// Matrix Multiply two linear algebra expressions. LinalgExpr operator*(const LinalgExpr&, const LinalgExpr&); /// Divide a linear expression by another. LinalgExpr operator/(const LinalgExpr&, const LinalgExpr&); +/// Element-wise multiply two linear algebra expressions +// FIXME: May want to be consistent with eigen library in c++ and change to cmul +LinalgExpr elemMul(const LinalgExpr& lhs, const LinalgExpr& rhs); + +/// Construct and returns an expression that transposes this expression +// FIXME: May want to change this with '^T' in the future +LinalgExpr transpose(const LinalgExpr& lhs); +//LinalgExpr operator^(const LinalgExpr&, const T); + /// A an index statement computes a tensor. The index statements are /// assignment, forall, where, multi, and sequence. 
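For context, the operators declared above are meant to compose into whole-tensor expressions with no index variables. Below is a minimal usage sketch pieced together from the Matrix/Vector classes and the tests added later in this series; the explicit <double> template arguments, the taco/linalg.h include, and the sketch function name are assumptions (the template parameters do not survive in this patch text), so treat it as an illustration of intent rather than code from the patch.

// Illustrative sketch only, not part of the patch.
#include <iostream>
#include "taco/linalg.h"

using namespace taco;

void linalgOperatorSketch() {                 // hypothetical helper name
  Matrix<double> A("A", 2, 2, dense, dense);
  Matrix<double> B("B", 2, 2, dense, dense);
  Matrix<double> C("C", 2, 2, dense, dense);
  Vector<double> x("x", 2, dense);            // column vector by default
  Vector<double> b("b", 2, dense);

  A = B * C + elemMul(B, C);                  // '*' is matrix multiply, elemMul is component-wise
  x = transpose(B) * b;                       // transpose wraps the expression; no data is moved

  A.rewrite();                                // lower the recorded assignment to index notation
  std::cout << A.getIndexAssignment() << std::endl;
}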
class LinalgStmt : public util::IntrusivePtr { diff --git a/src/linalg.cpp b/src/linalg.cpp index f84a34d56..994638966 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -58,18 +58,35 @@ IndexVar LinalgBase::getUniqueIndex() { IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { if (isa(linalg.get())) { - const LinalgSubNode* sub = to(linalg.get()); + auto sub = to(linalg.get()); IndexExpr indexA = rewrite(sub->a, indices); IndexExpr indexB = rewrite(sub->b, indices); return new SubNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto add = to(linalg.get()); + IndexExpr indexA = rewrite(add->a, indices); + IndexExpr indexB = rewrite(add->b, indices); + return new AddNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto mul = to(linalg.get()); + IndexExpr indexA = rewrite(mul->a, indices); + IndexExpr indexB = rewrite(mul->b, indices); + return new MulNode(indexA, indexB); } else if (isa(linalg.get())) { - const LinalgMatMulNode* mul = to(linalg.get()); + auto mul = to(linalg.get()); IndexVar index = getUniqueIndex(); IndexExpr indexA = rewrite(mul->a, {indices[0], index}); IndexExpr indexB = rewrite(mul->b, {index, indices[1]}); return new MulNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto neg = to(linalg.get()); + IndexExpr index = rewrite(neg->a, indices); + return new NegNode(index); + } else if (isa(linalg.get())) { + auto transpose = to(linalg.get()); + return rewrite(transpose->a, {indices[1], indices[0]}); } else if (isa(linalg.get())) { - const LinalgVarNode* var = to(linalg.get()); + auto var = to(linalg.get()); return new AccessNode(var->tensorVar, indices); } return IndexExpr(); diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 3e4e00526..cda57cb87 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -111,6 +111,14 @@ LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { return new LinalgDivNode(lhs, rhs); } +LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { + return new LinalgElemMulNode(lhs, rhs); +} + +LinalgExpr transpose(const LinalgExpr &lhs) { + return new LinalgTransposeNode(lhs); +} + // class LinalgStmt LinalgStmt::LinalgStmt() : util::IntrusivePtr(nullptr) { } From 86e2da13c418327bd0a09b7a473877a34926597d Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Tue, 10 Nov 2020 18:10:28 -0800 Subject: [PATCH 16/61] Add in rewrite for LinalgLiteralNodes --- src/linalg.cpp | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/src/linalg.cpp b/src/linalg.cpp index 994638966..3541065b8 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -78,6 +78,11 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { IndexExpr indexA = rewrite(mul->a, {indices[0], index}); IndexExpr indexB = rewrite(mul->b, {index, indices[1]}); return new MulNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto div = to(linalg.get()); + IndexExpr indexA = rewrite(div->a, indices); + IndexExpr indexB = rewrite(div->b, indices); + return new DivNode(indexA, indexB); } else if (isa(linalg.get())) { auto neg = to(linalg.get()); IndexExpr index = rewrite(neg->a, indices); @@ -85,6 +90,60 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { } else if (isa(linalg.get())) { auto transpose = to(linalg.get()); return rewrite(transpose->a, {indices[1], indices[0]}); + } else if (isa(linalg.get())) { + auto lit = to(linalg.get()); + + LiteralNode* value; + 
switch (lit->getDataType().getKind()) { + case Datatype::Bool: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt128: + taco_not_supported_yet; + break; + case Datatype::Int8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int128: + taco_not_supported_yet; + break; + case Datatype::Float32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Float64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Complex64: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Complex128: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Undefined: + break; + } + return value; } else if (isa(linalg.get())) { auto var = to(linalg.get()); return new AccessNode(var->tensorVar, indices); From 9ebc5bad3b5b3a63f3879ca89ed187ef63775417 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 11 Nov 2020 15:40:09 -0800 Subject: [PATCH 17/61] Add in matrix multiplication and shape checks and vectors and scalars --- include/taco/linalg.h | 136 +++++++++++++----- .../taco/linalg_notation/linalg_notation.h | 11 +- .../linalg_notation/linalg_notation_nodes.h | 34 +++-- .../linalg_notation_nodes_abstract.h | 11 +- src/linalg.cpp | 48 +++++-- src/linalg_notation/linalg_notation.cpp | 62 +++++++- .../linalg_notation_nodes_abstract.cpp | 33 ++++- .../linalg_notation_visitor.cpp | 2 +- test/tests-linalg.cpp | 79 +++++++--- 9 files changed, 323 insertions(+), 93 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 8c0705d75..445c2aa95 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -20,18 +20,28 @@ class LinalgBase : public LinalgExpr { IndexStmt indexAssignment; int idxcount; + bool isColVec; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); + IndexVar getUniqueIndex(); + std::vector getUniqueIndices(size_t order); + public: - LinalgBase(std::string name, Type tensorType); - LinalgBase(std::string name, Type tensorType, Format format); + LinalgBase(std::string name, Type tensorType, bool isColVec = false); + + LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); + /// [LINALG NOTATION] - LinalgAssignment operator=(const LinalgExpr& expr); + LinalgAssignment operator=(const LinalgExpr &expr); + const LinalgAssignment getAssignment() const; + const IndexStmt getIndexAssignment() const; + bool isColVector() const; + IndexStmt rewrite(); typedef LinalgVarNode Node; @@ -41,26 +51,36 @@ class LinalgBase : public LinalgExpr { }; -std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg); +std::ostream &operator<<(std::ostream &os, const LinalgBase &linalg); + IndexExpr rewrite(LinalgExpr linalg, std::vector); + IndexStmt rewrite(LinalgStmt linalg); // ------------------------------------------------------------ // Matrix class // ------------------------------------------------------------ -template +template class Matrix : public LinalgBase { public: explicit Matrix(std::string name); + Matrix(std::string name, 
int dim1, int dim2); + Matrix(std::string name, std::vector dimensions); + Matrix(std::string name, int dim1, int dim2, Format format); + Matrix(std::string name, std::vector dimensions, Format format); + Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2); + Matrix(std::string name, Type tensorType); + Matrix(std::string name, Type tensorType, Format format); - LinalgAssignment operator=(const LinalgExpr& expr) { + + LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } @@ -70,41 +90,55 @@ class Matrix : public LinalgBase { // Matrix template method implementations // ------------------------------------------------------------ -template +template Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} -template -Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} -template + +template +Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, + Type(type(), dimensions)) {} + +template Matrix::Matrix(std::string name, int dim1, int dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} -template + +template Matrix::Matrix(std::string name, int dim1, int dim2, Format format) : LinalgBase(name, Type(type(), {dim1, dim2}), format) {} -template + +template Matrix::Matrix(std::string name, std::vector dimensions, Format format) : LinalgBase(name, Type(type(), dimensions), format) {} -template + +template Matrix::Matrix(std::string name, int dim1, int dim2, ModeFormat format1, ModeFormat format2) : LinalgBase(name, Type(type(), {dim1, dim2}), Format({format1, format2})) {} -template + +template Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} -template + +template Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ -template +template class Vector : public LinalgBase { std::string name; Datatype ctype; public: - explicit Vector(std::string name); - Vector(std::string name, int dim); - Vector(std::string name, int dim, Format format); - Vector(std::string name, int dim, ModeFormat format); - Vector(std::string name, Type type, Format format); - Vector(std::string name, Type type, ModeFormat format); - LinalgAssignment operator=(const LinalgExpr& expr) { + explicit Vector(std::string name, bool isColVec = true); + + Vector(std::string name, int dim, bool isColVec = true); + + Vector(std::string name, int dim, Format format, bool isColVec = true); + + Vector(std::string name, int dim, ModeFormat format, bool isColVec = true); + + Vector(std::string name, Type type, Format format, bool isColVec = true); + + Vector(std::string name, Type type, ModeFormat format, bool isColVec = true); + + LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } }; @@ -113,20 +147,44 @@ class Vector : public LinalgBase { // Vector template method implementations // ------------------------------------------------------------ -template -Vector::Vector(std::string name) : LinalgBase(name, Type(type(), {42})) {} -template -Vector::Vector(std::string name, int dim) : LinalgBase(name, Type(type(), {dim})) {} -template -Vector::Vector(std::string name, int dim, Format format) : LinalgBase(name, Type(type(), {dim}), format) {} -template -Vector::Vector(std::string name, int dim, ModeFormat format) : 
- LinalgBase(name, Type(type(), {dim}), Format(format)) {} -template -Vector::Vector(std::string name, Type type, Format format) : - LinalgBase(name, type, format) {} -template -Vector::Vector(std::string name, Type type, ModeFormat format) : - LinalgBase(name, type, Format(format)) {} -} +template +Vector::Vector(std::string name, bool isColVec) : LinalgBase(name, Type(type(), {42}), isColVec) {} + +template +Vector::Vector(std::string name, int dim, bool isColVec) : LinalgBase(name, Type(type(), {dim}), + isColVec) {} + +template +Vector::Vector(std::string name, int dim, Format format, bool isColVec) : LinalgBase(name, + Type(type(), {dim}), + format, isColVec) {} + +template +Vector::Vector(std::string name, int dim, ModeFormat format, bool isColVec) : + LinalgBase(name, Type(type(), {dim}), Format(format), isColVec) {} + +template +Vector::Vector(std::string name, Type type, Format format, bool isColVec) : + LinalgBase(name, type, format, isColVec) {} + +template +Vector::Vector(std::string name, Type type, ModeFormat format, bool isColVec) : + LinalgBase(name, type, Format(format), isColVec) {} + +template +class Scalar : public LinalgBase { + std::string name; + Datatype ctype; +public: + explicit Scalar(std::string name); + + LinalgAssignment operator=(const LinalgExpr &expr) { + return LinalgBase::operator=(expr); + } +}; + +template +Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {} + +} // namespace taco #endif diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 9620595f3..409ed5fe8 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -25,6 +25,7 @@ #include "taco/index_notation/provenance_graph.h" #include "taco/linalg_notation/linalg_notation_nodes_abstract.h" +#include "taco/linalg.h" namespace taco { @@ -38,6 +39,8 @@ class Schedule; class TensorVar; +class LinalgBase; + class LinalgExpr; class LinalgAssignment; @@ -70,7 +73,7 @@ class LinalgExpr : public util::IntrusivePtr { /// A(i,j) = b; /// ``` LinalgExpr(TensorVar); - + LinalgExpr(TensorVar var, bool isColVec); /// Consturct an integer literal. /// ``` /// A(i,j) = 1; @@ -114,6 +117,8 @@ class LinalgExpr : public util::IntrusivePtr { LinalgExpr(std::complex); Datatype getDataType() const; + int getOrder() const; + bool isColVector() const; /// Visit the linalg expression's sub-expressions. void accept(LinalgExprVisitorStrict *) const; @@ -149,6 +154,10 @@ LinalgExpr elemMul(const LinalgExpr& lhs, const LinalgExpr& rhs); LinalgExpr transpose(const LinalgExpr& lhs); //LinalgExpr operator^(const LinalgExpr&, const T); +/// Check to make sure operators are legal (shape-wise) +int getMatMulOrder(const LinalgExpr &lhs, const LinalgExpr &rhs); + +void checkCompatibleShape(const LinalgExpr &lhs, const LinalgExpr &rhs); /// A an index statement computes a tensor. The index statements are /// assignment, forall, where, multi, and sequence. 
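The checkCompatibleShape and getMatMulOrder declarations above encode the shape rules behind these operators: '+', '-', '/', and elemMul require both operands to have the same order and, for order-1 operands, the same row/column orientation, while '*' derives the result order from the operand shapes (matrix-matrix, matrix-vector, row vector times matrix, inner product, outer product). A small sketch of the intended behaviour follows, under the same assumption about the stripped <double> template arguments and with a hypothetical function name.

// Illustrative sketch only, not part of the patch.
#include "taco/linalg.h"

using namespace taco;

void shapeRuleSketch() {
  Matrix<double> A("A", 2, 2, dense, dense);
  Vector<double> r("r", 2, dense, false);   // row vector (isColVec = false)
  Vector<double> c("c", 2, dense, true);    // column vector
  Scalar<double> s("s");

  s = r * c;   // inner product: two order-1 operands collapse to an order-0 result
  A = c * r;   // outer product: column times row yields an order-2 result
  // r + c would trip the taco_uassert in checkCompatibleShape, since '+'
  // requires matching row/column orientation for order-1 operands.
}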
class LinalgStmt : public util::IntrusivePtr { diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 413dabbe7..03a6edc0f 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -3,6 +3,7 @@ #include #include +#include #include "taco/type.h" #include "taco/index_notation/index_notation.h" @@ -19,7 +20,9 @@ namespace taco { struct LinalgVarNode : public LinalgExprNode { LinalgVarNode(TensorVar tensorVar) - : LinalgExprNode(tensorVar.getType().getDataType()), tensorVar(tensorVar) {} + : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder()), tensorVar(tensorVar) {} + LinalgVarNode(TensorVar tensorVar, bool isColVec) + : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec), tensorVar(tensorVar) {} void accept(LinalgExprVisitorStrict* v) const override { v->visit(this); @@ -58,7 +61,7 @@ namespace taco { LinalgExpr a; protected: - LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType()), a(a) {} + LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType(), a.getOrder(), a.isColVector()), a(a) {} }; @@ -86,14 +89,17 @@ namespace taco { protected: LinalgBinaryExprNode() : LinalgExprNode() {} - LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b) - : LinalgExprNode(max_type(a.getDataType(), b.getDataType())), a(a), b(b) {} + LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b, int order) + : LinalgExprNode(max_type(a.getDataType(), b.getDataType()), order), a(a), b(b) {} + LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) + : LinalgExprNode(max_type(a.getDataType(), b.getDataType()), order, isColVec), a(a), b(b) {} }; struct LinalgAddNode : public LinalgBinaryExprNode { LinalgAddNode() : LinalgBinaryExprNode() {} - LinalgAddNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} + LinalgAddNode(LinalgExpr a, LinalgExpr b, int order) : LinalgBinaryExprNode(a, b, order) {} + LinalgAddNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) : LinalgBinaryExprNode(a, b, order, isColVec) {} std::string getOperatorString() const override{ return "+"; @@ -107,7 +113,8 @@ namespace taco { struct LinalgSubNode : public LinalgBinaryExprNode { LinalgSubNode() : LinalgBinaryExprNode() {} - LinalgSubNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} + LinalgSubNode(LinalgExpr a, LinalgExpr b, int order) : LinalgBinaryExprNode(a, b, order) {} + LinalgSubNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) : LinalgBinaryExprNode(a, b, order, isColVec) {} std::string getOperatorString() const override{ return "-"; @@ -121,7 +128,8 @@ namespace taco { struct LinalgMatMulNode : public LinalgBinaryExprNode { LinalgMatMulNode() : LinalgBinaryExprNode() {} - LinalgMatMulNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} + LinalgMatMulNode(LinalgExpr a, LinalgExpr b, int order) : LinalgBinaryExprNode(a, b, order) {} + LinalgMatMulNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) : LinalgBinaryExprNode(a, b, order, isColVec) {} std::string getOperatorString() const override{ return "*"; @@ -134,7 +142,8 @@ namespace taco { struct LinalgElemMulNode : public LinalgBinaryExprNode { LinalgElemMulNode() : LinalgBinaryExprNode() {} - LinalgElemMulNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} + LinalgElemMulNode(LinalgExpr a, LinalgExpr b, int order) : LinalgBinaryExprNode(a, b, order) {} + LinalgElemMulNode(LinalgExpr a, 
LinalgExpr b, int order, bool isColVec) : LinalgBinaryExprNode(a, b, order, isColVec) {} std::string getOperatorString() const override{ return "elemMul"; @@ -147,7 +156,8 @@ struct LinalgElemMulNode : public LinalgBinaryExprNode { struct LinalgDivNode : public LinalgBinaryExprNode { LinalgDivNode() : LinalgBinaryExprNode() {} - LinalgDivNode(LinalgExpr a, LinalgExpr b) : LinalgBinaryExprNode(a, b) {} + LinalgDivNode(LinalgExpr a, LinalgExpr b, int order) : LinalgBinaryExprNode(a, b, order) {} + LinalgDivNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) : LinalgBinaryExprNode(a, b, order, isColVec) {} std::string getOperatorString() const override{ return "/"; @@ -161,7 +171,10 @@ struct LinalgDivNode : public LinalgBinaryExprNode { // Linalg Statements struct LinalgAssignmentNode : public LinalgStmtNode { LinalgAssignmentNode(const TensorVar& lhs, const LinalgExpr& rhs) - : lhs(lhs), rhs(rhs) {} + : lhs(lhs), rhs(rhs) { isColVec = false;} + + LinalgAssignmentNode(const TensorVar& lhs, bool isColVec, const LinalgExpr& rhs) + : lhs(lhs), rhs(rhs), isColVec(isColVec) {} void accept(LinalgStmtVisitorStrict* v) const { v->visit(this); @@ -169,6 +182,7 @@ struct LinalgAssignmentNode : public LinalgStmtNode { TensorVar lhs; LinalgExpr rhs; + bool isColVec; }; /// Returns true if expression e is of type E. diff --git a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h index 86353ab73..132cd1026 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h +++ b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h @@ -12,6 +12,7 @@ #include "taco/util/uncopyable.h" #include "taco/util/intrusive_ptr.h" #include "taco/linalg_notation/linalg_notation_visitor.h" + namespace taco { class TensorVar; @@ -23,15 +24,23 @@ struct LinalgExprNode : public util::Manageable, private util::Uncopyable { public: LinalgExprNode() = default; - LinalgExprNode(Datatype type); + explicit LinalgExprNode(Datatype type); + LinalgExprNode(Datatype type, int order); + LinalgExprNode(Datatype type, int order, bool isColVec); + virtual ~LinalgExprNode() = default; + virtual void accept(LinalgExprVisitorStrict*) const = 0; /// Return the scalar data type of the index expression. 
Datatype getDataType() const; + int getOrder() const; + bool isColVector() const; private: Datatype dataType; + int order; + bool isColVec; }; struct LinalgStmtNode : public util::Manageable, diff --git a/src/linalg.cpp b/src/linalg.cpp index 3541065b8..49a267a61 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -13,16 +13,22 @@ namespace taco { /* : LinalgBase(/1* get a unique name *1/, ctype) { */ /* } */ -LinalgBase::LinalgBase(string name, Type tensorType) : name(name), tensorType(tensorType), idxcount(0), - LinalgExpr(TensorVar(name, tensorType)) { +LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name), tensorType(tensorType), idxcount(0), + isColVec(isColVec), LinalgExpr(TensorVar(name, tensorType), isColVec) { } -LinalgBase::LinalgBase(string name, Type tensorType, Format format) : name(name), tensorType(tensorType), idxcount(0), - LinalgExpr(TensorVar(name, tensorType, format)) { +LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), + idxcount(0), isColVec(isColVec), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { } LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { - taco_iassert(isa(this->ptr)); - LinalgAssignment assignment = LinalgAssignment(to(this->get())->tensorVar, expr); + taco_iassert(isa(this->ptr)); + TensorVar var = to(this->get())->tensorVar; + + taco_uassert(var.getOrder() == expr.getOrder()) << "RHS and LHS of linalg assignment must match order"; + if (var.getOrder() == 1) + taco_uassert(this->isColVector() == expr.isColVector()) << "RHS and LHS of linalg assignment must match vector type"; + + LinalgAssignment assignment = LinalgAssignment(var, expr); this->assignment = assignment; return assignment; } @@ -75,8 +81,29 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { } else if (isa(linalg.get())) { auto mul = to(linalg.get()); IndexVar index = getUniqueIndex(); - IndexExpr indexA = rewrite(mul->a, {indices[0], index}); - IndexExpr indexB = rewrite(mul->b, {index, indices[1]}); + vector indicesA; + vector indicesB; + if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { + indicesA = {indices[0], index}; + indicesB = {index, indices[1]}; + } + else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { + indicesA = {index}; + indicesB = {index, indices[0]}; + } + else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { + indicesA = {indices[0], index}; + indicesB = {index}; + } + else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { + indicesA = {indices[0], index}; + indicesB = {index}; + } else { + indicesA = {index}; + indicesB = {index}; + } + IndexExpr indexA = rewrite(mul->a, indicesA); + IndexExpr indexB = rewrite(mul->b, indicesB); return new MulNode(indexA, indexB); } else if (isa(linalg.get())) { auto div = to(linalg.get()); @@ -141,6 +168,7 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { value = new LiteralNode(lit->getVal>()); break; case Datatype::Undefined: + taco_uerror << "unsupported Datatype"; break; } return value; @@ -177,7 +205,9 @@ IndexStmt LinalgBase::rewrite() { return IndexStmt(); } - +bool LinalgBase::isColVector() const { + return this->isColVec; +} std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) { LinalgAssignment assignment = linalg.getAssignment(); diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index cda57cb87..72da31ed7 100644 --- 
a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -37,6 +37,9 @@ namespace taco { LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new LinalgVarNode(var)) { } +LinalgExpr::LinalgExpr(TensorVar var, bool isColVec) : LinalgExpr(new LinalgVarNode(var, isColVec)) { +} + LinalgExpr::LinalgExpr(char val) : LinalgExpr(new LinalgLiteralNode(val)) { } @@ -80,6 +83,14 @@ Datatype LinalgExpr::getDataType() const { return const_cast(this->ptr)->getDataType(); } +int LinalgExpr::getOrder() const { + return const_cast(this->ptr)->getOrder(); +} + +bool LinalgExpr::isColVector() const { + return const_cast(this->ptr)->isColVector(); +} + void LinalgExpr::accept(LinalgExprVisitorStrict *v) const { ptr->accept(v); } @@ -91,28 +102,67 @@ std::ostream& operator<<(std::ostream& os, const LinalgExpr& expr) { return os; } +void checkCompatibleShape(const LinalgExpr &lhs, const LinalgExpr &rhs) { + taco_uassert(lhs.getOrder() == rhs.getOrder()) << "RHS and LHS order do not match for linear algebra " + "binary operation" << endl; + if (lhs.getOrder() == 1) + taco_uassert(lhs.isColVector() == rhs.isColVector()) << "RHS and LHS vector type do not match for linear algebra " + "binary operation" << endl; +} + LinalgExpr operator-(const LinalgExpr &expr) { return new LinalgNegNode(expr.ptr); } LinalgExpr operator+(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new LinalgAddNode(lhs, rhs); + checkCompatibleShape(lhs, rhs); + return new LinalgAddNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr operator-(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new LinalgSubNode(lhs, rhs); + checkCompatibleShape(lhs, rhs); + return new LinalgSubNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new LinalgMatMulNode(lhs, rhs); + int order = 0; + bool isColVec = false; + // Matrix-matrix mult + if (lhs.getOrder() == 2 && rhs.getOrder() == 2) { + order = 2; + } + // Matrix-column vector multiply + else if (lhs.getOrder() == 2 && rhs.getOrder() == 1 && rhs.isColVector()) { + order = 1; + isColVec = true; + } + // Row-vector Matrix multiply + else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 2) { + order = 1; + } + // Inner product + else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 1 && rhs.isColVector()) { + order = 0; + } + // Outer product + else if (lhs.getOrder() == 1 && lhs.isColVector() && rhs.getOrder() == 1 && !rhs.isColVector()) { + order = 2; + } + else { + taco_uassert(lhs.getOrder() != rhs.getOrder()) << "RHS and LHS order/vector type do not match " + "for linear algebra matrix multiply" << endl; + } + return new LinalgMatMulNode(lhs, rhs, order, isColVec); } LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new LinalgDivNode(lhs, rhs); + checkCompatibleShape(lhs, rhs); + return new LinalgDivNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { - return new LinalgElemMulNode(lhs, rhs); + checkCompatibleShape(lhs, rhs); + return new LinalgElemMulNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr transpose(const LinalgExpr &lhs) { @@ -148,4 +198,4 @@ LinalgExpr LinalgAssignment::getRhs() const { return getNode(*this)->rhs; } -} // namespace taco \ No newline at end of file +} // namespace taco diff --git a/src/linalg_notation/linalg_notation_nodes_abstract.cpp b/src/linalg_notation/linalg_notation_nodes_abstract.cpp index 
62dcb747e..fcc699e56 100644 --- a/src/linalg_notation/linalg_notation_nodes_abstract.cpp +++ b/src/linalg_notation/linalg_notation_nodes_abstract.cpp @@ -1,20 +1,39 @@ #include "taco/linalg_notation/linalg_notation_nodes_abstract.h" -#include "taco/linalg_notation/linalg_notation.h" -#include "taco/index_notation/schedule.h" -#include "taco/index_notation/transformations.h" - -#include - using namespace std; namespace taco { LinalgExprNode::LinalgExprNode(Datatype type) - : dataType(type) { + : dataType(type), order(0), isColVec(false) { +} + +LinalgExprNode::LinalgExprNode(Datatype type, int order) + : dataType(type), order(order) { + if (order != 1) + isColVec = false; + else + isColVec = true; +} + +LinalgExprNode::LinalgExprNode(Datatype type, int order, bool isColVec) + : dataType(type), order(order) { + if (order != 1) + this->isColVec = false; + else + this->isColVec = isColVec; } Datatype LinalgExprNode::getDataType() const { return dataType; } + +int LinalgExprNode::getOrder() const { + return order; +} + +bool LinalgExprNode::isColVector() const { + return isColVec; +} + } diff --git a/src/linalg_notation/linalg_notation_visitor.cpp b/src/linalg_notation/linalg_notation_visitor.cpp index 0d1925780..c71001d06 100644 --- a/src/linalg_notation/linalg_notation_visitor.cpp +++ b/src/linalg_notation/linalg_notation_visitor.cpp @@ -14,4 +14,4 @@ void LinalgStmtVisitorStrict::visit(const LinalgStmt& stmt) { stmt.accept(this); } -} \ No newline at end of file +} diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 2552c6108..e99e23553 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -6,37 +6,78 @@ using namespace taco; TEST(linalg, simplest) { Matrix B("B", 2, 2, dense, dense); - Matrix C("C"); - Matrix A("A"); + Matrix C("C", 2, 2, dense, dense); + Matrix A("A", 2, 2, dense, dense); - /* Vector c("c"); */ + cout << C.getOrder() << endl; + cout << B.getOrder() << endl; + cout << A.getOrder() << endl; + A = B + C; - /* Vector a("a"); */ + cout << A << endl; + + A.rewrite(); + cout << A.getIndexAssignment(); + + ASSERT_TRUE(1); +} - /* for(int i=0;i<42;i++) { */ - /* B.insert({i,i}, 1.0); */ - /* } */ +TEST(linalg, matvec_mul) { + Vector x("x", 2, dense); + Vector b("b", 2, dense); + Matrix A("A", 2, 2, dense, dense); - /* for(int i=0;i<42;i++) { */ - /* c.insert({i}, (double) i); */ - /* } */ + x = A*b; - /* B.pack(); */ - /* c.pack(); */ + cout << x << endl; - /* IndexVar i("i"), j("j"); */ + x.rewrite(); + cout << x.getIndexAssignment(); - /* a(i) = B(i,j) * c(j); */ + ASSERT_TRUE(1); +} - /* A = B*C; */ +TEST(linalg, vecmat_mul) { + Vector x("x", 2, dense, false); + Vector b("b", 2, dense, false); + Matrix A("A", 2, 2, dense, dense); - A = B * C; + x = b * A; - cout << A << endl; + cout << x << endl; - A.rewrite(); - cout << A.getIndexAssignment(); + x.rewrite(); + cout << x.getIndexAssignment(); ASSERT_TRUE(1); } +TEST(linalg, inner_mul) { + Scalar x("x"); + Vector b("b", 2, dense, false); + Vector a("a", 2, dense, true); + + x = b * a; + + cout << x << endl; + + x.rewrite(); + cout << x.getIndexAssignment(); + + ASSERT_TRUE(1); +} + +TEST(linalg, outer_mul) { + Matrix X("X", 2, 2, dense, dense); + Vector b("b", 2, dense, false); + Vector a("a", 2, dense, true); + + X = a * b; + + cout << X << endl; + + X.rewrite(); + cout << X.getIndexAssignment(); + + ASSERT_TRUE(1); +} \ No newline at end of file From 8ce89c667fd1a26ed8edb9645493c882893947c3 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Wed, 11 Nov 2020 17:10:48 -0800 Subject: [PATCH 18/61] 
demonstrate that insertion works, compute on actual data works too --- include/taco/linalg.h | 11 +++++++++++ test/tests-linalg.cpp | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index f5aecc8ea..d5d648a87 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -75,6 +75,10 @@ class Matrix : public LinalgBase { // Support some Read methods CType at(int coord_x, int coord_y); + // And a Write method + void insert(int coord_x, int coord_y, CType value); + + }; // ------------------------------------------------------------ @@ -117,6 +121,13 @@ CType Matrix::at(int coord_x, int coord_y) { return tensorBase->at({coord_x, coord_y}); } + +// Definition of Write methods +template +void Matrix::insert(int coord_x, int coord_y, CType value) { + tensorBase->insert({coord_x, coord_y}, value); +} + // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 1f27e769b..5058bf288 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -30,6 +30,15 @@ TEST(linalg, simplest) { /* A = B*C; */ + cout << "--- Before inserting ---" << endl; + B.insert(0,0,2); + B.insert(1,1,1); + B.insert(0,1,2); + + C.insert(0,0,2); + C.insert(1,1,2); + cout << "--- After inserting ---" << endl; + cout << "--- Before Expression ---" << endl; A = B * C; cout << "--- After Expression ---" << endl; From b98e3f5e040c726ba7925b2cd1cfb1c04893300a Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Wed, 11 Nov 2020 17:15:52 -0800 Subject: [PATCH 19/61] rename my simplest test to tensorbase --- test/tests-linalg.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 5058bf288..286ec000a 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -4,7 +4,7 @@ using namespace taco; -TEST(linalg, simplest) { +TEST(linalg, tensorbase) { Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); From 6fd2d0ccca4fa3b5315ee3ba97e2859fce95d639 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 11 Nov 2020 18:44:19 -0800 Subject: [PATCH 20/61] Add in transpose for vectors --- include/taco/linalg.h | 6 ------ include/taco/linalg_notation/linalg_notation.h | 1 + .../linalg_notation/linalg_notation_nodes.h | 2 ++ .../linalg_notation_nodes_abstract.h | 1 + src/linalg.cpp | 16 +++++++++------- src/linalg_notation/linalg_notation.cpp | 6 +++++- .../linalg_notation_nodes_abstract.cpp | 4 ++++ test/tests-linalg.cpp | 18 +++++++++++++++--- 8 files changed, 37 insertions(+), 17 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 445c2aa95..339d37413 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -20,7 +20,6 @@ class LinalgBase : public LinalgExpr { IndexStmt indexAssignment; int idxcount; - bool isColVec; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); @@ -40,15 +39,10 @@ class LinalgBase : public LinalgExpr { const IndexStmt getIndexAssignment() const; - bool isColVector() const; IndexStmt rewrite(); typedef LinalgVarNode Node; - /* LinalgBase operator=(LinalgExpr) { */ - /* return (LinalgBase)LinalgExpr; */ - /* } */ - }; std::ostream &operator<<(std::ostream &os, const LinalgBase &linalg); diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 409ed5fe8..0de91cc44 100644 --- 
a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -119,6 +119,7 @@ class LinalgExpr : public util::IntrusivePtr { Datatype getDataType() const; int getOrder() const; bool isColVector() const; + void setColVector(bool) const; /// Visit the linalg expression's sub-expressions. void accept(LinalgExprVisitorStrict *) const; diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 03a6edc0f..4630aa144 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -62,6 +62,7 @@ namespace taco { protected: LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType(), a.getOrder(), a.isColVector()), a(a) {} + LinalgUnaryExprNode(LinalgExpr a, bool isColVec) : LinalgExprNode(a.getDataType(), a.getOrder(), isColVec), a(a) {} }; @@ -75,6 +76,7 @@ namespace taco { struct LinalgTransposeNode : public LinalgUnaryExprNode { LinalgTransposeNode(LinalgExpr operand) : LinalgUnaryExprNode(operand) {} + LinalgTransposeNode(LinalgExpr operand, bool isColVec) : LinalgUnaryExprNode(operand, isColVec) {} void accept (LinalgExprVisitorStrict* v) const override{ v->visit(this); diff --git a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h index 132cd1026..136034053 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h +++ b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h @@ -36,6 +36,7 @@ struct LinalgExprNode : public util::Manageable, Datatype getDataType() const; int getOrder() const; bool isColVector() const; + void setColVector(bool val); private: Datatype dataType; diff --git a/src/linalg.cpp b/src/linalg.cpp index 49a267a61..e344a7c8f 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -14,10 +14,10 @@ namespace taco { /* } */ LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name), tensorType(tensorType), idxcount(0), - isColVec(isColVec), LinalgExpr(TensorVar(name, tensorType), isColVec) { + LinalgExpr(TensorVar(name, tensorType), isColVec) { } LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), - idxcount(0), isColVec(isColVec), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { + idxcount(0), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { } LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { @@ -116,7 +116,13 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { return new NegNode(index); } else if (isa(linalg.get())) { auto transpose = to(linalg.get()); - return rewrite(transpose->a, {indices[1], indices[0]}); + if (transpose->a.getOrder() == 2) { + return rewrite(transpose->a, {indices[1], indices[0]}); + } + else if (transpose->a.getOrder() == 1) { + return rewrite(transpose->a, {indices[0]}); + } + return rewrite(transpose->a, {}); } else if (isa(linalg.get())) { auto lit = to(linalg.get()); @@ -205,10 +211,6 @@ IndexStmt LinalgBase::rewrite() { return IndexStmt(); } -bool LinalgBase::isColVector() const { - return this->isColVec; -} - std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) { LinalgAssignment assignment = linalg.getAssignment(); if (!assignment.defined()) return os << getNode(linalg)->tensorVar.getName(); diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 72da31ed7..4e999eadf 
100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -91,6 +91,10 @@ bool LinalgExpr::isColVector() const { return const_cast(this->ptr)->isColVector(); } +void LinalgExpr::setColVector(bool val) const { + const_cast(this->ptr)->setColVector(val); +} + void LinalgExpr::accept(LinalgExprVisitorStrict *v) const { ptr->accept(v); } @@ -166,7 +170,7 @@ LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { } LinalgExpr transpose(const LinalgExpr &lhs) { - return new LinalgTransposeNode(lhs); + return new LinalgTransposeNode(lhs, !lhs.isColVector()); } // class LinalgStmt diff --git a/src/linalg_notation/linalg_notation_nodes_abstract.cpp b/src/linalg_notation/linalg_notation_nodes_abstract.cpp index fcc699e56..532a4943e 100644 --- a/src/linalg_notation/linalg_notation_nodes_abstract.cpp +++ b/src/linalg_notation/linalg_notation_nodes_abstract.cpp @@ -36,4 +36,8 @@ bool LinalgExprNode::isColVector() const { return isColVec; } +void LinalgExprNode::setColVector(bool val) { + isColVec = val; +} + } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index e99e23553..1a6a0663e 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -9,9 +9,6 @@ TEST(linalg, simplest) { Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); - cout << C.getOrder() << endl; - cout << B.getOrder() << endl; - cout << A.getOrder() << endl; A = B + C; cout << A << endl; @@ -79,5 +76,20 @@ TEST(linalg, outer_mul) { X.rewrite(); cout << X.getIndexAssignment(); + ASSERT_TRUE(1); +} + +TEST(linalg, rowvec_transpose) { + Vector b("b", 2, dense, false); + Matrix A("A", 2, 2, dense, dense); + Scalar a("a"); + + a = transpose(transpose(b) * A * b); + + cout << a << endl; + + a.rewrite(); + cout << a.getIndexAssignment(); + ASSERT_TRUE(1); } \ No newline at end of file From 8cb0c145dec968ab36eba7cd482ec76705ed1965 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 11 Nov 2020 20:49:31 -0800 Subject: [PATCH 21/61] Add in implicit rewrite after assignment --- src/linalg.cpp | 3 +-- test/tests-linalg.cpp | 8 +------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/src/linalg.cpp b/src/linalg.cpp index 403f40fcb..607cea40c 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -36,8 +36,7 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { LinalgAssignment assignment = LinalgAssignment(var, expr); this->assignment = assignment; - // invoke the rewrite at this point to get the interleaving of inserts and expressions correct? 
- + this->rewrite(); return assignment; } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index ec6108619..1889de070 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -52,8 +52,7 @@ TEST(linalg, tensorbase) { A = B + C; cout << A << endl; - - A.rewrite(); + cout << A.getIndexAssignment(); ASSERT_TRUE(1); @@ -68,7 +67,6 @@ TEST(linalg, matvec_mul) { cout << x << endl; - x.rewrite(); cout << x.getIndexAssignment(); ASSERT_TRUE(1); @@ -83,7 +81,6 @@ TEST(linalg, vecmat_mul) { cout << x << endl; - x.rewrite(); cout << x.getIndexAssignment(); ASSERT_TRUE(1); @@ -135,7 +132,6 @@ TEST(linalg, inner_mul) { cout << x << endl; - x.rewrite(); cout << x.getIndexAssignment(); ASSERT_TRUE(1); @@ -150,7 +146,6 @@ TEST(linalg, outer_mul) { cout << X << endl; - X.rewrite(); cout << X.getIndexAssignment(); ASSERT_TRUE(1); @@ -165,7 +160,6 @@ TEST(linalg, rowvec_transpose) { cout << a << endl; - a.rewrite(); cout << a.getIndexAssignment(); ASSERT_TRUE(1); From 1a8d7f06e23b07d33d20587929f6c6ecb913f44e Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Thu, 12 Nov 2020 10:51:00 -0800 Subject: [PATCH 22/61] Add in fixes after TensorBase merge and partial linalg_parser that uses LinalgExpr and LinalgBase --- include/taco/linalg.h | 4 + .../linalg_notation/linalg_notation_nodes.h | 3 +- include/taco/parser/linalg_parser.h | 20 ++- src/linalg.cpp | 35 +++-- src/linalg_notation/linalg_notation.cpp | 23 ++- .../linalg_notation_nodes_abstract.cpp | 1 - src/parser/linalg_parser.cpp | 142 +++--------------- src/tensor.cpp | 5 + test/tests-linalg.cpp | 4 +- 9 files changed, 80 insertions(+), 157 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 96a9dfac7..dcde04948 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -207,6 +207,7 @@ class Scalar : public LinalgBase { Datatype ctype; public: explicit Scalar(std::string name); + Scalar(std::string name, bool useTensorBase); LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); @@ -215,6 +216,9 @@ class Scalar : public LinalgBase { template Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {} +template +Scalar::Scalar(std::string name, bool useTensorBase) : + LinalgBase(name, Type(type(), {}) , type(), {}, Format(), false) {} } // namespace taco #endif diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index cdb6abb75..5c47de49e 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -38,7 +38,8 @@ namespace taco { struct LinalgTensorBaseNode : public LinalgExprNode { LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase) : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder()), tensorVar(tensorVar), tensorBase(tensorBase) {} - + LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase, bool isColVec) + : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec), tensorVar(tensorVar), tensorBase(tensorBase) {} void accept(LinalgExprVisitorStrict* v) const override { v->visit(this); } diff --git a/include/taco/parser/linalg_parser.h b/include/taco/parser/linalg_parser.h index 8875ba723..86a801f44 100644 --- a/include/taco/parser/linalg_parser.h +++ b/include/taco/parser/linalg_parser.h @@ -11,13 +11,17 @@ #include "taco/util/uncopyable.h" #include "taco/type.h" #include "taco/parser/parser.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" 
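With the rewrite now invoked from operator=, the explicit rewrite() calls in the tests become unnecessary; conceptually, usage collapses to the sketch below (same caveats as before: the <double> template arguments and the function name are assumptions, since template parameters are stripped from this patch text).

// Illustrative sketch only, not part of the patch.
#include <iostream>
#include "taco/linalg.h"

using namespace taco;

void implicitRewriteSketch() {
  Matrix<double> A("A", 2, 2, dense, dense);
  Matrix<double> B("B", 2, 2, dense, dense);
  Matrix<double> C("C", 2, 2, dense, dense);

  A = B * C;                                        // operator= records the assignment and rewrites it
  std::cout << A.getIndexAssignment() << std::endl; // index notation is already available here
}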
namespace taco { class TensorBase; +class LinalgBase; class Format; class IndexVar; -class IndexExpr; -class Access; +class LinalgExpr; + +class LinalgStmt; +class LinalgAssignment; namespace parser { enum class Token; @@ -64,26 +68,26 @@ class LinalgParser : public AbstractParser { std::vector names; /// assign ::= var '=' expr - TensorBase parseAssign(); + LinalgAssignment parseAssign(); /// expr ::= term {('+' | '-') term} - IndexExpr parseExpr(); + LinalgExpr parseExpr(); /// term ::= factor {('*' | '/') factor} - IndexExpr parseTerm(); + LinalgExpr parseTerm(); /// factor ::= final /// | '(' expr ')' /// | '-' factor /// | factor '^T' - IndexExpr parseFactor(); + LinalgExpr parseFactor(); /// final ::= var /// | scalar - IndexExpr parseFinal(); + LinalgExpr parseFinal(); /// var ::= identifier - Access parseVar(); + LinalgBase parseVar(); std::string currentTokenString(); diff --git a/src/linalg.cpp b/src/linalg.cpp index 607cea40c..16009ca95 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -12,11 +12,13 @@ LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name) LinalgExpr(TensorVar(name, tensorType), isColVec) { } -LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0) { - if(isa(ptr)) { - /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ - cout << this->tensorBase->getName() << endl; - } +LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : + LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), + tensorType(tensorType), idxcount(0) { + if(isa(ptr)) { + /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ + cout << this->tensorBase->getName() << endl; + } } LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), @@ -29,14 +31,17 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); TensorVar var = to(this->get())->tensorVar; + cout << var.getOrder() << endl; + cout << expr.getOrder() << endl; taco_uassert(var.getOrder() == expr.getOrder()) << "RHS and LHS of linalg assignment must match order"; if (var.getOrder() == 1) taco_uassert(this->isColVector() == expr.isColVector()) << "RHS and LHS of linalg assignment must match vector type"; LinalgAssignment assignment = LinalgAssignment(var, expr); this->assignment = assignment; - + cout << "rewrite here" << endl; this->rewrite(); + cout << "end rewrite" << endl; return assignment; } @@ -103,8 +108,8 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { indicesB = {index}; } else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { - indicesA = {indices[0], index}; - indicesB = {index}; + indicesA = {indices[0]}; + indicesB = {indices[1]}; } else { indicesA = {index}; indicesB = {index}; @@ -204,7 +209,7 @@ IndexStmt LinalgBase::rewrite() { TensorVar tensor = this->assignment.getLhs(); - vector indices; + vector indices = {}; if (tensor.getOrder() == 1) { indices.push_back(IndexVar("i")); } else if (tensor.getOrder() == 2) { @@ -213,11 +218,13 @@ IndexStmt LinalgBase::rewrite() { } Access lhs = Access(tensor, indices); IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); + cout << 
"rhs done here" << endl; - if(this->tensorBase != NULL) { - /* cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; */ - this->tensorBase->operator()(indices) = rhs; - /* cout << "--- Done assigning RHS to Tensor API ---" << endl; */ + if(this->tensorBase != nullptr) { + cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; + cout << rhs << endl; + this->tensorBase->operator()(indices) = rhs; + cout << "--- Done assigning RHS to Tensor API ---" << endl; } Assignment indexAssign = Assignment(lhs, rhs); @@ -231,7 +238,7 @@ std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) { LinalgAssignment assignment = linalg.getAssignment(); // If TensorBase exists, print the storage - if (linalg.tensorBase != NULL) { + if (linalg.tensorBase != nullptr) { return os << *(linalg.tensorBase) << endl; } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 4f5f31047..654d83b1b 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -1,4 +1,4 @@ -#include "taco/index_notation/index_notation.h" +//#include "taco/linalg_notation/linalg_notation.h" #include #include @@ -12,23 +12,17 @@ #include "error/error_checks.h" #include "taco/error/error_messages.h" #include "taco/type.h" -#include "taco/format.h" -#include "taco/index_notation/intrinsic.h" +//#include "taco/linalg_notation/linalg_notation.h" +#include "taco/linalg.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" + #include "taco/index_notation/schedule.h" #include "taco/index_notation/transformations.h" #include "taco/index_notation/index_notation_nodes.h" -#include "taco/linalg_notation/linalg_notation_nodes.h" -#include "taco/index_notation/index_notation_rewriter.h" -#include "taco/linalg_notation/linalg_notation_printer.h" + #include "taco/ir/ir.h" -#include "taco/lower/lower.h" -#include "taco/codegen/module.h" -#include "taco/util/name_generator.h" -#include "taco/util/scopedmap.h" -#include "taco/util/strings.h" -#include "taco/util/collections.h" using namespace std; @@ -37,7 +31,7 @@ namespace taco { LinalgExpr::LinalgExpr(TensorVar var) : LinalgExpr(new LinalgVarNode(var)) { } -LinalgExpr::LinalgExpr(TensorVar var, bool isColVec, TensorBase* _tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, _tensorBase)) { +LinalgExpr::LinalgExpr(TensorVar var, bool isColVec, TensorBase* _tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, _tensorBase, isColVec)) { tensorBase = _tensorBase; } @@ -119,7 +113,7 @@ void checkCompatibleShape(const LinalgExpr &lhs, const LinalgExpr &rhs) { } LinalgExpr operator-(const LinalgExpr &expr) { - return new LinalgNegNode(expr.ptr); + return LinalgExpr(new LinalgNegNode(expr.ptr)); } LinalgExpr operator+(const LinalgExpr &lhs, const LinalgExpr &rhs) { @@ -174,6 +168,7 @@ LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { } LinalgExpr transpose(const LinalgExpr &lhs) { + cout << "transpose here" << endl; return new LinalgTransposeNode(lhs, !lhs.isColVector()); } diff --git a/src/linalg_notation/linalg_notation_nodes_abstract.cpp b/src/linalg_notation/linalg_notation_nodes_abstract.cpp index 532a4943e..52cf9ba3e 100644 --- a/src/linalg_notation/linalg_notation_nodes_abstract.cpp +++ b/src/linalg_notation/linalg_notation_nodes_abstract.cpp @@ -39,5 +39,4 @@ bool LinalgExprNode::isColVector() const { void LinalgExprNode::setColVector(bool val) { isColVec = val; } - } diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp index 
409328f43..f25429e09 100644 --- a/src/parser/linalg_parser.cpp +++ b/src/parser/linalg_parser.cpp @@ -7,9 +7,9 @@ #include "taco/tensor.h" #include "taco/format.h" -#include "taco/index_notation/index_notation.h" -#include "taco/index_notation/index_notation_nodes.h" -#include "taco/index_notation/index_notation_rewriter.h" +#include "taco/linalg_notation/linalg_notation.h" +#include "taco/linalg_notation/linalg_notation_nodes.h" +#include "taco/linalg.h" #include "taco/util/collections.h" @@ -62,118 +62,33 @@ struct LinalgParser::Content { } void LinalgParser::parse() { - content->resultTensor = parseAssign(); + content->resultTensor = parseAssign().getLhs(); } const TensorBase& LinalgParser::getResultTensor() const { return content->resultTensor; } -TensorBase LinalgParser::parseAssign() { +LinalgAssignment LinalgParser::parseAssign() { content->parsingLhs = true; cout << "parsing lhs" << endl; - Access lhs = parseVar(); + LinalgExpr lhs = parseVar(); + const TensorVar var = to(lhs.get())->tensorVar; cout << "Result of parsing LHS" << endl; cout << lhs << endl; content->parsingLhs = false; cout << "parsing rhs" << endl; consume(Token::eq); - IndexExpr rhs = parseExpr(); + LinalgExpr rhs = parseExpr(); cout << "Result of parsing RHS" << endl; cout << rhs << endl; - // Collect all index var dimensions - struct Visitor : IndexNotationVisitor { - using IndexNotationVisitor::visit; - set> modesWithDefaults; - map *indexVarDimensions; - - void visit(const AccessNode *op) { - for (size_t i = 0; i < op->indexVars.size(); i++) { - IndexVar indexVar = op->indexVars[i]; - if (!util::contains(modesWithDefaults, {op->tensorVar, i})) { - auto dimension = op->tensorVar.getType().getShape().getDimension(i); - if (util::contains(*indexVarDimensions, indexVar)) { - taco_uassert(indexVarDimensions->at(indexVar) == dimension) << - "Incompatible dimensions"; - } else { - indexVarDimensions->insert({indexVar, dimension.getSize()}); - } - } - } - } - }; - Visitor visitor; - visitor.indexVarDimensions = &content->indexVarDimensions; - visitor.modesWithDefaults = content->modesWithDefaults; - rhs.accept(&visitor); - - // Rewrite expression to new index dimensions - struct Rewriter : IndexNotationRewriter { - using IndexNotationRewriter::visit; - map *indexVarDimensions; - map tensors; - - void visit(const AccessNode *op) { - bool dimensionChanged = false; - Shape shape = op->tensorVar.getType().getShape(); - vector dimensions; - for (auto &dimension : shape) { - taco_iassert(dimension.isFixed()); - dimensions.push_back((int) dimension.getSize()); - } - - taco_uassert(op->indexVars.size() == dimensions.size()) << - "The order of " << op->tensorVar.getName() - << " is inconsistent " << - "between tensor accesses or options. 
Is it order " << - dimensions.size() << " or " << op->indexVars.size() - << "?"; - - for (size_t i = 0; i < dimensions.size(); i++) { - IndexVar indexVar = op->indexVars[i]; - if (util::contains(*indexVarDimensions, indexVar)) { - int dimension = indexVarDimensions->at(indexVar); - if (dimension != dimensions[i]) { - dimensions[i] = dimension; - dimensionChanged = true; - } - } - } - if (dimensionChanged) { - TensorBase tensor; - if (util::contains(tensors, op->tensorVar.getName())) { - tensor = tensors.at(op->tensorVar.getName()); - } else { - tensor = TensorBase(op->tensorVar.getName(), - op->tensorVar.getType().getDataType(), dimensions, - op->tensorVar.getFormat()); - tensors.insert({tensor.getName(), tensor}); - } - expr = tensor(op->indexVars); - } else { - expr = op; - } - } - }; - Rewriter rewriter; - rewriter.indexVarDimensions = visitor.indexVarDimensions; - rhs = rewriter.rewrite(rhs); - - IndexExpr rewrittenLhs = rewriter.rewrite(lhs); - - for (auto &tensor : rewriter.tensors) { - content->tensors.at(tensor.first) = tensor.second; - } - content->resultTensor = content->tensors.at(lhs.getTensorVar().getName()); - - content->resultTensor(lhs.getIndexVars()) = rhs; - return content->resultTensor; + return LinalgAssignment(var, rhs); } -IndexExpr LinalgParser::parseExpr() { - IndexExpr expr = parseTerm(); +LinalgExpr LinalgParser::parseExpr() { + LinalgExpr expr = parseTerm(); while (content->currentToken == Token::add || content->currentToken == Token::sub) { switch (content->currentToken) { @@ -192,8 +107,8 @@ IndexExpr LinalgParser::parseExpr() { return expr; } -IndexExpr LinalgParser::parseTerm() { - IndexExpr term = parseFactor(); +LinalgExpr LinalgParser::parseTerm() { + LinalgExpr term = parseFactor(); while (content->currentToken == Token::mul || content->currentToken == Token::div) { switch (content->currentToken) { @@ -214,31 +129,31 @@ IndexExpr LinalgParser::parseTerm() { return term; } -IndexExpr LinalgParser::parseFactor() { +LinalgExpr LinalgParser::parseFactor() { switch (content->currentToken) { case Token::lparen: { consume(Token::lparen); - IndexExpr factor = parseExpr(); + LinalgExpr factor = parseExpr(); consume(Token::rparen); return factor; } case Token::sub: consume(Token::sub); - return new NegNode(parseFactor()); + return new LinalgNegNode(parseFactor()); default: break; } - IndexExpr final = parseFinal(); + LinalgExpr final = parseFinal(); if (content->currentToken == Token::caretT) { consume(Token::caretT); - return new TransposeNode(final); + return new LinalgTransposeNode(final); } return final; } -IndexExpr LinalgParser::parseFinal() { +LinalgExpr LinalgParser::parseFinal() { std::istringstream value (content->lexer.getIdentifier()); switch (content->currentToken) { case Token::complex_scalar: @@ -246,35 +161,35 @@ IndexExpr LinalgParser::parseFinal() { consume(Token::complex_scalar); std::complex complex_value; value >> complex_value; - return IndexExpr(complex_value); + return LinalgExpr(complex_value); } case Token::int_scalar: { consume(Token::int_scalar); int64_t int_value; value >> int_value; - return IndexExpr(int_value); + return LinalgExpr(int_value); } case Token::uint_scalar: { consume(Token::uint_scalar); uint64_t uint_value; value >> uint_value; - return IndexExpr(uint_value); + return LinalgExpr(uint_value); } case Token::float_scalar: { consume(Token::float_scalar); double float_value; value >> float_value; - return IndexExpr(float_value); + return LinalgExpr(float_value); } default: return parseVar(); } } -Access 
LinalgParser::parseVar() { +LinalgBase LinalgParser::parseVar() { if(content->currentToken != Token::identifier) { throw ParseError("Expected linalg name"); } @@ -333,14 +248,7 @@ Access LinalgParser::parseVar() { content->tensors.insert({tensorName,tensor}); } - - cout << order << endl; - vector idxlist = getUniqueIndices(order); - cout << "Idxlist"; - for (auto i : idxlist) - cout << i << ", "; - - return tensor(idxlist); + return LinalgBase(tensor.getName(),tensor.getComponentType(), tensor.getFormat() ); } vector LinalgParser::getUniqueIndices(size_t order) { diff --git a/src/tensor.cpp b/src/tensor.cpp index 32edd1c17..cce0d726a 100644 --- a/src/tensor.cpp +++ b/src/tensor.cpp @@ -487,6 +487,7 @@ struct AccessTensorNode : public AccessNode { }; const Access TensorBase::operator()(const std::vector& indices) const { + cout << "Const op() call" << endl; taco_uassert(indices.size() == (size_t)getOrder()) << "A tensor of order " << getOrder() << " must be indexed with " << getOrder() << " variables, but is indexed with: " @@ -495,10 +496,14 @@ const Access TensorBase::operator()(const std::vector& indices) const } Access TensorBase::operator()(const std::vector& indices) { + cout << "Non-Const op() call" << endl; + cout << to_string(getOrder()) << endl; + cout << " after getOrder" << endl; taco_uassert(indices.size() == (size_t)getOrder()) << "A tensor of order " << getOrder() << " must be indexed with " << getOrder() << " variables, but is indexed with: " << util::join(indices); + cout << " after uassert" << endl; return Access(new AccessTensorNode(*this, indices)); } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 1889de070..1fea93525 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -124,7 +124,7 @@ TEST(linalg, tensorapi) { } TEST(linalg, inner_mul) { - Scalar x("x"); + Scalar x("x", true); Vector b("b", 2, dense, false); Vector a("a", 2, dense, true); @@ -154,7 +154,7 @@ TEST(linalg, outer_mul) { TEST(linalg, rowvec_transpose) { Vector b("b", 2, dense, false); Matrix A("A", 2, 2, dense, dense); - Scalar a("a"); + Scalar a("a", true); a = transpose(transpose(b) * A * b); From 0e06201861af1c0def93ba6a6971a77e9ef9d667 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Thu, 12 Nov 2020 14:30:50 -0800 Subject: [PATCH 23/61] remove the more egregious spewing of couts --- src/index_notation/index_notation.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/index_notation/index_notation.cpp b/src/index_notation/index_notation.cpp index 1d7f2300f..1f857a5fc 100644 --- a/src/index_notation/index_notation.cpp +++ b/src/index_notation/index_notation.cpp @@ -769,7 +769,6 @@ static void check(Assignment assignment) { } Assignment Access::operator=(const IndexExpr& expr) { - cout << "Main Access::operator= called" << endl; TensorVar result = getTensorVar(); Assignment assignment = Assignment(*this, expr); check(assignment); @@ -778,12 +777,10 @@ Assignment Access::operator=(const IndexExpr& expr) { } Assignment Access::operator=(const Access& expr) { - cout << "accessexpr Access::operator= called" << endl; return operator=(static_cast(expr)); } Assignment Access::operator=(const TensorVar& var) { - cout << "tensorvaraccess Access::operator= called" << endl; return operator=(Access(var)); } From d6f25c1283709b2a2ee41ee4f0b0ffaf80693f10 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Thu, 12 Nov 2020 15:41:32 -0800 Subject: [PATCH 24/61] add data to testcases involving vectors + add a write method for vector --- include/taco/linalg.h | 13 +++++ 
test/tests-linalg.cpp | 128 ++++++++++++++++++++++++++++++------------ 2 files changed, 104 insertions(+), 37 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index dcde04948..957c82f62 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -169,6 +169,12 @@ class Vector : public LinalgBase { LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } + + // Support some Write methods + void insert(int coord, CType value); + + // Support some Read methods too + CType at(int coord); }; // ------------------------------------------------------------ @@ -201,6 +207,13 @@ template Vector::Vector(std::string name, Type type, ModeFormat format, bool isColVec) : LinalgBase(name, type, Format(format), isColVec) {} + +// Vector write methods +template +void Vector::insert(int coord, CType value) { + tensorBase->insert({coord}, value); +} + template class Scalar : public LinalgBase { std::string name; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 1fea93525..8ce09ca16 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -42,6 +42,15 @@ TEST(linalg, matmul) { cout << "--- Before getIndexAssignment on A ---" << endl; cout << A.getIndexAssignment() << endl; cout << "--- After getIndexAssignment on A ---" << endl; + + + // Get the values.. + map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; + /* for (auto val = A.beginTyped(); val != A.endTyped(); ++val) { */ + /* ASSERT_TRUE(util::contains(vals, val->first.toVector())); */ + /* ASSERT_EQ(vals.at(val->first.toVector()), val->second); */ + /* } */ + } TEST(linalg, tensorbase) { @@ -49,8 +58,15 @@ TEST(linalg, tensorbase) { Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); + B.insert(0,0,1); + B.insert(1,1,4); + + C.insert(0,1,2); + C.insert(1,0,3); + A = B + C; + // Should be [1,2,3,4] cout << A << endl; cout << A.getIndexAssignment(); @@ -63,8 +79,16 @@ TEST(linalg, matvec_mul) { Vector b("b", 2, dense); Matrix A("A", 2, 2, dense, dense); + b.insert(0,2); + b.insert(1,1); + + A.insert(0,0,1); + A.insert(0,1,3); + A.insert(1,1,2); + x = A*b; + // Should be [5,2] cout << x << endl; cout << x.getIndexAssignment(); @@ -77,6 +101,14 @@ TEST(linalg, vecmat_mul) { Vector b("b", 2, dense, false); Matrix A("A", 2, 2, dense, dense); + b.insert(0,3); + b.insert(1,-2); + + A.insert(0,0,5); + A.insert(0,1,2); + A.insert(1,0,-1); + + // Should be [17, 6] x = b * A; cout << x << endl; @@ -86,50 +118,20 @@ TEST(linalg, vecmat_mul) { ASSERT_TRUE(1); } -TEST(linalg, tensorapi) { - cout << "--- Beginning of TensorAPI test ---" << endl; - Tensor a({2,2}, dense); - Tensor b({2,3}, dense); - Tensor c({3,2}, dense); - - cout << "--- Initialized Tensors ---" << endl; - - b(0,0) = 2; - b(1,1) = 1; - b(0,1) = 2; - - cout << "--- Initializing c ---" << endl; - - c(0,0) = 2; - c(1,1) = 2; - - cout << "--- Declaring IndexVars ---" << endl; - - IndexVar i,j,k; - - // The original - /* a(i,j) = b(i,k) * c(k,j); */ - - // The broken-up version - cout << "--- Creating operand IndexExprs ---" << endl; - - IndexExpr tc = c(k,j); - IndexExpr tb = b(i,k); - - cout << "Pre-assignment" << endl; - a(i,j) = tb * tc; - cout << "Post-assignment" << endl; - - /* cout << a << endl; */ -} - TEST(linalg, inner_mul) { Scalar x("x", true); Vector b("b", 2, dense, false); Vector a("a", 2, dense, true); + b.insert(0,2); + b.insert(1,3); + + a.insert(0,-3); + a.insert(1,5); + x = b * a; + // Should be 9 cout << x << endl; cout << x.getIndexAssignment(); @@ -142,8 +144,15 @@ 
TEST(linalg, outer_mul) { Vector b("b", 2, dense, false); Vector a("a", 2, dense, true); + b.insert(0,2); + b.insert(1,3); + + a.insert(0,-3); + a.insert(1,5); + X = a * b; + // Should be [-6,-9,10,15] cout << X << endl; cout << X.getIndexAssignment(); @@ -156,11 +165,56 @@ TEST(linalg, rowvec_transpose) { Matrix A("A", 2, 2, dense, dense); Scalar a("a", true); + b.insert(0,2); + b.insert(1,5); + + A.insert(0,0,1); + A.insert(0,1,2); + A.insert(1,1,4); + a = transpose(transpose(b) * A * b); + // Should be 124 cout << a << endl; cout << a.getIndexAssignment(); ASSERT_TRUE(1); } + +TEST(linalg, tensorapi) { + cout << "--- Beginning of TensorAPI test ---" << endl; + Tensor a({2,2}, dense); + Tensor b({2,3}, dense); + Tensor c({3,2}, dense); + + cout << "--- Initialized Tensors ---" << endl; + + b(0,0) = 2; + b(1,1) = 1; + b(0,1) = 2; + + cout << "--- Initializing c ---" << endl; + + c(0,0) = 2; + c(1,1) = 2; + + cout << "--- Declaring IndexVars ---" << endl; + + IndexVar i,j,k; + + // The original + /* a(i,j) = b(i,k) * c(k,j); */ + + // The broken-up version + cout << "--- Creating operand IndexExprs ---" << endl; + + IndexExpr tc = c(k,j); + IndexExpr tb = b(i,k); + + cout << "Pre-assignment" << endl; + a(i,j) = tb * tc; + cout << "Post-assignment" << endl; + + /* cout << a << endl; */ +} From 98aedd509828d877cfde23fd828744e468b15dd0 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Thu, 12 Nov 2020 16:20:54 -0800 Subject: [PATCH 25/61] use ASSERT_EQ in some tests --- include/taco/linalg.h | 18 +++++++----------- test/tests-linalg.cpp | 23 ++++++++++++++++------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 957c82f62..541c1636a 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -125,17 +125,6 @@ Matrix::Matrix(std::string name, Type tensorType, Format format) : Linalg // Definition of Read methods template CType Matrix::at(int coord_x, int coord_y) { - std::cout << "Name: " << name << std::endl; - std::cout << tensorBase << std::endl; - std::cout << "Matrix found a TBase " << tensorBase->getName() << std::endl; - std::cout << "Will print a coordinate" << std::endl; - - - // Check if this LinalgBase holds an assignment - if (this->assignment.ptr != NULL) { - std::cout << "This matrix is the result of an assignment" << std::endl; - } - return tensorBase->at({coord_x, coord_y}); } @@ -214,6 +203,11 @@ void Vector::insert(int coord, CType value) { tensorBase->insert({coord}, value); } +template +CType Vector::at(int coord) { + return tensorBase->at({coord}); +} + template class Scalar : public LinalgBase { std::string name; @@ -225,6 +219,8 @@ class Scalar : public LinalgBase { LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } + + /* operator int() const { return */ }; template diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 8ce09ca16..a426bfb1a 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -43,9 +43,13 @@ TEST(linalg, matmul) { cout << A.getIndexAssignment() << endl; cout << "--- After getIndexAssignment on A ---" << endl; + ASSERT_EQ(A.at(0,0), 4); + ASSERT_EQ(A.at(0,1), 4); + ASSERT_EQ(A.at(1,0), 0); + ASSERT_EQ(A.at(1,1), 2); - // Get the values.. 
- map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; + // TODO: Support this style of accessing and querying the values, too + /* map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; */ /* for (auto val = A.beginTyped(); val != A.endTyped(); ++val) { */ /* ASSERT_TRUE(util::contains(vals, val->first.toVector())); */ /* ASSERT_EQ(vals.at(val->first.toVector()), val->second); */ @@ -71,7 +75,10 @@ TEST(linalg, tensorbase) { cout << A.getIndexAssignment(); - ASSERT_TRUE(1); + ASSERT_EQ(A.at(0,0), 1); + ASSERT_EQ(A.at(0,1), 2); + ASSERT_EQ(A.at(1,0), 3); + ASSERT_EQ(A.at(1,1), 4); } TEST(linalg, matvec_mul) { @@ -88,12 +95,13 @@ TEST(linalg, matvec_mul) { x = A*b; + ASSERT_EQ(x.at(0), 5); + ASSERT_EQ(x.at(1), 2); + // Should be [5,2] cout << x << endl; cout << x.getIndexAssignment(); - - ASSERT_TRUE(1); } TEST(linalg, vecmat_mul) { @@ -111,11 +119,12 @@ TEST(linalg, vecmat_mul) { // Should be [17, 6] x = b * A; + ASSERT_EQ(x.at(0), 17); + ASSERT_EQ(x.at(1), 6); + cout << x << endl; cout << x.getIndexAssignment(); - - ASSERT_TRUE(1); } TEST(linalg, inner_mul) { From c7b8c4aa245878c4fd78e4c168dc950fe6a56f37 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Mon, 16 Nov 2020 11:52:29 -0800 Subject: [PATCH 26/61] Add in changes to fix linalg_parser --- include/taco/linalg.h | 1 + .../taco/linalg_notation/linalg_notation.h | 4 +- .../linalg_notation/linalg_notation_printer.h | 1 + .../linalg_notation/linalg_notation_visitor.h | 4 + include/taco/parser/lexer.h | 2 + include/taco/parser/linalg_parser.h | 4 +- src/linalg.cpp | 9 ++ src/linalg_notation/linalg_notation.cpp | 8 +- .../linalg_notation_printer.cpp | 4 + src/lower/lowerer_impl.cpp | 2 + src/parser/lexer.cpp | 10 +++ src/parser/linalg_parser.cpp | 86 ++++++++++++++++--- src/parser/parser.cpp | 9 ++ src/tensor.cpp | 1 + test/tests-linalg.cpp | 19 ++++ 15 files changed, 148 insertions(+), 16 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 541c1636a..e45fcc1b8 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -35,6 +35,7 @@ class LinalgBase : public LinalgExpr { LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec = false); LinalgBase(std::string name, Type tensorType, bool isColVec = false); LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); + LinalgBase(TensorBase* tensor, bool isColVec = false); /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr &expr); diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 4b94ed560..8e761c1f3 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -74,10 +74,12 @@ class LinalgExpr : public util::IntrusivePtr { /// ``` /// A(i,j) = b; /// ``` - LinalgExpr(TensorVar); + explicit LinalgExpr(TensorVar); LinalgExpr(TensorVar, bool isColVec, TensorBase* tensorBase); + explicit LinalgExpr(TensorBase* _tensorBase, bool isColVec=false); + LinalgExpr(TensorVar var, bool isColVec); /// Consturct an integer literal. 
/// ``` diff --git a/include/taco/linalg_notation/linalg_notation_printer.h b/include/taco/linalg_notation/linalg_notation_printer.h index 6c32bfa92..7a4754c2d 100644 --- a/include/taco/linalg_notation/linalg_notation_printer.h +++ b/include/taco/linalg_notation/linalg_notation_printer.h @@ -17,6 +17,7 @@ class LinalgNotationPrinter : public LinalgNotationVisitorStrict { // Scalar Expressions void visit(const LinalgVarNode*); + void visit(const LinalgTensorBaseNode*); void visit(const LinalgLiteralNode*); void visit(const LinalgNegNode*); void visit(const LinalgAddNode*); diff --git a/include/taco/linalg_notation/linalg_notation_visitor.h b/include/taco/linalg_notation/linalg_notation_visitor.h index a53178fca..001dbdd7a 100644 --- a/include/taco/linalg_notation/linalg_notation_visitor.h +++ b/include/taco/linalg_notation/linalg_notation_visitor.h @@ -8,6 +8,7 @@ class LinalgStmt; class TensorVar; struct LinalgVarNode; +struct LinalgTensorBaseNode; struct LinalgLiteralNode; struct LinalgNegNode; struct LinalgTransposeNode; @@ -31,6 +32,8 @@ class LinalgExprVisitorStrict { virtual void visit(const LinalgVarNode *) = 0; + virtual void visit(const LinalgTensorBaseNode*) = 0; + virtual void visit(const LinalgLiteralNode *) = 0; virtual void visit(const LinalgNegNode *) = 0; @@ -76,6 +79,7 @@ class LinalgNotationVisitor : public LinalgNotationVisitorStrict { // Index Expressions virtual void visit(const LinalgVarNode* node); + virtual void visit(const LinalgTensorBaseNode* node); virtual void visit(const LinalgLiteralNode* node); virtual void visit(const LinalgNegNode* node); virtual void visit(const LinalgAddNode* node); diff --git a/include/taco/parser/lexer.h b/include/taco/parser/lexer.h index 30b2d05f8..c5a626cac 100644 --- a/include/taco/parser/lexer.h +++ b/include/taco/parser/lexer.h @@ -24,6 +24,8 @@ enum class Token { div, eq, caretT, + elemMul, + transpose, eot, // End of tokens error }; diff --git a/include/taco/parser/linalg_parser.h b/include/taco/parser/linalg_parser.h index 86a801f44..a667d8423 100644 --- a/include/taco/parser/linalg_parser.h +++ b/include/taco/parser/linalg_parser.h @@ -68,7 +68,7 @@ class LinalgParser : public AbstractParser { std::vector names; /// assign ::= var '=' expr - LinalgAssignment parseAssign(); + LinalgBase parseAssign(); /// expr ::= term {('+' | '-') term} LinalgExpr parseExpr(); @@ -86,6 +86,8 @@ class LinalgParser : public AbstractParser { /// | scalar LinalgExpr parseFinal(); + LinalgExpr parseCall(); + /// var ::= identifier LinalgBase parseVar(); diff --git a/src/linalg.cpp b/src/linalg.cpp index 16009ca95..7a18c144d 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -21,6 +21,15 @@ LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector } } +LinalgBase::LinalgBase(TensorBase* tbase, bool isColVec) : + LinalgExpr(tbase, isColVec), name(tbase->getName()), + tensorType(tbase->getTensorVar().getType()), idxcount(0) { + if(isa(ptr)) { + /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ + cout << this->tensorBase->getName() << endl; + } +} + LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), idxcount(0), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index 654d83b1b..e5ed57cff 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -35,6 +35,10 @@ 
LinalgExpr::LinalgExpr(TensorVar var, bool isColVec, TensorBase* _tensorBase) : tensorBase = _tensorBase; } +LinalgExpr::LinalgExpr(TensorBase* _tensorBase, bool isColVec) : LinalgExpr(new LinalgTensorBaseNode(_tensorBase->getTensorVar(), _tensorBase, isColVec)) { + +} + LinalgExpr::LinalgExpr(TensorVar var, bool isColVec) : LinalgExpr(new LinalgVarNode(var, isColVec)) { } @@ -151,7 +155,9 @@ LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { order = 2; } else { - taco_uassert(lhs.getOrder() != rhs.getOrder()) << "RHS and LHS order/vector type do not match " + taco_uassert(lhs.getOrder() != rhs.getOrder()) << "LHS (" << lhs.getOrder() << "," << lhs.isColVector() + << ") and RHS (" << rhs.getOrder() << "," << rhs.isColVector() + << ") order/vector type do not match " "for linear algebra matrix multiply" << endl; } return new LinalgMatMulNode(lhs, rhs, order, isColVec); diff --git a/src/linalg_notation/linalg_notation_printer.cpp b/src/linalg_notation/linalg_notation_printer.cpp index 7dcb246e9..0c8041895 100644 --- a/src/linalg_notation/linalg_notation_printer.cpp +++ b/src/linalg_notation/linalg_notation_printer.cpp @@ -22,6 +22,10 @@ void LinalgNotationPrinter::visit(const LinalgVarNode* op) { os << op->tensorVar.getName(); } +void LinalgNotationPrinter::visit(const LinalgTensorBaseNode* op) { + os << op->tensorBase->getName(); +} + void LinalgNotationPrinter::visit(const LinalgLiteralNode* op) { switch (op->getDataType().getKind()) { case Datatype::Bool: diff --git a/src/lower/lowerer_impl.cpp b/src/lower/lowerer_impl.cpp index 7a0af13b3..79d8a3362 100644 --- a/src/lower/lowerer_impl.cpp +++ b/src/lower/lowerer_impl.cpp @@ -1378,7 +1378,9 @@ Stmt LowererImpl::lowerSuchThat(SuchThat suchThat) { Expr LowererImpl::lowerAccess(Access access) { + TensorVar var = access.getTensorVar(); + cout << "Lower access " << var.getName() << endl; if (isScalar(var.getType())) { return getTensorVar(var); diff --git a/src/parser/lexer.cpp b/src/parser/lexer.cpp index e462c4023..8b0913dd3 100644 --- a/src/parser/lexer.cpp +++ b/src/parser/lexer.cpp @@ -31,6 +31,10 @@ Token Lexer::getToken() { lastChar = getNextChar(); return Token::complex_scalar; } + if (identifier == "transpose") + return Token::transpose; + if (identifier == "elemMul") + return Token::elemMul; return Token::identifier; } if(isdigit(lastChar)) { @@ -171,6 +175,12 @@ std::string Lexer::tokenString(const Token& token) { case Token::caretT: str = "^T"; break; + case Token::elemMul: + str = "elemMul"; + break; + case Token::transpose: + str = "transpose"; + break; case Token::eot: default: taco_ierror; diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp index f25429e09..7cf530e6f 100644 --- a/src/parser/linalg_parser.cpp +++ b/src/parser/linalg_parser.cpp @@ -62,20 +62,23 @@ struct LinalgParser::Content { } void LinalgParser::parse() { - content->resultTensor = parseAssign().getLhs(); + LinalgBase linalgBase = parseAssign(); + linalgBase.rewrite(); + content->resultTensor = *linalgBase.tensorBase; } const TensorBase& LinalgParser::getResultTensor() const { return content->resultTensor; } -LinalgAssignment LinalgParser::parseAssign() { +LinalgBase LinalgParser::parseAssign() { content->parsingLhs = true; cout << "parsing lhs" << endl; - LinalgExpr lhs = parseVar(); - const TensorVar var = to(lhs.get())->tensorVar; + LinalgBase lhs = parseVar(); + cout << "end parsing lhs" << endl; + const TensorVar var = lhs.tensorBase->getTensorVar(); cout << "Result of parsing LHS" << endl; - cout << lhs << endl; + cout 
<< var.getName() << endl; content->parsingLhs = false; cout << "parsing rhs" << endl; @@ -83,8 +86,9 @@ LinalgAssignment LinalgParser::parseAssign() { LinalgExpr rhs = parseExpr(); cout << "Result of parsing RHS" << endl; cout << rhs << endl; + lhs = rhs; - return LinalgAssignment(var, rhs); + return lhs; } LinalgExpr LinalgParser::parseExpr() { @@ -108,6 +112,8 @@ LinalgExpr LinalgParser::parseExpr() { } LinalgExpr LinalgParser::parseTerm() { + + LinalgExpr term = parseFactor(); while (content->currentToken == Token::mul || content->currentToken == Token::div) { @@ -122,6 +128,7 @@ LinalgExpr LinalgParser::parseTerm() { term = term / parseFactor(); break; } + default: taco_unreachable; } @@ -137,19 +144,29 @@ LinalgExpr LinalgParser::parseFactor() { consume(Token::rparen); return factor; } - case Token::sub: + case Token::sub: { consume(Token::sub); - return new LinalgNegNode(parseFactor()); + return -parseFactor(); + } + case Token::transpose: { + consume(Token::transpose); + consume(Token::lparen); + LinalgExpr factor = parseExpr(); + consume(Token::rparen); + return transpose(factor); + } default: break; } - LinalgExpr final = parseFinal(); if (content->currentToken == Token::caretT) { + LinalgExpr factor = parseFactor(); consume(Token::caretT); - return new LinalgTransposeNode(final); + return transpose(factor); } + + LinalgExpr final = parseFinal(); return final; } @@ -185,12 +202,38 @@ LinalgExpr LinalgParser::parseFinal() { return LinalgExpr(float_value); } default: - return parseVar(); + return parseCall(); + } +} + +LinalgExpr LinalgParser::parseCall() { + switch (content->currentToken) { + case Token::elemMul: { + consume(Token::elemMul); + consume(Token::lparen); + LinalgExpr term = parseExpr(); + consume(Token::comma); + term = elemMul(term, parseExpr()); + consume(Token::rparen); + return term; + } + case Token::transpose: { + consume(Token::transpose); + consume(Token::lparen); + LinalgExpr term = parseExpr(); + consume(Token::rparen); + return transpose(term); + } + default: + break; } + return parseVar(); } LinalgBase LinalgParser::parseVar() { + if(content->currentToken != Token::identifier) { + cout << currentTokenString(); throw ParseError("Expected linalg name"); } string tensorName = content->lexer.getIdentifier(); @@ -199,12 +242,27 @@ LinalgBase LinalgParser::parseVar() { names.push_back(tensorName); size_t order = 0; + bool isColVec = false; // LinalgParser: By default assume capital variables are Matrices and lower case variables are vectors if (isupper(tensorName.at(0))) { order = 2; } else { order = 1; + isColVec = true; } + + if (content->formats.find(tensorName) != content->formats.end()) { + + if (content->tensorDimensions.find(tensorName) != content->tensorDimensions.end()) + taco_uassert(content->formats.at(tensorName).getOrder() == (int)content->tensorDimensions.at(tensorName).size()) + << "Tensor format and tensor dimensions must match" << endl; + + order = content->formats.at(tensorName).getOrder(); + } else { + if (content->tensorDimensions.find(tensorName) != content->tensorDimensions.end()) + order = content->tensorDimensions.at(tensorName).size(); + } + cout << order << endl; Format format; @@ -239,7 +297,6 @@ LinalgBase LinalgParser::parseVar() { dataType = content->dataTypes.at(tensorName); } tensor = TensorBase(tensorName,dataType,tensorDimensions,format); - cout << tensor << endl; for (size_t i = 0; i < tensorDimensions.size(); i++) { if (modesWithDefaults[i]) { content->modesWithDefaults.insert({tensor.getTensorVar(), i}); @@ -248,7 +305,10 @@ 
LinalgBase LinalgParser::parseVar() { content->tensors.insert({tensorName,tensor}); } - return LinalgBase(tensor.getName(),tensor.getComponentType(), tensor.getFormat() ); + LinalgBase resultLinalg(tensor.getName(), tensor.getTensorVar().getType(), tensor.getComponentType(), + tensor.getDimensions(), tensor.getFormat(), isColVec); + return resultLinalg; + //return LinalgBase(tensor.getName(), tensor.getComponentType(), tensor.getFormat() ); } vector LinalgParser::getUniqueIndices(size_t order) { diff --git a/src/parser/parser.cpp b/src/parser/parser.cpp index 472914c60..ea7ed6f89 100644 --- a/src/parser/parser.cpp +++ b/src/parser/parser.cpp @@ -54,6 +54,15 @@ Parser::Parser(string expression, const map& formats, content->defaultDimension = defaultDimension; content->tensors = tensors; content->dataTypes = dataTypes; + + cout << "Parser Constructor" << endl; + for (auto& d: tensorDimensions) { + cout << d.first << ": "; + for (auto i: d.second) { + cout << i << ", "<< endl; + } + } + nextToken(); } diff --git a/src/tensor.cpp b/src/tensor.cpp index cce0d726a..b61098854 100644 --- a/src/tensor.cpp +++ b/src/tensor.cpp @@ -104,6 +104,7 @@ static Format initFormat(Format format) { TensorBase::TensorBase(string name, Datatype ctype, vector dimensions, Format format) : content(new Content(name, ctype, dimensions, initFormat(format))) { + cout << name << endl; taco_uassert((size_t)format.getOrder() == dimensions.size()) << "The number of format mode types (" << format.getOrder() << ") " << "must match the tensor order (" << dimensions.size() << ")."; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index a426bfb1a..4999ced81 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -145,6 +145,7 @@ TEST(linalg, inner_mul) { cout << x.getIndexAssignment(); + ASSERT_TRUE(1); } @@ -166,6 +167,8 @@ TEST(linalg, outer_mul) { cout << X.getIndexAssignment(); + cout << X; + ASSERT_TRUE(1); } @@ -227,3 +230,19 @@ TEST(linalg, tensorapi) { /* cout << a << endl; */ } + +TEST(linalg, complex_expr) { + Matrix A("A", 2, 2, dense, dense); + Matrix B("B", 2, 2, dense, dense); + Matrix C("C", 2, 2, dense, dense); + Matrix D("D", 2, 2, dense, dense); + Matrix E("D", 2, 2, dense, dense); + + A = E*elemMul(B+C, D); + + cout << A << endl; + + cout << A.getIndexAssignment(); + + ASSERT_TRUE(1); +} From 8bbb892f31a66f6541ffd8bd1ef2e575cfa416c0 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Mon, 16 Nov 2020 15:42:03 -0800 Subject: [PATCH 27/61] Add in scalar support for linalg_parser --- include/taco/parser/linalg_parser.h | 5 ++-- src/linalg.cpp | 9 ++++++- src/linalg_notation/linalg_notation.cpp | 22 +++++++++++++-- src/parser/linalg_parser.cpp | 20 +++++++++++++- tools/taco.cpp | 36 ++++++++++++++++++++++++- 5 files changed, 85 insertions(+), 7 deletions(-) diff --git a/include/taco/parser/linalg_parser.h b/include/taco/parser/linalg_parser.h index a667d8423..892db30db 100644 --- a/include/taco/parser/linalg_parser.h +++ b/include/taco/parser/linalg_parser.h @@ -33,8 +33,9 @@ class LinalgParser : public AbstractParser { /// Create a parser object from linalg notation LinalgParser(std::string expression, const std::map& formats, const std::map& dataTypes, - const std::map>& tensorDimensions, - const std::map& tensors, + const std::map>& tensorDimensions, + const std::map& tensors, + const std::map& linalgShapes, const std::map& linalgVecShapes, int defaultDimension=5); /// Parses the linalg expression and sets the result tensor to the result of that expression diff --git a/src/linalg.cpp 
b/src/linalg.cpp index 7a18c144d..9b5a9725a 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -42,7 +42,8 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { cout << var.getOrder() << endl; cout << expr.getOrder() << endl; - taco_uassert(var.getOrder() == expr.getOrder()) << "RHS and LHS of linalg assignment must match order"; + taco_uassert(var.getOrder() == expr.getOrder()) << "LHS (" << var.getOrder() << ") and RHS (" << expr.getOrder() + << ") of linalg assignment must match order"; if (var.getOrder() == 1) taco_uassert(this->isColVector() == expr.isColVector()) << "RHS and LHS of linalg assignment must match vector type"; @@ -119,6 +120,12 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { indicesA = {indices[0]}; indicesB = {indices[1]}; + } else if (mul->a.getOrder() == 0) { + indicesA = {}; + indicesB = indices; + } else if (mul->b.getOrder() == 0) { + indicesA = indices; + indicesB = {}; } else { indicesA = {index}; indicesB = {index}; diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index e5ed57cff..c76c338b3 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -109,8 +109,9 @@ std::ostream& operator<<(std::ostream& os, const LinalgExpr& expr) { } void checkCompatibleShape(const LinalgExpr &lhs, const LinalgExpr &rhs) { - taco_uassert(lhs.getOrder() == rhs.getOrder()) << "RHS and LHS order do not match for linear algebra " - "binary operation" << endl; + if (lhs.getOrder() != 0 && rhs.getOrder() != 0) + taco_uassert(lhs.getOrder() == rhs.getOrder()) << "RHS and LHS order do not match for linear algebra " + "binary operation" << endl; if (lhs.getOrder() == 1) taco_uassert(lhs.isColVector() == rhs.isColVector()) << "RHS and LHS vector type do not match for linear algebra " "binary operation" << endl; @@ -122,11 +123,15 @@ LinalgExpr operator-(const LinalgExpr &expr) { LinalgExpr operator+(const LinalgExpr &lhs, const LinalgExpr &rhs) { checkCompatibleShape(lhs, rhs); + if (lhs.getOrder() == 0) + return new LinalgAddNode(lhs, rhs, rhs.getOrder(), rhs.isColVector()); return new LinalgAddNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr operator-(const LinalgExpr &lhs, const LinalgExpr &rhs) { checkCompatibleShape(lhs, rhs); + if (lhs.getOrder() == 0) + return new LinalgSubNode(lhs, rhs, rhs.getOrder(), rhs.isColVector()); return new LinalgSubNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } @@ -154,6 +159,15 @@ LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { else if (lhs.getOrder() == 1 && lhs.isColVector() && rhs.getOrder() == 1 && !rhs.isColVector()) { order = 2; } + // Scalar product + else if (lhs.getOrder() == 0) { + order = rhs.getOrder(); + isColVec = rhs.isColVector(); + } + else if (rhs.getOrder() == 0) { + order = lhs.getOrder(); + isColVec = lhs.isColVector(); + } else { taco_uassert(lhs.getOrder() != rhs.getOrder()) << "LHS (" << lhs.getOrder() << "," << lhs.isColVector() << ") and RHS (" << rhs.getOrder() << "," << rhs.isColVector() @@ -165,11 +179,15 @@ LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { LinalgExpr operator/(const LinalgExpr &lhs, const LinalgExpr &rhs) { checkCompatibleShape(lhs, rhs); + if (lhs.getOrder() == 0) + return new LinalgDivNode(lhs, rhs, rhs.getOrder(), rhs.isColVector()); return new LinalgDivNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } LinalgExpr elemMul(const 
LinalgExpr &lhs, const LinalgExpr &rhs) { checkCompatibleShape(lhs, rhs); + if (lhs.getOrder() == 0) + return new LinalgElemMulNode(lhs, rhs, rhs.getOrder(), rhs.isColVector()); return new LinalgElemMulNode(lhs, rhs, lhs.getOrder(), lhs.isColVector()); } diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp index 7cf530e6f..6430edbce 100644 --- a/src/parser/linalg_parser.cpp +++ b/src/parser/linalg_parser.cpp @@ -38,6 +38,8 @@ struct LinalgParser::Content { bool parsingLhs = false; map indexVars; + std::map linalgShapes; + std::map linalgVecShapes; TensorBase resultTensor; map tensors; @@ -47,6 +49,7 @@ struct LinalgParser::Content { const map& dataTypes, const map>& tensorDimensions, const std::map& tensors, + const std::map& linalgShapes, const std::map& linalgVecShapes, int defaultDimension) : content(new LinalgParser::Content) { content->lexer = Lexer(expression); @@ -56,6 +59,8 @@ struct LinalgParser::Content { content->tensors = tensors; content->dataTypes = dataTypes; + content->linalgShapes = linalgShapes; + content->linalgVecShapes = linalgVecShapes; idxcount = 0; nextToken(); @@ -251,7 +256,20 @@ LinalgBase LinalgParser::parseVar() { isColVec = true; } - if (content->formats.find(tensorName) != content->formats.end()) { + if (content->linalgShapes.find(tensorName) != content->linalgShapes.end()) { + if (content->formats.find(tensorName) != content->formats.end()) { + taco_uassert(content->linalgShapes.at(tensorName) == content->formats.at(tensorName).getOrder()) + << "Linalg shape and tensor format must match" << endl; + + } + if (content->tensorDimensions.find(tensorName) != content->tensorDimensions.end()) + taco_uassert(content->linalgShapes.at(tensorName) == (int)content->tensorDimensions.at(tensorName).size()) + << "Linalg shape and the number of tensor dimensions must match" << endl; + + order = content->linalgShapes.at(tensorName); + isColVec = content->linalgVecShapes.at(tensorName); + } + else if (content->formats.find(tensorName) != content->formats.end()) { if (content->tensorDimensions.find(tensorName) != content->tensorDimensions.end()) taco_uassert(content->formats.at(tensorName).getOrder() == (int)content->tensorDimensions.at(tensorName).size()) diff --git a/tools/taco.cpp b/tools/taco.cpp index 926309784..50a915fc8 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -192,6 +192,12 @@ static void printUsageInfo() { printFlag("nthreads", "Specify number of threads for parallel execution"); cout << endl; printFlag("linalg", "Specify if the input should be in Linear Algebra (not index) Notation"); + cout << endl; + printFlag("k=:,", + "[LINALG NOTATION ONLY -linalg] Specify the shape of the linear algebra var. 
" + "Specify the number of dimensions, shape, (0, 1, or 2) and an optional flag of " + "if the var is a column vector for the cases where order == 1 (1 or 0) " + "Examples: A:2, A:0, A:1,1, A:1,0"); } static int reportError(string errorMessage, int errorCode) { @@ -522,6 +528,9 @@ int main(int argc, char* argv[]) { string writeTimeFilename; vector declaredTensors; + map linalgShapes; + map linalgVecShapes; + vector kernelFilenames; vector scheduleCommands; @@ -840,6 +849,31 @@ int main(int argc, char* argv[]) { else if ("-linalg" == argName) { linalg = true; } + else if ("-k" == argName) { + vector descriptor = util::split(argValue, ":"); + string tensorName = descriptor[0]; + vector shapes = util::split(descriptor[1], ","); + + int linalgShape = 0; + bool linalgVecShape = false; + if (shapes.size() == 1) { + linalgShape = std::stoi(shapes[0]); + taco_uassert(linalgShape >= 0 && linalgShape <= 2) << "Shape is not compatible with linalg notation" << endl; + if (linalgShape == 1) + linalgVecShape = true; + } else if (shapes.size() == 2) { + linalgShape = std::stoi(shapes[0]); + taco_uassert(linalgShape >= 0 && linalgShape <= 2) << "Shape is not compatible with linalg notation" << endl; + linalgVecShape = (bool) std::stoi(shapes[1]); + taco_uassert(linalgVecShape == 0 || linalgVecShape == 1) << "Vector type is not compatible with linalg notation" << endl; + if (linalgShape != 1 ) { + linalgVecShape = false; + } + } + linalgShapes.insert({tensorName, linalgShape}); + linalgVecShapes.insert({tensorName, linalgVecShape}); + + } else { if (exprStr.size() != 0) { printUsageInfo(); @@ -891,7 +925,7 @@ int main(int argc, char* argv[]) { TensorBase tensor; parser::AbstractParser *parser; if (linalg) - parser = new parser::LinalgParser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, 42); + parser = new parser::LinalgParser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, linalgShapes, linalgVecShapes, 42); else parser = new parser::Parser(exprStr, formats, dataTypes, tensorsDimensions, loadedTensors, 42); From 14ce8e930bdd002db6450c6b2df9368d940a1c38 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 18 Nov 2020 22:26:27 -0800 Subject: [PATCH 28/61] WIP blocking --- include/taco/linalg.h | 41 +++++++++- include/taco/tensor.h | 2 + src/linalg.cpp | 174 +++++++++++++++++++++++++++++++++++++++++- test/tests-linalg.cpp | 54 +++++++++++++ tools/taco.cpp | 7 +- 5 files changed, 270 insertions(+), 8 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index e45fcc1b8..924c5675b 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -24,6 +24,7 @@ class LinalgBase : public LinalgExpr { IndexStmt indexAssignment; int idxcount; + int block; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); @@ -33,6 +34,7 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec = false); + LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, int block, bool isColVec = false); LinalgBase(std::string name, Type tensorType, bool isColVec = false); LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); LinalgBase(TensorBase* tensor, bool isColVec = false); @@ -44,6 +46,7 @@ class LinalgBase : public LinalgExpr { const IndexStmt getIndexAssignment() const; + bool isBlocked() const; IndexStmt rewrite(); @@ -79,15 +82,21 @@ class Matrix : public LinalgBase { Matrix(std::string name, Type 
tensorType, Format format); + Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2, int block_size); + LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } + bool isBlocked() { + return LinalgBase::isBlocked(); + } // Support some Read methods CType at(int coord_x, int coord_y); // And a Write method void insert(int coord_x, int coord_y, CType value); + void insert(int coord_x, int coord_y, int coord_bx, int coord_by, CType value); }; @@ -123,6 +132,14 @@ Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tens template Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} +template +Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2, int block_size) : + LinalgBase(name, Type(type(), {dim1/block_size, dim2/block_size, block_size, block_size}), type(), + {(int)dim1/block_size, (int)dim2/block_size, block_size, block_size}, Format({format1, format2, dense, dense}), block_size, false) { + taco_uassert(block_size >= 0) << "Block size must be non-negative" << std::endl; + taco_uassert(dim1 % block_size == 0 && dim2 % block_size == 0) << "Dimensions must be a multiple of block size" << std::endl; + } + // Definition of Read methods template CType Matrix::at(int coord_x, int coord_y) { @@ -132,9 +149,26 @@ CType Matrix::at(int coord_x, int coord_y) { // Definition of Write methods template void Matrix::insert(int coord_x, int coord_y, CType value) { - tensorBase->insert({coord_x, coord_y}, value); + std::cout << "Blocked " << isBlocked() << std::endl; + std::cout << this->block << std::endl; + if (isBlocked()) { + std::cout << "blocked matrix" << std::endl; + for (int bx = 0; bx < block; bx++) + for (int by = 0; by < block; by++) + tensorBase->insert({coord_x, coord_y, bx, by}, value); + } else { + tensorBase->insert({coord_x, coord_y}, value); + } } +// Blocked Write Method +template +void Matrix::insert(int coord_x, int coord_y, int coord_bx, int coord_by, CType value) { + std::cout << "blocked matrix" << std::endl; + std::cout << "Blocked " << isBlocked() << std::endl; + std::cout << this->block << std::endl; + tensorBase->insert({coord_x, coord_y, coord_bx, coord_by}, value); +} // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ @@ -156,6 +190,8 @@ class Vector : public LinalgBase { Vector(std::string name, Type type, ModeFormat format, bool isColVec = true); + Vector(std::string name, Type type, ModeFormat format, int blocking, bool isColVec = true); + LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } @@ -165,6 +201,9 @@ class Vector : public LinalgBase { // Support some Read methods too CType at(int coord); + + // Support Read Method with Blocking + CType at(int bcoord0, int bcoord1); }; // ------------------------------------------------------------ diff --git a/include/taco/tensor.h b/include/taco/tensor.h index 86ea4120c..08feeb0ab 100644 --- a/include/taco/tensor.h +++ b/include/taco/tensor.h @@ -884,6 +884,8 @@ TensorBase::TensorBase(CType val) : TensorBase(type()) { template void TensorBase::insert(const std::initializer_list& coordinate, CType value) { + std::cout << coordinate.size(); + std::cout << getOrder(); taco_uassert(coordinate.size() == (size_t)getOrder()) << "Wrong number of indices"; taco_uassert(getComponentType() == type()) << diff --git 
a/src/linalg.cpp b/src/linalg.cpp index 9b5a9725a..218493a9c 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -14,13 +14,21 @@ LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name) LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), - tensorType(tensorType), idxcount(0) { + tensorType(tensorType), idxcount(0), block(0) { if(isa(ptr)) { /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ cout << this->tensorBase->getName() << endl; } } +LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, int block, bool isColVec) : + LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), + tensorType(tensorType), idxcount(0), block(block) { + if(isa(ptr)) { + cout << this->tensorBase->getName() << endl; + } +} + LinalgBase::LinalgBase(TensorBase* tbase, bool isColVec) : LinalgExpr(tbase, isColVec), name(tbase->getName()), tensorType(tbase->getTensorVar().getType()), idxcount(0) { @@ -37,8 +45,14 @@ LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVe LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { /* cout << "LinalgBase operator= on " << name << endl; */ - taco_iassert(isa(this->ptr)); - TensorVar var = to(this->get())->tensorVar; + TensorVar var; + if (isa(this->ptr)) { + var = to(this->get())->tensorVar; + } else if (isa(this->ptr)) { + var = to(this->get())->tensorVar; + } else { + taco_ierror << "LinalgBase LHS must be a VarNode or TensorBaseNode"; + } cout << var.getOrder() << endl; cout << expr.getOrder() << endl; @@ -84,6 +98,139 @@ IndexVar LinalgBase::getUniqueIndex() { return result; } +IndexExpr LinalgBase::blockedRewrite(LinalgExpr linalg, vector indices) { + if (isa(linalg.get())) { + auto sub = to(linalg.get()); + IndexExpr indexA = rewrite(sub->a, indices); + IndexExpr indexB = rewrite(sub->b, indices); + return new SubNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto add = to(linalg.get()); + IndexExpr indexA = rewrite(add->a, indices); + IndexExpr indexB = rewrite(add->b, indices); + return new AddNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto mul = to(linalg.get()); + IndexExpr indexA = rewrite(mul->a, indices); + IndexExpr indexB = rewrite(mul->b, indices); + return new MulNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto mul = to(linalg.get()); + IndexVar index = getUniqueIndex(); + vector indicesA; + vector indicesB; + if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { + indicesA = {indices[0], index}; + indicesB = {index, indices[1]}; + } + else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { + indicesA = {index}; + indicesB = {index, indices[0]}; + } + else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { + indicesA = {indices[0], index}; + + indicesB = {index}; + } + else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { + indicesA = {indices[0]}; + indicesB = {indices[1]}; + } else if (mul->a.getOrder() == 0) { + indicesA = {}; + indicesB = indices; + } else if (mul->b.getOrder() == 0) { + indicesA = indices; + indicesB = {}; + } else { + indicesA = {index}; + indicesB = {index}; + } + IndexExpr indexA = rewrite(mul->a, indicesA); + IndexExpr indexB = rewrite(mul->b, indicesB); + return new MulNode(indexA, indexB); 
+ } else if (isa(linalg.get())) { + auto div = to(linalg.get()); + IndexExpr indexA = rewrite(div->a, indices); + IndexExpr indexB = rewrite(div->b, indices); + return new DivNode(indexA, indexB); + } else if (isa(linalg.get())) { + auto neg = to(linalg.get()); + IndexExpr index = rewrite(neg->a, indices); + return new NegNode(index); + } else if (isa(linalg.get())) { + auto transpose = to(linalg.get()); + if (transpose->a.getOrder() == 2) { + return rewrite(transpose->a, {indices[1], indices[0]}); + } + else if (transpose->a.getOrder() == 1) { + return rewrite(transpose->a, {indices[0]}); + } + return rewrite(transpose->a, {}); + } else if (isa(linalg.get())) { + auto lit = to(linalg.get()); + + LiteralNode* value; + switch (lit->getDataType().getKind()) { + case Datatype::Bool: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt128: + taco_not_supported_yet; + break; + case Datatype::Int8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int128: + taco_not_supported_yet; + break; + case Datatype::Float32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Float64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Complex64: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Complex128: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Undefined: + taco_uerror << "unsupported Datatype"; + break; + } + return value; + } else if (isa(linalg.get())) { + auto var = to(linalg.get()); + return new AccessNode(var->tensorVar, indices); + } else if (isa(linalg.get())) { + /* cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; */ + return linalg.tensorBase->operator()(indices); + } + return IndexExpr(); +} + IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { if (isa(linalg.get())) { auto sub = to(linalg.get()); @@ -229,11 +376,26 @@ IndexStmt LinalgBase::rewrite() { if (tensor.getOrder() == 1) { indices.push_back(IndexVar("i")); } else if (tensor.getOrder() == 2) { + if (this->isBlocked()) { + indices.push_back(IndexVar("i")); + indices.push_back(IndexVar("ib")); + } else { + indices.push_back(IndexVar("i")); + indices.push_back(IndexVar("j")); + } + } else if (tensor.getOrder() == 4) { indices.push_back(IndexVar("i")); + indices.push_back(IndexVar("ib")); indices.push_back(IndexVar("j")); + indices.push_back(IndexVar("jb")); } + Access lhs = Access(tensor, indices); - IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); + IndexExpr rhs; + if (isBlocked()) + rhs = blockedRewrite(this->assignment.getRhs(), indices); + else + rhs = rewrite(this->assignment.getRhs(), indices); cout << "rhs done here" << endl; if(this->tensorBase != nullptr) { @@ -250,6 +412,10 @@ IndexStmt LinalgBase::rewrite() { return IndexStmt(); } +bool LinalgBase::isBlocked() const{ + return this->block != 0; +} + std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) { LinalgAssignment assignment = 
linalg.getAssignment(); diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 4999ced81..325d92fd5 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -246,3 +246,57 @@ TEST(linalg, complex_expr) { ASSERT_TRUE(1); } + +TEST(linalg, blocking_matmul) { + Matrix B("B", 16, 16, dense, dense, 4); + Matrix C("C", 16, 16, dense, dense, 4); + Matrix A("A", 16, 16, dense, dense, 4); + + cout << "--- Before inserting ---" << endl; + B.insert(0,0,0, 0,2); + B.insert(1,1, 0,0,1); + B.insert(0,1, 0,0,2); + + C.insert(0,0,2); + C.insert(1,1,2); + cout << "--- After inserting ---" << endl; + cout << "B: " << B << endl; + cout << "C: " << C << endl; + cout << "--- Before Expression ---" << endl; + A = B * C; +// cout << "--- After Expression ---" << endl; +// +// cout << "--- Before At ---" << endl; +// cout << "B(0,0): " << B.at(0,0) << endl; +// cout << "A(0,0): " << A.at(0,0) << endl; +// cout << "--- After At ---" << endl; +// +// cout << "--- Before Rewrite of A ---" << endl; +// A.rewrite(); +// cout << "--- After Rewrite of A ---" << endl; +// +// cout << "--- Before At (A) ---" << endl; +// cout << "A(0,0): " << A.at(0,0) << endl; +// cout << "--- After At (A) ---" << endl; +// +// cout << "--- before cout of a ---" << endl; +// cout << A << endl; +// cout << "--- after cout of a ---" << endl; +// +// cout << "--- Before getIndexAssignment on A ---" << endl; +// cout << A.getIndexAssignment() << endl; +// cout << "--- After getIndexAssignment on A ---" << endl; +// +// ASSERT_EQ(A.at(0,0), 4); +// ASSERT_EQ(A.at(0,1), 4); +// ASSERT_EQ(A.at(1,0), 0); + ASSERT_TRUE(1); + + // TODO: Support this style of accessing and querying the values, too + /* map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; */ + /* for (auto val = A.beginTyped(); val != A.endTyped(); ++val) { */ + /* ASSERT_TRUE(util::contains(vals, val->first.toVector())); */ + /* ASSERT_EQ(vals.at(val->first.toVector()), val->second); */ + /* } */ + +} diff --git a/tools/taco.cpp b/tools/taco.cpp index 50a915fc8..029cf4dab 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -193,11 +193,12 @@ static void printUsageInfo() { cout << endl; printFlag("linalg", "Specify if the input should be in Linear Algebra (not index) Notation"); cout << endl; - printFlag("k=:,", + printFlag("k=:", "[LINALG NOTATION ONLY -linalg] Specify the shape of the linear algebra var. " - "Specify the number of dimensions, shape, (0, 1, or 2) and an optional flag of " - "if the var is a column vector for the cases where order == 1 (1 or 0) " + "Specify the number of dimensions (0, 1, or 2) and an optional flag of " + "if the var is a column vector when order == 1 (1 or 0) " "Examples: A:2, A:0, A:1,1, A:1,0"); + cout << endl; } static int reportError(string errorMessage, int errorCode) { From 34732e99398c0c52a7465eb52b39e3a07611ee05 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Thu, 19 Nov 2020 03:50:47 -0800 Subject: [PATCH 29/61] Add in blocking code...still may be a little bit buggy. 
Has a lot of print statements --- include/taco/linalg.h | 79 ++++++++-- .../taco/linalg_notation/linalg_notation.h | 5 +- .../linalg_notation/linalg_notation_nodes.h | 12 +- .../linalg_notation_nodes_abstract.h | 4 + src/linalg.cpp | 68 ++++----- src/linalg_notation/linalg_notation.cpp | 135 +++++++++++++----- .../linalg_notation_nodes_abstract.cpp | 24 +++- test/tests-linalg.cpp | 95 +++++++----- 8 files changed, 303 insertions(+), 119 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 924c5675b..4320b395e 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -27,6 +27,7 @@ class LinalgBase : public LinalgExpr { int block; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); + IndexExpr blockedRewrite(LinalgExpr linalg, std::vector indices); IndexVar getUniqueIndex(); @@ -34,7 +35,7 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec = false); - LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, int block, bool isColVec = false); + LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, int block = 0, bool isColVec = false); LinalgBase(std::string name, Type tensorType, bool isColVec = false); LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); LinalgBase(TensorBase* tensor, bool isColVec = false); @@ -92,7 +93,9 @@ class Matrix : public LinalgBase { return LinalgBase::isBlocked(); } // Support some Read methods + std::vector atBlock(int coord_x, int coord_y); CType at(int coord_x, int coord_y); + CType at(int coord_x, int coord_bx, int coord_y, int coord_by); // And a Write method void insert(int coord_x, int coord_y, CType value); @@ -135,7 +138,7 @@ Matrix::Matrix(std::string name, Type tensorType, Format format) : Linalg template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2, int block_size) : LinalgBase(name, Type(type(), {dim1/block_size, dim2/block_size, block_size, block_size}), type(), - {(int)dim1/block_size, (int)dim2/block_size, block_size, block_size}, Format({format1, format2, dense, dense}), block_size, false) { + {(int)dim1/block_size, block_size, (int)dim2/block_size, block_size}, Format({format1, dense, format2, dense}), block_size, false) { taco_uassert(block_size >= 0) << "Block size must be non-negative" << std::endl; taco_uassert(dim1 % block_size == 0 && dim2 % block_size == 0) << "Dimensions must be a multiple of block size" << std::endl; } @@ -146,6 +149,25 @@ CType Matrix::at(int coord_x, int coord_y) { return tensorBase->at({coord_x, coord_y}); } +template +std::vector Matrix::atBlock(int coord_x, int coord_y) { + std::vector result; + if (isBlocked()) { + std::cout << "blocked matrix" << std::endl; + for (int bx = 0; bx < block; bx++) + for (int by = 0; by < block; by++) + result.push_back(tensorBase->at({coord_x, bx, coord_y, by})); + return result; + } + return {tensorBase->at({coord_x, coord_y})}; +} + +// Blocked read Method +template +CType Matrix::at(int coord_x, int coord_bx, int coord_y, int coord_by) { + return tensorBase->at({coord_x, coord_bx, coord_y, coord_by}); +} + // Definition of Write methods template void Matrix::insert(int coord_x, int coord_y, CType value) { @@ -155,7 +177,7 @@ void Matrix::insert(int coord_x, int coord_y, CType value) { std::cout << "blocked matrix" << std::endl; for (int bx = 0; bx < block; bx++) for (int by 
= 0; by < block; by++) - tensorBase->insert({coord_x, coord_y, bx, by}, value); + tensorBase->insert({coord_x, bx, coord_y, by}, value); } else { tensorBase->insert({coord_x, coord_y}, value); } @@ -163,11 +185,11 @@ void Matrix::insert(int coord_x, int coord_y, CType value) { // Blocked Write Method template -void Matrix::insert(int coord_x, int coord_y, int coord_bx, int coord_by, CType value) { +void Matrix::insert(int coord_x, int coord_bx, int coord_y, int coord_by, CType value) { std::cout << "blocked matrix" << std::endl; std::cout << "Blocked " << isBlocked() << std::endl; std::cout << this->block << std::endl; - tensorBase->insert({coord_x, coord_y, coord_bx, coord_by}, value); + tensorBase->insert({coord_x, coord_bx, coord_y, coord_by}, value); } // ------------------------------------------------------------ // Vector class @@ -183,14 +205,12 @@ class Vector : public LinalgBase { Vector(std::string name, size_t dim, bool isColVec = true); Vector(std::string name, size_t dim, Format format, bool isColVec = true); - Vector(std::string name, size_t dim, ModeFormat format, bool isColVec = true); - + Vector(std::string name, size_t dim, ModeFormat format, int block, bool isColVec = true); Vector(std::string name, Type type, Format format, bool isColVec = true); Vector(std::string name, Type type, ModeFormat format, bool isColVec = true); - Vector(std::string name, Type type, ModeFormat format, int blocking, bool isColVec = true); LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); @@ -198,12 +218,14 @@ class Vector : public LinalgBase { // Support some Write methods void insert(int coord, CType value); + void insert(int coord, int coord_b, CType value); // Support some Read methods too + std::vector atBlock(int coord); CType at(int coord); // Support Read Method with Blocking - CType at(int bcoord0, int bcoord1); + CType at(int coord, int coord_b); }; // ------------------------------------------------------------ @@ -226,7 +248,16 @@ Vector::Vector(std::string name, size_t dim, Format format, bool isColVec template Vector::Vector(std::string name, size_t dim, ModeFormat format, bool isColVec) : - LinalgBase(name, Type(type(), {dim}), type(), {(int)dim}, Format(format), isColVec) {} + LinalgBase(name, Type(type(), {dim}), type(), {(int)dim}, Format(format), isColVec) { +} + +template +Vector::Vector(std::string name, size_t dim, ModeFormat format, int block, bool isColVec) : + LinalgBase(name, Type(type(), {dim/block, block}), type(), {(int)dim/block, block}, Format({format, dense}), block, isColVec) { + std::cout << "IS COL VEC" << isColVec << std::endl; + taco_uassert(block >= 0) << "Block size must be non-negative" << std::endl; + taco_uassert(dim % block == 0) << "Dimension must be a multiple of block size" << std::endl; + } template Vector::Vector(std::string name, Type type, Format format, bool isColVec) : @@ -240,7 +271,17 @@ Vector::Vector(std::string name, Type type, ModeFormat format, bool isCol // Vector write methods template void Vector::insert(int coord, CType value) { - tensorBase->insert({coord}, value); + if (isBlocked()) { + for (int bx = 0; bx < block; bx++) + tensorBase->insert({coord, bx}, value); + } else { + tensorBase->insert({coord}, value); + } +} + +template +void Vector::insert(int coord, int coord_b, CType value) { + tensorBase->insert({coord, coord_b}, value); } template @@ -248,6 +289,22 @@ CType Vector::at(int coord) { return tensorBase->at({coord}); } +template +CType Vector::at(int coord, int coord_b) { + return 
tensorBase->at({coord, coord_b}); +} + +template +std::vector Vector::atBlock(int coord) { + std::vector result; + if (isBlocked()) { + for (int coord_b = 0; coord_b < block; coord_b++) + result.push_back(tensorBase->at({coord, coord_b})); + return result; + } + return {tensorBase->at({coord})}; +} + template class Scalar : public LinalgBase { std::string name; diff --git a/include/taco/linalg_notation/linalg_notation.h b/include/taco/linalg_notation/linalg_notation.h index 8e761c1f3..104feb09c 100644 --- a/include/taco/linalg_notation/linalg_notation.h +++ b/include/taco/linalg_notation/linalg_notation.h @@ -77,8 +77,9 @@ class LinalgExpr : public util::IntrusivePtr { explicit LinalgExpr(TensorVar); LinalgExpr(TensorVar, bool isColVec, TensorBase* tensorBase); + LinalgExpr(TensorVar, bool isColVec, int block, TensorBase* tensorBase); - explicit LinalgExpr(TensorBase* _tensorBase, bool isColVec=false); + explicit LinalgExpr(TensorBase* _tensorBase, bool isColVec=false, int block=0); LinalgExpr(TensorVar var, bool isColVec); /// Consturct an integer literal. @@ -127,6 +128,8 @@ class LinalgExpr : public util::IntrusivePtr { int getOrder() const; bool isColVector() const; void setColVector(bool) const; + bool isBlocked() const; + int getBlock() const; /// Visit the linalg expression's sub-expressions. void accept(LinalgExprVisitorStrict *) const; diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h index 5c47de49e..6a30a91e8 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes.h +++ b/include/taco/linalg_notation/linalg_notation_nodes.h @@ -25,6 +25,9 @@ namespace taco { : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder()), tensorVar(tensorVar) {} LinalgVarNode(TensorVar tensorVar, bool isColVec) : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec), tensorVar(tensorVar) {} + LinalgVarNode(TensorVar tensorVar, bool isColVec, int block) + : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec, block), tensorVar(tensorVar) {} + void accept(LinalgExprVisitorStrict* v) const override { v->visit(this); @@ -40,6 +43,8 @@ namespace taco { : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder()), tensorVar(tensorVar), tensorBase(tensorBase) {} LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase, bool isColVec) : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec), tensorVar(tensorVar), tensorBase(tensorBase) {} + LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase, bool isColVec, int block) + : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec, block), tensorVar(tensorVar), tensorBase(tensorBase) {} void accept(LinalgExprVisitorStrict* v) const override { v->visit(this); } @@ -78,8 +83,9 @@ namespace taco { LinalgExpr a; protected: - LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType(), a.getOrder(), a.isColVector()), a(a) {} - LinalgUnaryExprNode(LinalgExpr a, bool isColVec) : LinalgExprNode(a.getDataType(), a.getOrder(), isColVec), a(a) {} + LinalgUnaryExprNode(LinalgExpr a) : LinalgExprNode(a.getDataType(), a.getOrder(), a.isColVector(), a.getBlock()), a(a) {} + LinalgUnaryExprNode(LinalgExpr a, bool isColVec) : LinalgExprNode(a.getDataType(), a.getOrder(), isColVec, a.getBlock()), a(a) {} + LinalgUnaryExprNode(LinalgExpr a, bool isColVec, int block) : LinalgExprNode(a.getDataType(), a.getOrder(), isColVec, block), a(a) {} }; 
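// A minimal usage sketch of how the block size recorded on these expression
// nodes is meant to propagate: the unary and binary node constructors in this
// file now forward a.getBlock(), so a whole blocked expression can be queried
// before it is rewritten. Names and sizes below are illustrative and assume
// the blocked Matrix constructor added earlier in this patch.
#include "taco/linalg.h"
#include <iostream>
using namespace taco;

void blocked_propagation_sketch() {
  Matrix<double> B("B", 4, 4, dense, dense, 2);   // stored as (i, ib, j, jb)
  Matrix<double> C("C", 4, 4, dense, dense, 2);
  LinalgExpr prod = B * C;                        // builds a LinalgMatMulNode
  // Block size flows through the node constructors:
  std::cout << prod.isBlocked() << " "            // expected: 1
            << prod.getBlock() << " "             // expected: 2
            << prod.getOrder() << std::endl;      // expected: 4
  // blockedRewrite() is then expected to lower A = B * C to roughly
  //   A(i,ib,j,jb) = B(i,ib,k,kb) * C(k,kb,j,jb),
  // contracting the (block, in-block) pair shared by B's columns and C's rows.
}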
@@ -111,7 +117,7 @@ namespace taco { LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b, int order) : LinalgExprNode(max_type(a.getDataType(), b.getDataType()), order), a(a), b(b) {} LinalgBinaryExprNode(LinalgExpr a, LinalgExpr b, int order, bool isColVec) - : LinalgExprNode(max_type(a.getDataType(), b.getDataType()), order, isColVec), a(a), b(b) {} + : LinalgExprNode(max_type(a.getDataType(), b.getDataType()), order, isColVec, a.getBlock()), a(a), b(b) {} }; diff --git a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h index 136034053..4ae2d431c 100644 --- a/include/taco/linalg_notation/linalg_notation_nodes_abstract.h +++ b/include/taco/linalg_notation/linalg_notation_nodes_abstract.h @@ -27,6 +27,7 @@ struct LinalgExprNode : public util::Manageable, explicit LinalgExprNode(Datatype type); LinalgExprNode(Datatype type, int order); LinalgExprNode(Datatype type, int order, bool isColVec); + LinalgExprNode(Datatype type, int order, bool isColVec, int block); virtual ~LinalgExprNode() = default; @@ -37,11 +38,14 @@ struct LinalgExprNode : public util::Manageable, int getOrder() const; bool isColVector() const; void setColVector(bool val); + bool isBlocked() const; + int getBlock() const; private: Datatype dataType; int order; bool isColVec; + int block; }; struct LinalgStmtNode : public util::Manageable, diff --git a/src/linalg.cpp b/src/linalg.cpp index 218493a9c..384a91338 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -13,7 +13,7 @@ LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name) } LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : - LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), + LinalgExpr(TensorVar(name, tensorType, format), isColVec,new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0), block(0) { if(isa(ptr)) { /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ @@ -22,8 +22,9 @@ LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector } LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, int block, bool isColVec) : - LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), + LinalgExpr(TensorVar(name, tensorType, format), isColVec, block,new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0), block(block) { + cout << "LINALGBASE: " << isColVec; if(isa(ptr)) { cout << this->tensorBase->getName() << endl; } @@ -54,7 +55,7 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_ierror << "LinalgBase LHS must be a VarNode or TensorBaseNode"; } - cout << var.getOrder() << endl; + cout << "ASSIGN: " << var.getOrder() << endl; cout << expr.getOrder() << endl; taco_uassert(var.getOrder() == expr.getOrder()) << "LHS (" << var.getOrder() << ") and RHS (" << expr.getOrder() << ") of linalg assignment must match order"; @@ -101,40 +102,41 @@ IndexVar LinalgBase::getUniqueIndex() { IndexExpr LinalgBase::blockedRewrite(LinalgExpr linalg, vector indices) { if (isa(linalg.get())) { auto sub = to(linalg.get()); - IndexExpr indexA = rewrite(sub->a, indices); - IndexExpr indexB = rewrite(sub->b, indices); + IndexExpr indexA = blockedRewrite(sub->a, indices); + IndexExpr indexB = blockedRewrite(sub->b, indices); return 
new SubNode(indexA, indexB); } else if (isa(linalg.get())) { auto add = to(linalg.get()); - IndexExpr indexA = rewrite(add->a, indices); - IndexExpr indexB = rewrite(add->b, indices); + IndexExpr indexA = blockedRewrite(add->a, indices); + IndexExpr indexB = blockedRewrite(add->b, indices); return new AddNode(indexA, indexB); } else if (isa(linalg.get())) { auto mul = to(linalg.get()); - IndexExpr indexA = rewrite(mul->a, indices); - IndexExpr indexB = rewrite(mul->b, indices); + IndexExpr indexA = blockedRewrite(mul->a, indices); + IndexExpr indexB = blockedRewrite(mul->b, indices); return new MulNode(indexA, indexB); } else if (isa(linalg.get())) { auto mul = to(linalg.get()); IndexVar index = getUniqueIndex(); + IndexVar indexb = getUniqueIndex(); vector indicesA; vector indicesB; - if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { - indicesA = {indices[0], index}; - indicesB = {index, indices[1]}; + if (mul->a.getOrder() == 4 && mul->b.getOrder() == 4) { + indicesA = {indices[0], indices[1], index, indexb}; + indicesB = {index, indexb, indices[2], indices[3]}; } - else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { - indicesA = {index}; - indicesB = {index, indices[0]}; + else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 4) { + indicesA = {index, indexb}; + indicesB = {index, indexb, indices[0], indices[1]}; } - else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { - indicesA = {indices[0], index}; + else if (mul->a.getOrder() == 4 && mul->b.getOrder() == 2) { + indicesA = {indices[0], indices[1], index, indexb}; - indicesB = {index}; + indicesB = {index, indexb}; } - else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { - indicesA = {indices[0]}; - indicesB = {indices[1]}; + else if (mul->a.getOrder() == 2 && mul->a.isColVector() && mul->b.getOrder() == 2) { + indicesA = {indices[0], indices[1]}; + indicesB = {indices[2], indices[3]}; } else if (mul->a.getOrder() == 0) { indicesA = {}; indicesB = indices; @@ -142,30 +144,30 @@ IndexExpr LinalgBase::blockedRewrite(LinalgExpr linalg, vector indices indicesA = indices; indicesB = {}; } else { - indicesA = {index}; - indicesB = {index}; + indicesA = {index, indexb}; + indicesB = {index, indexb}; } - IndexExpr indexA = rewrite(mul->a, indicesA); - IndexExpr indexB = rewrite(mul->b, indicesB); + IndexExpr indexA = blockedRewrite(mul->a, indicesA); + IndexExpr indexB = blockedRewrite(mul->b, indicesB); return new MulNode(indexA, indexB); } else if (isa(linalg.get())) { auto div = to(linalg.get()); - IndexExpr indexA = rewrite(div->a, indices); - IndexExpr indexB = rewrite(div->b, indices); + IndexExpr indexA = blockedRewrite(div->a, indices); + IndexExpr indexB = blockedRewrite(div->b, indices); return new DivNode(indexA, indexB); } else if (isa(linalg.get())) { auto neg = to(linalg.get()); - IndexExpr index = rewrite(neg->a, indices); + IndexExpr index = blockedRewrite(neg->a, indices); return new NegNode(index); } else if (isa(linalg.get())) { auto transpose = to(linalg.get()); - if (transpose->a.getOrder() == 2) { - return rewrite(transpose->a, {indices[1], indices[0]}); + if (transpose->a.getOrder() == 4) { + return blockedRewrite(transpose->a, {indices[2], indices[3], indices[0], indices[1]}); } - else if (transpose->a.getOrder() == 1) { - return rewrite(transpose->a, {indices[0]}); + else if (transpose->a.getOrder() == 2) { + return blockedRewrite(transpose->a, indices); } - return rewrite(transpose->a, {}); + return blockedRewrite(transpose->a, {}); } else if 
(isa(linalg.get())) { auto lit = to(linalg.get()); diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index c76c338b3..bb437aeef 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -35,7 +35,11 @@ LinalgExpr::LinalgExpr(TensorVar var, bool isColVec, TensorBase* _tensorBase) : tensorBase = _tensorBase; } -LinalgExpr::LinalgExpr(TensorBase* _tensorBase, bool isColVec) : LinalgExpr(new LinalgTensorBaseNode(_tensorBase->getTensorVar(), _tensorBase, isColVec)) { +LinalgExpr::LinalgExpr(TensorVar var, bool isColVec, int block, TensorBase* _tensorBase) : LinalgExpr(new LinalgTensorBaseNode(var, _tensorBase, isColVec, block)) { + tensorBase = _tensorBase; +} + +LinalgExpr::LinalgExpr(TensorBase* _tensorBase, bool isColVec, int block) : LinalgExpr(new LinalgTensorBaseNode(_tensorBase->getTensorVar(), _tensorBase, isColVec, block)) { } @@ -89,6 +93,14 @@ int LinalgExpr::getOrder() const { return const_cast(this->ptr)->getOrder(); } +bool LinalgExpr::isBlocked() const { + return const_cast(this->ptr)->isBlocked(); +} + +int LinalgExpr::getBlock() const { + return const_cast(this->ptr)->getBlock(); +} + bool LinalgExpr::isColVector() const { return const_cast(this->ptr)->isColVector(); } @@ -112,9 +124,18 @@ void checkCompatibleShape(const LinalgExpr &lhs, const LinalgExpr &rhs) { if (lhs.getOrder() != 0 && rhs.getOrder() != 0) taco_uassert(lhs.getOrder() == rhs.getOrder()) << "RHS and LHS order do not match for linear algebra " "binary operation" << endl; - if (lhs.getOrder() == 1) + if (lhs.getOrder() == 1 && !lhs.isBlocked()) taco_uassert(lhs.isColVector() == rhs.isColVector()) << "RHS and LHS vector type do not match for linear algebra " "binary operation" << endl; + + if (lhs.isBlocked()) { + taco_uassert(lhs.getBlock() == rhs.getBlock()) << "LHS (" << lhs.getBlock() << ") and RHS (" << rhs.getBlock() << + "block size must match for linear algebra binary op" << endl; + if (lhs.getOrder() == 2) + taco_uassert(lhs.isColVector() == rhs.isColVector()) << "RHS and LHS vector type do not match for linear algebra " + "binary operation" << endl; + } + } LinalgExpr operator-(const LinalgExpr &expr) { @@ -138,41 +159,84 @@ LinalgExpr operator-(const LinalgExpr &lhs, const LinalgExpr &rhs) { LinalgExpr operator*(const LinalgExpr &lhs, const LinalgExpr &rhs) { int order = 0; bool isColVec = false; - // Matrix-matrix mult - if (lhs.getOrder() == 2 && rhs.getOrder() == 2) { - order = 2; - } - // Matrix-column vector multiply - else if (lhs.getOrder() == 2 && rhs.getOrder() == 1 && rhs.isColVector()) { - order = 1; - isColVec = true; - } - // Row-vector Matrix multiply - else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 2) { - order = 1; - } - // Inner product - else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 1 && rhs.isColVector()) { - order = 0; - } - // Outer product - else if (lhs.getOrder() == 1 && lhs.isColVector() && rhs.getOrder() == 1 && !rhs.isColVector()) { - order = 2; - } - // Scalar product - else if (lhs.getOrder() == 0) { - order = rhs.getOrder(); - isColVec = rhs.isColVector(); - } - else if (rhs.getOrder() == 0) { - order = lhs.getOrder(); - isColVec = lhs.isColVector(); + if (lhs.isBlocked()) { + cout << "BLOCKED" << endl; + cout << lhs.getOrder() << ", " << rhs.getOrder() << endl; + cout << lhs.isColVector() << ", " << rhs.isColVector() << endl; + taco_uassert(lhs.getBlock() == rhs.getBlock()) << "LHS (" << lhs.getBlock() << ") and RHS (" << 
rhs.getBlock() << + "block size must match for linear algebra binary op" << endl; + + // Matrix-matrix mult + if (lhs.getOrder() == 4 && rhs.getOrder() == 4) { + order = 4; + } + // Matrix-column vector multiply + else if (lhs.getOrder() == 4 && rhs.getOrder() == 2 && rhs.isColVector()) { + order = 2; + isColVec = true; + } + // Row-vector Matrix multiply + else if (lhs.getOrder() == 2 && !lhs.isColVector() && rhs.getOrder() == 4) { + order = 2; + } + // Inner product + else if (lhs.getOrder() == 2 && !lhs.isColVector() && rhs.getOrder() == 2 && rhs.isColVector()) { + order = 0; + } + // Outer product + else if (lhs.getOrder() == 2 && lhs.isColVector() && rhs.getOrder() == 2 && !rhs.isColVector()) { + order = 4; + } + // Scalar product + else if (lhs.getOrder() == 0) { + cout << "SCALAR" << endl; + order = rhs.getOrder(); + isColVec = rhs.isColVector(); + } else if (rhs.getOrder() == 0) { + order = lhs.getOrder(); + isColVec = lhs.isColVector(); + } else { + taco_uassert(lhs.getOrder() != rhs.getOrder()) << "LHS (" << lhs.getOrder() << "," << lhs.isColVector() + << ") and RHS (" << rhs.getOrder() << "," << rhs.isColVector() + << ") order/vector type do not match " + "for linear algebra matrix multiply" << endl; + } } else { - taco_uassert(lhs.getOrder() != rhs.getOrder()) << "LHS (" << lhs.getOrder() << "," << lhs.isColVector() - << ") and RHS (" << rhs.getOrder() << "," << rhs.isColVector() - << ") order/vector type do not match " - "for linear algebra matrix multiply" << endl; + // Matrix-matrix mult + if (lhs.getOrder() == 2 && rhs.getOrder() == 2) { + order = 2; + } + // Matrix-column vector multiply + else if (lhs.getOrder() == 2 && rhs.getOrder() == 1 && rhs.isColVector()) { + order = 1; + isColVec = true; + } + // Row-vector Matrix multiply + else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 2) { + order = 1; + } + // Inner product + else if (lhs.getOrder() == 1 && !lhs.isColVector() && rhs.getOrder() == 1 && rhs.isColVector()) { + order = 0; + } + // Outer product + else if (lhs.getOrder() == 1 && lhs.isColVector() && rhs.getOrder() == 1 && !rhs.isColVector()) { + order = 2; + } + // Scalar product + else if (lhs.getOrder() == 0) { + order = rhs.getOrder(); + isColVec = rhs.isColVector(); + } else if (rhs.getOrder() == 0) { + order = lhs.getOrder(); + isColVec = lhs.isColVector(); + } else { + taco_uassert(lhs.getOrder() != rhs.getOrder()) << "LHS (" << lhs.getOrder() << "," << lhs.isColVector() + << ") and RHS (" << rhs.getOrder() << "," << rhs.isColVector() + << ") order/vector type do not match " + "for linear algebra matrix multiply" << endl; + } } return new LinalgMatMulNode(lhs, rhs, order, isColVec); } @@ -192,7 +256,6 @@ LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { } LinalgExpr transpose(const LinalgExpr &lhs) { - cout << "transpose here" << endl; return new LinalgTransposeNode(lhs, !lhs.isColVector()); } diff --git a/src/linalg_notation/linalg_notation_nodes_abstract.cpp b/src/linalg_notation/linalg_notation_nodes_abstract.cpp index 52cf9ba3e..50e04e459 100644 --- a/src/linalg_notation/linalg_notation_nodes_abstract.cpp +++ b/src/linalg_notation/linalg_notation_nodes_abstract.cpp @@ -5,11 +5,11 @@ using namespace std; namespace taco { LinalgExprNode::LinalgExprNode(Datatype type) - : dataType(type), order(0), isColVec(false) { + : dataType(type), order(0), isColVec(false), block(0) { } LinalgExprNode::LinalgExprNode(Datatype type, int order) - : dataType(type), order(order) { + : dataType(type), order(order), block(0) { if (order 
!= 1) isColVec = false; else @@ -17,13 +17,23 @@ LinalgExprNode::LinalgExprNode(Datatype type, int order) } LinalgExprNode::LinalgExprNode(Datatype type, int order, bool isColVec) - : dataType(type), order(order) { + : dataType(type), order(order), block(0) { if (order != 1) this->isColVec = false; else this->isColVec = isColVec; } +LinalgExprNode::LinalgExprNode(Datatype type, int order, bool isColVec, int block) + : dataType(type), order(order), block(block) { + if (block == 0 && order != 1) + this->isColVec = false; + else if (block != 0 && order != 2) + this->isColVec = false; + else + this->isColVec = isColVec; +} + Datatype LinalgExprNode::getDataType() const { return dataType; } @@ -36,6 +46,14 @@ bool LinalgExprNode::isColVector() const { return isColVec; } +bool LinalgExprNode::isBlocked() const { + return block != 0; +} + +int LinalgExprNode::getBlock() const { + return block; +} + void LinalgExprNode::setColVector(bool val) { isColVec = val; } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 325d92fd5..c9f184521 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -248,9 +248,9 @@ TEST(linalg, complex_expr) { } TEST(linalg, blocking_matmul) { - Matrix B("B", 16, 16, dense, dense, 4); - Matrix C("C", 16, 16, dense, dense, 4); - Matrix A("A", 16, 16, dense, dense, 4); + Matrix B("B", 4, 4, sparse, dense, 2); + Matrix C("C", 4, 4, dense, sparse, 2); + Matrix A("A", 4, 4, dense, dense, 2); cout << "--- Before inserting ---" << endl; B.insert(0,0,0, 0,2); @@ -263,40 +263,71 @@ TEST(linalg, blocking_matmul) { cout << "B: " << B << endl; cout << "C: " << C << endl; cout << "--- Before Expression ---" << endl; - A = B * C; -// cout << "--- After Expression ---" << endl; -// -// cout << "--- Before At ---" << endl; -// cout << "B(0,0): " << B.at(0,0) << endl; -// cout << "A(0,0): " << A.at(0,0) << endl; -// cout << "--- After At ---" << endl; -// -// cout << "--- Before Rewrite of A ---" << endl; -// A.rewrite(); -// cout << "--- After Rewrite of A ---" << endl; -// -// cout << "--- Before At (A) ---" << endl; -// cout << "A(0,0): " << A.at(0,0) << endl; -// cout << "--- After At (A) ---" << endl; -// -// cout << "--- before cout of a ---" << endl; -// cout << A << endl; -// cout << "--- after cout of a ---" << endl; -// -// cout << "--- Before getIndexAssignment on A ---" << endl; -// cout << A.getIndexAssignment() << endl; -// cout << "--- After getIndexAssignment on A ---" << endl; + A = transpose(B * C); + cout << "--- After Expression ---" << endl; + + cout << "--- Before At ---" << endl; + auto vec = B.atBlock(0,0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0,0): " << vec.at(it) << endl; + vec = A.atBlock(0,0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0,0): " << vec.at(it) << endl; + cout << "--- After At ---" << endl; + + cout << "--- before cout of a ---" << endl; + cout << A << endl; + cout << "--- after cout of a ---" << endl; + + cout << "--- Before getIndexAssignment on A ---" << endl; + cout << A.getIndexAssignment() << endl; + cout << "--- After getIndexAssignment on A ---" << endl; // // ASSERT_EQ(A.at(0,0), 4); // ASSERT_EQ(A.at(0,1), 4); // ASSERT_EQ(A.at(1,0), 0); ASSERT_TRUE(1); +} - // TODO: Support this style of accessing and querying the values, too - /* map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; */ - /* for (auto val = A.beginTyped(); val != A.endTyped(); ++val) { */ - /* ASSERT_TRUE(util::contains(vals, val->first.toVector())); */ - /* ASSERT_EQ(vals.at(val->first.toVector()), val->second); */ - 
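// A short sketch of the blocked element API exercised by these tests, assuming
// the blocked Matrix/Vector methods added in this patch (coordinates and values
// here are illustrative). Blocked writes take (block, in-block) coordinate
// pairs, and atBlock() returns all values of one block as a flat vector.
#include "taco/linalg.h"
#include <vector>
using namespace taco;

void blocked_access_sketch() {
  Matrix<double> M("M", 4, 4, dense, dense, 2);   // a 2x2 grid of 2x2 blocks
  M.insert(0, 1, 0, 1, 3.0);                      // element (1,1) of block (0,0)
  std::vector<double> blk = M.atBlock(0, 0);      // the four values of block (0,0)
  double v = M.at(0, 1, 0, 1);                    // reads back 3.0

  Vector<double> x("x", 4, dense, 2);             // two blocks of two entries
  x.insert(0, 1, 5.0);                            // element 1 of block 0
  std::vector<double> xb = x.atBlock(0);          // both values of block 0
}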
/* } */ +TEST(linalg, blocking_matvec) { + Matrix B("B", 4, 4, sparse, dense, 2); + Vector c("c", 4, sparse, 2); + Vector a("a", 4, dense, 2); + cout << "--- Before inserting ---" << endl; + B.insert(0,0,0, 0,2); + B.insert(1,1, 0,0,1); + B.insert(0,1, 0,0,2); + + c.insert(0,0,2); + c.insert(1,1,2); + cout << "--- After inserting ---" << endl; + cout << "B: " << B << endl; + cout << "C: " << c << endl; + cout << "c(isColVec): " << c.isColVector(); + cout << "--- Before Expression ---" << endl; + a = (transpose(c)*c)*(B*c); + cout << "--- After Expression ---" << endl; + + cout << "--- Before At ---" << endl; + auto vec = B.atBlock(0, 0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0): " << vec.at(it) << endl; + vec = a.atBlock(0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0): " << vec.at(it) << endl; + cout << "--- After At ---" << endl; + + cout << "--- before cout of a ---" << endl; + cout << a << endl; + cout << "--- after cout of a ---" << endl; + + cout << "--- Before getIndexAssignment on A ---" << endl; + cout << a.getIndexAssignment() << endl; + cout << "--- After getIndexAssignment on A ---" << endl; +// +// ASSERT_EQ(A.at(0,0), 4); +// ASSERT_EQ(A.at(0,1), 4); +// ASSERT_EQ(A.at(1,0), 0); + ASSERT_TRUE(1); } From 7ee2315738dfee0ca728bbf20fac8e2e4a3c12e7 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 8 Dec 2020 15:03:12 -0800 Subject: [PATCH 30/61] enable A(1,2) = 3 writes, and define custom cast operator for Scalars --- include/taco/linalg.h | 16 ++++++- test/tests-linalg.cpp | 101 ++++++++++++++---------------------------- 2 files changed, 48 insertions(+), 69 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index e45fcc1b8..ec7974f8f 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -89,6 +89,7 @@ class Matrix : public LinalgBase { // And a Write method void insert(int coord_x, int coord_y, CType value); + ScalarAccess operator()(int i, int j); }; @@ -135,6 +136,11 @@ void Matrix::insert(int coord_x, int coord_y, CType value) { tensorBase->insert({coord_x, coord_y}, value); } +template +ScalarAccess Matrix::operator()(int i, int j) { + return ScalarAccess(tensorBase, {i, j}); +} + // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ @@ -163,8 +169,11 @@ class Vector : public LinalgBase { // Support some Write methods void insert(int coord, CType value); + ScalarAccess operator()(int i); + // Support some Read methods too CType at(int coord); + }; // ------------------------------------------------------------ @@ -204,6 +213,11 @@ void Vector::insert(int coord, CType value) { tensorBase->insert({coord}, value); } +template +ScalarAccess Vector::operator()(int i) { + return ScalarAccess(tensorBase, {i}); +} + template CType Vector::at(int coord) { return tensorBase->at({coord}); @@ -221,7 +235,7 @@ class Scalar : public LinalgBase { return LinalgBase::operator=(expr); } - /* operator int() const { return */ + operator CType() const { return tensorBase->at({}); } }; template diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 4999ced81..8ba4f064d 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -9,52 +9,18 @@ TEST(linalg, matmul) { Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); - cout << "--- Before inserting ---" << endl; - B.insert(0,0,2); - B.insert(1,1,1); - B.insert(0,1,2); + B(0,0) = 2; + B(1,1) = 1; + B(0,1) = 2; + C(0,0) = 2; + C(1,1) = 2; - C.insert(0,0,2); - 
C.insert(1,1,2); - cout << "--- After inserting ---" << endl; - - cout << "--- Before Expression ---" << endl; A = B * C; - cout << "--- After Expression ---" << endl; - - cout << "--- Before At ---" << endl; - cout << "B(0,0): " << B.at(0,0) << endl; - cout << "A(0,0): " << A.at(0,0) << endl; - cout << "--- After At ---" << endl; - - cout << "--- Before Rewrite of A ---" << endl; - A.rewrite(); - cout << "--- After Rewrite of A ---" << endl; - - cout << "--- Before At (A) ---" << endl; - cout << "A(0,0): " << A.at(0,0) << endl; - cout << "--- After At (A) ---" << endl; - - cout << "--- before cout of a ---" << endl; - cout << A << endl; - cout << "--- after cout of a ---" << endl; - - cout << "--- Before getIndexAssignment on A ---" << endl; - cout << A.getIndexAssignment() << endl; - cout << "--- After getIndexAssignment on A ---" << endl; ASSERT_EQ(A.at(0,0), 4); ASSERT_EQ(A.at(0,1), 4); ASSERT_EQ(A.at(1,0), 0); ASSERT_EQ(A.at(1,1), 2); - - // TODO: Support this style of accessing and querying the values, too - /* map, double> vals = {{{0,0},4}, {{0,1},4}, {{1,1},2}}; */ - /* for (auto val = A.beginTyped(); val != A.endTyped(); ++val) { */ - /* ASSERT_TRUE(util::contains(vals, val->first.toVector())); */ - /* ASSERT_EQ(vals.at(val->first.toVector()), val->second); */ - /* } */ - } TEST(linalg, tensorbase) { @@ -62,11 +28,11 @@ TEST(linalg, tensorbase) { Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); - B.insert(0,0,1); - B.insert(1,1,4); + B(0,0) = 1; + B(1,1) = 4; - C.insert(0,1,2); - C.insert(1,0,3); + C(0,1) = 2; + C(1,0) = 3; A = B + C; @@ -86,12 +52,12 @@ TEST(linalg, matvec_mul) { Vector b("b", 2, dense); Matrix A("A", 2, 2, dense, dense); - b.insert(0,2); - b.insert(1,1); + b(0) = 2; + b(1) = 1; - A.insert(0,0,1); - A.insert(0,1,3); - A.insert(1,1,2); + A(0,0) = 1; + A(0,1) = 3; + A(1,1) = 2; x = A*b; @@ -109,12 +75,12 @@ TEST(linalg, vecmat_mul) { Vector b("b", 2, dense, false); Matrix A("A", 2, 2, dense, dense); - b.insert(0,3); - b.insert(1,-2); + b(0) = 3; + b(1) = -2; - A.insert(0,0,5); - A.insert(0,1,2); - A.insert(1,0,-1); + A(0,0) = 5; + A(0,1) = 2; + A(1,0) = -1; // Should be [17, 6] x = b * A; @@ -132,11 +98,11 @@ TEST(linalg, inner_mul) { Vector b("b", 2, dense, false); Vector a("a", 2, dense, true); - b.insert(0,2); - b.insert(1,3); + b(0) = 2; + b(1) = 3; - a.insert(0,-3); - a.insert(1,5); + a(0) = -3; + a(1) = 5; x = b * a; @@ -145,8 +111,7 @@ TEST(linalg, inner_mul) { cout << x.getIndexAssignment(); - - ASSERT_TRUE(1); + ASSERT_EQ(x, 9); } TEST(linalg, outer_mul) { @@ -154,11 +119,11 @@ TEST(linalg, outer_mul) { Vector b("b", 2, dense, false); Vector a("a", 2, dense, true); - b.insert(0,2); - b.insert(1,3); + b(0) = 2; + b(1) = 3; - a.insert(0,-3); - a.insert(1,5); + a(0) = -3; + a(1) = 5; X = a * b; @@ -177,12 +142,12 @@ TEST(linalg, rowvec_transpose) { Matrix A("A", 2, 2, dense, dense); Scalar a("a", true); - b.insert(0,2); - b.insert(1,5); + b(0) = 2; + b(1) = 5; - A.insert(0,0,1); - A.insert(0,1,2); - A.insert(1,1,4); + A(0,0) = 1; + A(0,1) = 2; + A(1,1) = 4; a = transpose(transpose(b) * A * b); From b7ba7fafde7ff57a55cbc3b129e4ef84b1be0205 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 8 Dec 2020 16:47:25 -0800 Subject: [PATCH 31/61] Access Matrix/Vector classes with IndexVars --- include/taco/linalg.h | 33 +++++++++++++++++++++++++++++++ test/tests-linalg.cpp | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index ec7974f8f..dfeb1e8cc 
100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -91,6 +91,9 @@ class Matrix : public LinalgBase { ScalarAccess operator()(int i, int j); + // Access methods for use in IndexExprs + const Access operator()(const IndexVar i, const IndexVar j) const; + Access operator()(const IndexVar i, const IndexVar j); }; // ------------------------------------------------------------ @@ -141,6 +144,18 @@ ScalarAccess Matrix::operator()(int i, int j) { return ScalarAccess(tensorBase, {i, j}); } +// Definition of Access methods +template +const Access Matrix::operator()(const IndexVar i, const IndexVar j) const { + return (*tensorBase)({i,j}); +} + +template +Access Matrix::operator()(const IndexVar i, const IndexVar j) { + return (*tensorBase)({i,j}); +} + + // ------------------------------------------------------------ // Vector class // ------------------------------------------------------------ @@ -174,6 +189,9 @@ class Vector : public LinalgBase { // Support some Read methods too CType at(int coord); + // Access methods for use in IndexExprs + const Access operator()(const IndexVar i) const; + Access operator()(const IndexVar i); }; // ------------------------------------------------------------ @@ -223,6 +241,21 @@ CType Vector::at(int coord) { return tensorBase->at({coord}); } +// Definition of Access methods +template +const Access Vector::operator()(const IndexVar i) const { + return (*tensorBase)({i}); +} + +template +Access Vector::operator()(const IndexVar i) { + return (*tensorBase)({i}); +} + +// ------------------------------------------------------------ +// Scalar class +// ------------------------------------------------------------ + template class Scalar : public LinalgBase { std::string name; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 8ba4f064d..3caf5628a 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -4,6 +4,51 @@ using namespace taco; +TEST(linalg, matmul_index_expr) { + Matrix B("B", 2, 2, dense, dense); + Matrix C("C", 2, 2, dense, dense); + Matrix A("A", 2, 2, dense, dense); + + B(0,0) = 2; + B(1,1) = 1; + B(0,1) = 2; + C(0,0) = 2; + C(1,1) = 2; + + IndexVar i, j, k; + A(i,j) = B(i,k) * C(k,j); + + ASSERT_EQ(A.at(0,0), 4); + ASSERT_EQ(A.at(0,1), 4); + ASSERT_EQ(A.at(1,0), 0); + ASSERT_EQ(A.at(1,1), 2); +} + +TEST(linalg, vecmat_mul_index_expr) { + Vector x("x", 2, dense, false); + Vector b("b", 2, dense, false); + Matrix A("A", 2, 2, dense, dense); + + b(0) = 3; + b(1) = -2; + + A(0,0) = 5; + A(0,1) = 2; + A(1,0) = -1; + + // Should be [17, 6] + IndexVar i, j; + x(i) = b(j) * A(j,i); + + ASSERT_EQ(x.at(0), 17); + ASSERT_EQ(x.at(1), 6); + + cout << x << endl; + + cout << x.getIndexAssignment(); +} + + TEST(linalg, matmul) { Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); From ce9a605cd1cec228a75e092d50616a164cacaa7d Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 8 Dec 2020 17:31:07 -0800 Subject: [PATCH 32/61] assign IndexExpr to Scalar properly --- include/taco/linalg.h | 4 ++++ test/tests-linalg.cpp | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index dfeb1e8cc..eb24e4534 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -268,6 +268,10 @@ class Scalar : public LinalgBase { return LinalgBase::operator=(expr); } + void operator=(const IndexExpr& expr) { + (*tensorBase) = expr; + } + operator CType() const { return tensorBase->at({}); } }; diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 
3caf5628a..da532a8dd 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -49,6 +49,28 @@ TEST(linalg, vecmat_mul_index_expr) { } +TEST(linalg, inner_mul_index_expr) { + Scalar x("x", true); + Vector b("b", 2, dense, false); + Vector a("a", 2, dense, true); + + b(0) = 2; + b(1) = 3; + + a(0) = -3; + a(1) = 5; + + IndexVar i; + x = b(i) * a(i); + + // Should be 9 + cout << x << endl; + + cout << x.getIndexAssignment(); + + ASSERT_EQ(x, 9); +} + TEST(linalg, matmul) { Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); From 3e8f4a680f4c2fe72812e235dfa9d862b72cd3a5 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Thu, 10 Dec 2020 14:44:14 -0800 Subject: [PATCH 33/61] quick test that printing and reading the ScalarAccess returned by A(1,1) works --- test/tests-linalg.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index da532a8dd..e10d15083 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -88,6 +88,10 @@ TEST(linalg, matmul) { ASSERT_EQ(A.at(0,1), 4); ASSERT_EQ(A.at(1,0), 0); ASSERT_EQ(A.at(1,1), 2); + + cout << "A(1,1) = " << A(1,1) << endl; + double a11 = A(1,1); + cout << "a11 = " << a11 << endl; } TEST(linalg, tensorbase) { From 79c61a5d03c34cdc1a631f5aba74750bf03e13d5 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 16:33:39 -0800 Subject: [PATCH 34/61] change indexexpr test to show mixed use of tensor and matrix in linalg expression --- test/tests-linalg.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index e10d15083..e49fb0d35 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -5,7 +5,7 @@ using namespace taco; TEST(linalg, matmul_index_expr) { - Matrix B("B", 2, 2, dense, dense); + Tensor B("B", {2,2}); Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); From 711d7c888fd4619f87ab9b4bebcebd3c8598bfe0 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 19:51:50 -0800 Subject: [PATCH 35/61] fix warning about initialization out of order --- src/linalg.cpp | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/src/linalg.cpp b/src/linalg.cpp index 9b5a9725a..af59ad00c 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -8,35 +8,28 @@ using namespace std; namespace taco { -LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : name(name), tensorType(tensorType), idxcount(0), - LinalgExpr(TensorVar(name, tensorType), isColVec) { +LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : LinalgExpr(TensorVar(name, tensorType), isColVec), name(name), tensorType(tensorType), idxcount(0) { } LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0) { - if(isa(ptr)) { - /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ - cout << this->tensorBase->getName() << endl; - } } -LinalgBase::LinalgBase(TensorBase* tbase, bool isColVec) : - LinalgExpr(tbase, isColVec), name(tbase->getName()), - tensorType(tbase->getTensorVar().getType()), idxcount(0) { - if(isa(ptr)) { - /* cout << "LinalgBase constructor - LinalgTensorBaseNode" << endl; */ - cout << this->tensorBase->getName() << endl; - } -} +//TODO: remove this entirely +/* LinalgBase::LinalgBase(TensorBase* tbase, bool 
isColVec) : */ +/* LinalgExpr(tbase, isColVec), name(tbase->getName()), */ +/* tensorType(tbase->getTensorVar().getType()), idxcount(0) { */ +/* // Checking if this is used */ +/* cout << "!!!!!! LinalgBase::LinalgBase with tbase arg used" << endl; */ +/* } */ -LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), - idxcount(0), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { -} +/* LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), */ +/* idxcount(0), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { */ +/* } */ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { - /* cout << "LinalgBase operator= on " << name << endl; */ taco_iassert(isa(this->ptr)); TensorVar var = to(this->get())->tensorVar; From 7bd3e89973a35d35ed6e2457ef23e01e95279a70 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 22:05:00 -0800 Subject: [PATCH 36/61] fixed another Matrix constructor --- include/taco/linalg.h | 13 +++++--- src/linalg.cpp | 4 +-- test/tests-linalg.cpp | 72 +++++++++++++++++++++++++++---------------- 3 files changed, 57 insertions(+), 32 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index eb24e4534..6e76800b2 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -33,9 +33,9 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec = false); - LinalgBase(std::string name, Type tensorType, bool isColVec = false); - LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); - LinalgBase(TensorBase* tensor, bool isColVec = false); + /* LinalgBase(std::string name, Type tensorType, bool isColVec = false); */ + /* LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); */ + /* LinalgBase(TensorBase* tensor, bool isColVec = false); */ /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr &expr); @@ -68,6 +68,7 @@ class Matrix : public LinalgBase { Matrix(std::string name, size_t dim1, size_t dim2); Matrix(std::string name, std::vector dimensions); + /* Matrix(std::string name, std::initializer_list dimensions); */ Matrix(std::string name, size_t dim1, size_t dim2, Format format); @@ -104,7 +105,10 @@ template Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} template -Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} +/* Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} */ +Matrix::Matrix(std::string name, std::vector dimensions) : +/* Matrix::Matrix(std::string name, std::initializer_list dimensions) : */ + LinalgBase(name, Type(type(), Shape(std::vector(dimensions.begin(), dimensions.end()))), type(), std::vector(dimensions.begin(), dimensions.end()), Format({dense,dense})) {} template Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} @@ -117,6 +121,7 @@ template Matrix::Matrix(std::string name, std::vector dimensions, Format format) : LinalgBase(name, Type(type(), dimensions), format) {} +/* This is the one in use currently */ template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int)dim1, (int)dim2}, 
Format({format1, format2}), false) {} diff --git a/src/linalg.cpp b/src/linalg.cpp index af59ad00c..f607b3e65 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -8,8 +8,8 @@ using namespace std; namespace taco { -LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : LinalgExpr(TensorVar(name, tensorType), isColVec), name(name), tensorType(tensorType), idxcount(0) { -} +/* LinalgBase::LinalgBase(string name, Type tensorType, bool isColVec) : LinalgExpr(TensorVar(name, tensorType), isColVec), name(name), tensorType(tensorType), idxcount(0) { */ +/* } */ LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index e49fb0d35..c3e03a7e3 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -4,6 +4,25 @@ using namespace taco; +TEST(linalg, matrix_constructors) { + Matrix B("B", {2, 2}); + Matrix C("C", 2, 2, dense, dense); + Matrix A("A", 2, 2, dense, dense); + + B(0,0) = 2; + B(1,1) = 1; + B(0,1) = 2; + C(0,0) = 2; + C(1,1) = 2; + + A = B * C; + + ASSERT_EQ(A.at(0,0), 4); + ASSERT_EQ(A.at(0,1), 4); + ASSERT_EQ(A.at(1,0), 0); + ASSERT_EQ(A.at(1,1), 2); +} + TEST(linalg, matmul_index_expr) { Tensor B("B", {2,2}); Matrix C("C", 2, 2, dense, dense); @@ -89,6 +108,7 @@ TEST(linalg, matmul) { ASSERT_EQ(A.at(1,0), 0); ASSERT_EQ(A.at(1,1), 2); + //TODO: make this better cout << "A(1,1) = " << A(1,1) << endl; double a11 = A(1,1); cout << "a11 = " << a11 << endl; @@ -208,27 +228,27 @@ TEST(linalg, outer_mul) { ASSERT_TRUE(1); } -TEST(linalg, rowvec_transpose) { - Vector b("b", 2, dense, false); - Matrix A("A", 2, 2, dense, dense); - Scalar a("a", true); +/* TEST(linalg, rowvec_transpose) { */ +/* Vector b("b", 2, dense, false); */ +/* Matrix A("A", 2, 2, dense, dense); */ +/* Scalar a("a", true); */ - b(0) = 2; - b(1) = 5; +/* b(0) = 2; */ +/* b(1) = 5; */ - A(0,0) = 1; - A(0,1) = 2; - A(1,1) = 4; +/* A(0,0) = 1; */ +/* A(0,1) = 2; */ +/* A(1,1) = 4; */ - a = transpose(transpose(b) * A * b); +/* a = transpose(transpose(b) * A * b); */ - // Should be 124 - cout << a << endl; +/* // Should be 124 */ +/* cout << a << endl; */ - cout << a.getIndexAssignment(); +/* cout << a.getIndexAssignment(); */ - ASSERT_TRUE(1); -} +/* ASSERT_TRUE(1); */ +/* } */ TEST(linalg, tensorapi) { cout << "--- Beginning of TensorAPI test ---" << endl; @@ -267,18 +287,18 @@ TEST(linalg, tensorapi) { /* cout << a << endl; */ } -TEST(linalg, complex_expr) { - Matrix A("A", 2, 2, dense, dense); - Matrix B("B", 2, 2, dense, dense); - Matrix C("C", 2, 2, dense, dense); - Matrix D("D", 2, 2, dense, dense); - Matrix E("D", 2, 2, dense, dense); +/* TEST(linalg, complex_expr) { */ +/* Matrix A("A", 2, 2, dense, dense); */ +/* Matrix B("B", 2, 2, dense, dense); */ +/* Matrix C("C", 2, 2, dense, dense); */ +/* Matrix D("D", 2, 2, dense, dense); */ +/* Matrix E("D", 2, 2, dense, dense); */ - A = E*elemMul(B+C, D); +/* A = E*elemMul(B+C, D); */ - cout << A << endl; +/* cout << A << endl; */ - cout << A.getIndexAssignment(); +/* cout << A.getIndexAssignment(); */ - ASSERT_TRUE(1); -} +/* ASSERT_TRUE(1); */ +/* } */ From b9ac34f00714a0a526b07872ebcba928f0b915e8 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 22:24:00 -0800 Subject: [PATCH 37/61] fixed another Matrix constructor --- include/taco/linalg.h | 7 ++++--- test/tests-linalg.cpp | 1 + 2 files changed, 5 
insertions(+), 3 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 6e76800b2..c5ecac744 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -104,14 +104,15 @@ class Matrix : public LinalgBase { template Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} +// Works template -/* Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), dimensions)) {} */ Matrix::Matrix(std::string name, std::vector dimensions) : -/* Matrix::Matrix(std::string name, std::initializer_list dimensions) : */ LinalgBase(name, Type(type(), Shape(std::vector(dimensions.begin(), dimensions.end()))), type(), std::vector(dimensions.begin(), dimensions.end()), Format({dense,dense})) {} +// Works template -Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : LinalgBase(name, Type(type(), {dim1, dim2})) {} +Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : + LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int) dim1, (int) dim2}, Format({dense,dense})) {} template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, Format format) : diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index c3e03a7e3..2bf0ac374 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -6,6 +6,7 @@ using namespace taco; TEST(linalg, matrix_constructors) { Matrix B("B", {2, 2}); + Matrix D("D", 2, 2); Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); From 15a41d5836df315e3568556b41412166a7c8fbf1 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 22:35:51 -0800 Subject: [PATCH 38/61] fixed two more Matrix constructors --- include/taco/linalg.h | 6 ++++-- test/tests-linalg.cpp | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index c5ecac744..8e4900451 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -114,13 +114,15 @@ template Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int) dim1, (int) dim2}, Format({dense,dense})) {} +// Works template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, Format format) : - LinalgBase(name, Type(type(), {dim1, dim2}), format) {} + LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int) dim1, (int) dim2}, format) {} +// Works template Matrix::Matrix(std::string name, std::vector dimensions, Format format) : - LinalgBase(name, Type(type(), dimensions), format) {} + LinalgBase(name, Type(type(), Shape(std::vector(dimensions.begin(), dimensions.end()))), type(), std::vector(dimensions.begin(), dimensions.end()), format) {} /* This is the one in use currently */ template diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 2bf0ac374..137774818 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -7,6 +7,8 @@ using namespace taco; TEST(linalg, matrix_constructors) { Matrix B("B", {2, 2}); Matrix D("D", 2, 2); + Matrix E("E", 2, 2, {dense, dense}); + Matrix F("F", {2, 2}, {dense, dense}); Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); From 1f4f5747a0db0f618bf566faefc9e0ff59789747 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Mon, 14 Dec 2020 23:02:14 -0800 Subject: [PATCH 39/61] fixed simplest Matrix constructor and commented out two Matrix constructors I think we should remove --- include/taco/linalg.h | 20 ++++++++++++-------- test/tests-linalg.cpp | 15 +-------------- 2 files changed, 13 insertions(+), 22 deletions(-) diff 
--git a/include/taco/linalg.h b/include/taco/linalg.h index 8e4900451..33ad83b46 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -76,9 +76,10 @@ class Matrix : public LinalgBase { Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2); - Matrix(std::string name, Type tensorType); + //TODO: are these really necessary? + /* Matrix(std::string name, Type tensorType); */ - Matrix(std::string name, Type tensorType, Format format); + /* Matrix(std::string name, Type tensorType, Format format); */ LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); @@ -102,7 +103,8 @@ class Matrix : public LinalgBase { // ------------------------------------------------------------ template -Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42})) {} +Matrix::Matrix(std::string name) : + LinalgBase(name, Type(type(), {42, 42}), type(), {42, 42}, Format({dense, dense})) {} // Works template @@ -129,11 +131,12 @@ template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int)dim1, (int)dim2}, Format({format1, format2}), false) {} -template -Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} +// TODO: do we really need these? +/* template */ +/* Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} */ -template -Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} +/* template */ +/* Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} */ // Definition of Read methods template @@ -207,7 +210,8 @@ class Vector : public LinalgBase { // ------------------------------------------------------------ template -Vector::Vector(std::string name, bool isColVec) : LinalgBase(name, Type(type(), {42}), isColVec) {} +Vector::Vector(std::string name, bool isColVec) : + LinalgBase(name, Type(type(), {42}), isColVec) {} template Vector::Vector(std::string name, size_t dim, bool isColVec) : LinalgBase(name, Type(type(), {dim}), diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 137774818..79d06c721 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -5,25 +5,12 @@ using namespace taco; TEST(linalg, matrix_constructors) { + Matrix A("A"); Matrix B("B", {2, 2}); Matrix D("D", 2, 2); Matrix E("E", 2, 2, {dense, dense}); Matrix F("F", {2, 2}, {dense, dense}); Matrix C("C", 2, 2, dense, dense); - Matrix A("A", 2, 2, dense, dense); - - B(0,0) = 2; - B(1,1) = 1; - B(0,1) = 2; - C(0,0) = 2; - C(1,1) = 2; - - A = B * C; - - ASSERT_EQ(A.at(0,0), 4); - ASSERT_EQ(A.at(0,1), 4); - ASSERT_EQ(A.at(1,0), 0); - ASSERT_EQ(A.at(1,1), 2); } TEST(linalg, matmul_index_expr) { From f6fe3437b843f3902fbc1b8e541df78f18b2ff71 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Tue, 15 Dec 2020 11:03:17 -0500 Subject: [PATCH 40/61] Fix minor mistake in test --- test/tests-linalg.cpp | 88 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index e49fb0d35..843404e1f 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -209,7 +209,7 @@ TEST(linalg, outer_mul) { } TEST(linalg, rowvec_transpose) { - Vector b("b", 2, dense, false); + Vector b("b", 2, dense, true); Matrix A("A", 2, 2, dense, dense); Scalar a("a", true); @@ -282,3 +282,89 @@ TEST(linalg, 
complex_expr) { ASSERT_TRUE(1); } + +TEST(linalg, blocking_matmul) { + Matrix B("B", 4, 4, sparse, dense, 2); + Matrix C("C", 4, 4, dense, sparse, 2); + Matrix A("A", 4, 4, dense, dense, 2); + + cout << "--- Before inserting ---" << endl; + B.insert(0,0,0, 0,2); + B.insert(1,1, 0,0,1); + B.insert(0,1, 0,0,2); + + C.insert(0,0,2); + C.insert(1,1,2); + cout << "--- After inserting ---" << endl; + cout << "B: " << B << endl; + cout << "C: " << C << endl; + cout << "--- Before Expression ---" << endl; + A = transpose(B * C); + cout << "--- After Expression ---" << endl; + + cout << "--- Before At ---" << endl; + auto vec = B.atBlock(0,0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0,0): " << vec.at(it) << endl; + vec = A.atBlock(0,0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0,0): " << vec.at(it) << endl; + cout << "--- After At ---" << endl; + + cout << "--- before cout of a ---" << endl; + cout << A << endl; + cout << "--- after cout of a ---" << endl; + + cout << "--- Before getIndexAssignment on A ---" << endl; + cout << A.getIndexAssignment() << endl; + cout << "--- After getIndexAssignment on A ---" << endl; +// +// ASSERT_EQ(A.at(0,0), 4); +// ASSERT_EQ(A.at(0,1), 4); +// ASSERT_EQ(A.at(1,0), 0); + ASSERT_TRUE(1); +} + +TEST(linalg, blocking_matvec) { + Matrix B("B", 4, 4, sparse, dense, 2); + Vector c("c", 4, sparse, 2); + Vector a("a", 4, dense, 2); + + cout << "--- Before inserting ---" << endl; + B.insert(0,0,0, 0,2); + B.insert(1,1, 0,0,1); + B.insert(0,1, 0,0,2); + + c.insert(0,0,2); + c.insert(1,1,2); + + cout << "--- After inserting ---" << endl; + cout << "B: " << B << endl; + cout << "C: " << c << endl; + cout << "c(isColVec): " << c.isColVector(); + cout << "--- Before Expression ---" << endl; + a = (transpose(c)*c)*(B*c); + cout << "--- After Expression ---" << endl; + + cout << "--- Before At ---" << endl; + auto vec = B.atBlock(0, 0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0): " << vec.at(it) << endl; + vec = a.atBlock(0); + for (auto it = 0; it < vec.size(); ++it) + cout << "B(0): " << vec.at(it) << endl; + cout << "--- After At ---" << endl; + + cout << "--- before cout of a ---" << endl; + cout << a << endl; + cout << "--- after cout of a ---" << endl; + + cout << "--- Before getIndexAssignment on A ---" << endl; + cout << a.getIndexAssignment() << endl; + cout << "--- After getIndexAssignment on A ---" << endl; +// +// ASSERT_EQ(A.at(0,0), 4); +// ASSERT_EQ(A.at(0,1), 4); +// ASSERT_EQ(A.at(1,0), 0); + ASSERT_TRUE(1); +} From 0feaa60e22b6f6ca45a79bda05f5b2980a3ac489 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Tue, 15 Dec 2020 15:25:07 -0500 Subject: [PATCH 41/61] Rename indexVars for linalg compiler and remove blocking tests --- include/taco/linalg.h | 1 + src/linalg.cpp | 19 +++++++--- test/tests-linalg.cpp | 88 +------------------------------------------ 3 files changed, 16 insertions(+), 92 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index eb24e4534..aedae270d 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -24,6 +24,7 @@ class LinalgBase : public LinalgExpr { IndexStmt indexAssignment; int idxcount; + std::vector indexVarNameList = {"i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"}; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); diff --git a/src/linalg.cpp b/src/linalg.cpp index 9b5a9725a..4bc7720a1 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -78,9 +78,18 @@ vector LinalgBase::getUniqueIndices(size_t order) { } IndexVar 
LinalgBase::getUniqueIndex() { - string name = "i" + to_string(idxcount); + int loc = idxcount % indexVarNameList.size(); + cout << "Locatopm" << loc << endl; + int num = idxcount / indexVarNameList.size(); + + string indexVarName; + if (num == 0) + indexVarName = indexVarNameList.at(loc); + else + indexVarName = indexVarNameList.at(loc) + to_string(num); + idxcount += 1; - IndexVar result(name); + IndexVar result(indexVarName); return result; } @@ -227,10 +236,10 @@ IndexStmt LinalgBase::rewrite() { vector indices = {}; if (tensor.getOrder() == 1) { - indices.push_back(IndexVar("i")); + indices.push_back(getUniqueIndex()); } else if (tensor.getOrder() == 2) { - indices.push_back(IndexVar("i")); - indices.push_back(IndexVar("j")); + indices.push_back(getUniqueIndex()); + indices.push_back(getUniqueIndex()); } Access lhs = Access(tensor, indices); IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 843404e1f..f9d57149c 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -272,7 +272,7 @@ TEST(linalg, complex_expr) { Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); Matrix D("D", 2, 2, dense, dense); - Matrix E("D", 2, 2, dense, dense); + Matrix E("E", 2, 2, dense, dense); A = E*elemMul(B+C, D); @@ -282,89 +282,3 @@ TEST(linalg, complex_expr) { ASSERT_TRUE(1); } - -TEST(linalg, blocking_matmul) { - Matrix B("B", 4, 4, sparse, dense, 2); - Matrix C("C", 4, 4, dense, sparse, 2); - Matrix A("A", 4, 4, dense, dense, 2); - - cout << "--- Before inserting ---" << endl; - B.insert(0,0,0, 0,2); - B.insert(1,1, 0,0,1); - B.insert(0,1, 0,0,2); - - C.insert(0,0,2); - C.insert(1,1,2); - cout << "--- After inserting ---" << endl; - cout << "B: " << B << endl; - cout << "C: " << C << endl; - cout << "--- Before Expression ---" << endl; - A = transpose(B * C); - cout << "--- After Expression ---" << endl; - - cout << "--- Before At ---" << endl; - auto vec = B.atBlock(0,0); - for (auto it = 0; it < vec.size(); ++it) - cout << "B(0,0): " << vec.at(it) << endl; - vec = A.atBlock(0,0); - for (auto it = 0; it < vec.size(); ++it) - cout << "B(0,0): " << vec.at(it) << endl; - cout << "--- After At ---" << endl; - - cout << "--- before cout of a ---" << endl; - cout << A << endl; - cout << "--- after cout of a ---" << endl; - - cout << "--- Before getIndexAssignment on A ---" << endl; - cout << A.getIndexAssignment() << endl; - cout << "--- After getIndexAssignment on A ---" << endl; -// -// ASSERT_EQ(A.at(0,0), 4); -// ASSERT_EQ(A.at(0,1), 4); -// ASSERT_EQ(A.at(1,0), 0); - ASSERT_TRUE(1); -} - -TEST(linalg, blocking_matvec) { - Matrix B("B", 4, 4, sparse, dense, 2); - Vector c("c", 4, sparse, 2); - Vector a("a", 4, dense, 2); - - cout << "--- Before inserting ---" << endl; - B.insert(0,0,0, 0,2); - B.insert(1,1, 0,0,1); - B.insert(0,1, 0,0,2); - - c.insert(0,0,2); - c.insert(1,1,2); - - cout << "--- After inserting ---" << endl; - cout << "B: " << B << endl; - cout << "C: " << c << endl; - cout << "c(isColVec): " << c.isColVector(); - cout << "--- Before Expression ---" << endl; - a = (transpose(c)*c)*(B*c); - cout << "--- After Expression ---" << endl; - - cout << "--- Before At ---" << endl; - auto vec = B.atBlock(0, 0); - for (auto it = 0; it < vec.size(); ++it) - cout << "B(0): " << vec.at(it) << endl; - vec = a.atBlock(0); - for (auto it = 0; it < vec.size(); ++it) - cout << "B(0): " << vec.at(it) << endl; - cout << "--- After At ---" << endl; - - cout << "--- before cout of a ---" << endl; - cout << 
a << endl; - cout << "--- after cout of a ---" << endl; - - cout << "--- Before getIndexAssignment on A ---" << endl; - cout << a.getIndexAssignment() << endl; - cout << "--- After getIndexAssignment on A ---" << endl; -// -// ASSERT_EQ(A.at(0,0), 4); -// ASSERT_EQ(A.at(0,1), 4); -// ASSERT_EQ(A.at(1,0), 0); - ASSERT_TRUE(1); -} From 1ed47a402d5347d1153bd46f2cce3439bf06474f Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 15 Dec 2020 16:05:13 -0800 Subject: [PATCH 42/61] remove unwanted constructors + remove optional colVec arg from simplest Vector constructor --- include/taco/linalg.h | 43 ++++++------------------------------------- 1 file changed, 6 insertions(+), 37 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 33ad83b46..b3e99a1d0 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -68,7 +68,6 @@ class Matrix : public LinalgBase { Matrix(std::string name, size_t dim1, size_t dim2); Matrix(std::string name, std::vector dimensions); - /* Matrix(std::string name, std::initializer_list dimensions); */ Matrix(std::string name, size_t dim1, size_t dim2, Format format); @@ -76,11 +75,6 @@ class Matrix : public LinalgBase { Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2); - //TODO: are these really necessary? - /* Matrix(std::string name, Type tensorType); */ - - /* Matrix(std::string name, Type tensorType, Format format); */ - LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } @@ -106,38 +100,26 @@ template Matrix::Matrix(std::string name) : LinalgBase(name, Type(type(), {42, 42}), type(), {42, 42}, Format({dense, dense})) {} -// Works template Matrix::Matrix(std::string name, std::vector dimensions) : LinalgBase(name, Type(type(), Shape(std::vector(dimensions.begin(), dimensions.end()))), type(), std::vector(dimensions.begin(), dimensions.end()), Format({dense,dense})) {} -// Works template Matrix::Matrix(std::string name, size_t dim1, size_t dim2) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int) dim1, (int) dim2}, Format({dense,dense})) {} -// Works template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, Format format) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int) dim1, (int) dim2}, format) {} -// Works template Matrix::Matrix(std::string name, std::vector dimensions, Format format) : LinalgBase(name, Type(type(), Shape(std::vector(dimensions.begin(), dimensions.end()))), type(), std::vector(dimensions.begin(), dimensions.end()), format) {} -/* This is the one in use currently */ template Matrix::Matrix(std::string name, size_t dim1, size_t dim2, ModeFormat format1, ModeFormat format2) : LinalgBase(name, Type(type(), {dim1, dim2}), type(), {(int)dim1, (int)dim2}, Format({format1, format2}), false) {} -// TODO: do we really need these? 
-/* template */ -/* Matrix::Matrix(std::string name, Type tensorType) : LinalgBase(name, tensorType) {} */ - -/* template */ -/* Matrix::Matrix(std::string name, Type tensorType, Format format) : LinalgBase(name, tensorType, format) {} */ - // Definition of Read methods template CType Matrix::at(int coord_x, int coord_y) { @@ -176,18 +158,14 @@ class Vector : public LinalgBase { std::string name; Datatype ctype; public: - explicit Vector(std::string name, bool isColVec = true); + explicit Vector(std::string name); - Vector(std::string name, size_t dim, bool isColVec = true); + Vector(std::string name, int dim, bool isColVec = true); Vector(std::string name, size_t dim, Format format, bool isColVec = true); Vector(std::string name, size_t dim, ModeFormat format, bool isColVec = true); - Vector(std::string name, Type type, Format format, bool isColVec = true); - - Vector(std::string name, Type type, ModeFormat format, bool isColVec = true); - LinalgAssignment operator=(const LinalgExpr &expr) { return LinalgBase::operator=(expr); } @@ -210,12 +188,12 @@ class Vector : public LinalgBase { // ------------------------------------------------------------ template -Vector::Vector(std::string name, bool isColVec) : - LinalgBase(name, Type(type(), {42}), isColVec) {} +Vector::Vector(std::string name) : + LinalgBase(name, Type(type(), {42}), type(), {42}, Format({dense}), true) {} template -Vector::Vector(std::string name, size_t dim, bool isColVec) : LinalgBase(name, Type(type(), {dim}), - isColVec) {} +Vector::Vector(std::string name, int dim, bool isColVec) : + LinalgBase(name, Type(type(), {(size_t)dim}), type(), {(int)dim}, Format({dense}), isColVec) {} template Vector::Vector(std::string name, size_t dim, Format format, bool isColVec) : LinalgBase(name, @@ -228,15 +206,6 @@ template Vector::Vector(std::string name, size_t dim, ModeFormat format, bool isColVec) : LinalgBase(name, Type(type(), {dim}), type(), {(int)dim}, Format(format), isColVec) {} -template -Vector::Vector(std::string name, Type type, Format format, bool isColVec) : - LinalgBase(name, type, format, isColVec) {} - -template -Vector::Vector(std::string name, Type type, ModeFormat format, bool isColVec) : - LinalgBase(name, type, Format(format), isColVec) {} - - // Vector write methods template void Vector::insert(int coord, CType value) { From 238cbbf746762b4ca5c3603a67849621a709f2c6 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 15 Dec 2020 16:05:26 -0800 Subject: [PATCH 43/61] rename tensorbase test --- test/tests-linalg.cpp | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 79d06c721..a4cf5f105 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -7,10 +7,15 @@ using namespace taco; TEST(linalg, matrix_constructors) { Matrix A("A"); Matrix B("B", {2, 2}); + Matrix C("C", 2, 2, dense, dense); Matrix D("D", 2, 2); Matrix E("E", 2, 2, {dense, dense}); Matrix F("F", {2, 2}, {dense, dense}); - Matrix C("C", 2, 2, dense, dense); + + Vector a("a"); + Vector b("b", 2, false); + Vector c("c", 2, dense); + Vector d("d", 2, {dense}); } TEST(linalg, matmul_index_expr) { @@ -104,7 +109,7 @@ TEST(linalg, matmul) { cout << "a11 = " << a11 << endl; } -TEST(linalg, tensorbase) { +TEST(linalg, matmat_add) { Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); Matrix A("A", 2, 2, dense, dense); @@ -117,11 +122,6 @@ TEST(linalg, tensorbase) { A = B + C; - // Should be [1,2,3,4] - cout << A << endl; - - 
cout << A.getIndexAssignment(); - ASSERT_EQ(A.at(0,0), 1); ASSERT_EQ(A.at(0,1), 2); ASSERT_EQ(A.at(1,0), 3); @@ -277,18 +277,19 @@ TEST(linalg, tensorapi) { /* cout << a << endl; */ } -/* TEST(linalg, complex_expr) { */ -/* Matrix A("A", 2, 2, dense, dense); */ -/* Matrix B("B", 2, 2, dense, dense); */ -/* Matrix C("C", 2, 2, dense, dense); */ -/* Matrix D("D", 2, 2, dense, dense); */ -/* Matrix E("D", 2, 2, dense, dense); */ +TEST(linalg, complex_expr) { + Matrix A("A", 2, 2, dense, dense); + Matrix B("B", 2, 2, dense, dense); + Matrix C("C", 2, 2, dense, dense); + Matrix D("D", 2, 2, dense, dense); + Matrix E("D", 2, 2, dense, dense); /* A = E*elemMul(B+C, D); */ + A = elemMul(B+C, D); -/* cout << A << endl; */ + cout << A << endl; -/* cout << A.getIndexAssignment(); */ + cout << A.getIndexAssignment(); -/* ASSERT_TRUE(1); */ -/* } */ + ASSERT_TRUE(1); +} From e90083a972d3f0074c8d79be454ec925dd76aa51 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 15 Dec 2020 16:06:10 -0800 Subject: [PATCH 44/61] remove tensorapi test --- test/tests-linalg.cpp | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index a4cf5f105..a0dc3c083 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -240,43 +240,6 @@ TEST(linalg, outer_mul) { /* ASSERT_TRUE(1); */ /* } */ -TEST(linalg, tensorapi) { - cout << "--- Beginning of TensorAPI test ---" << endl; - Tensor a({2,2}, dense); - Tensor b({2,3}, dense); - Tensor c({3,2}, dense); - - cout << "--- Initialized Tensors ---" << endl; - - b(0,0) = 2; - b(1,1) = 1; - b(0,1) = 2; - - cout << "--- Initializing c ---" << endl; - - c(0,0) = 2; - c(1,1) = 2; - - cout << "--- Declaring IndexVars ---" << endl; - - IndexVar i,j,k; - - // The original - /* a(i,j) = b(i,k) * c(k,j); */ - - // The broken-up version - cout << "--- Creating operand IndexExprs ---" << endl; - - IndexExpr tc = c(k,j); - IndexExpr tb = b(i,k); - - cout << "Pre-assignment" << endl; - a(i,j) = tb * tc; - cout << "Post-assignment" << endl; - - /* cout << a << endl; */ -} - TEST(linalg, complex_expr) { Matrix A("A", 2, 2, dense, dense); Matrix B("B", 2, 2, dense, dense); From f07081189fffc721c599c238e982cae218712283 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Tue, 15 Dec 2020 17:37:45 -0800 Subject: [PATCH 45/61] removing some cruft --- include/taco/linalg.h | 6 ------ src/linalg.cpp | 18 ------------------ 2 files changed, 24 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index b3e99a1d0..cef60d57d 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -17,9 +17,6 @@ class LinalgBase : public LinalgExpr { std::string name; Type tensorType; - // The associated tensor - TensorBase *tbase; - LinalgAssignment assignment; IndexStmt indexAssignment; @@ -33,9 +30,6 @@ class LinalgBase : public LinalgExpr { public: LinalgBase(std::string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec = false); - /* LinalgBase(std::string name, Type tensorType, bool isColVec = false); */ - /* LinalgBase(std::string name, Type tensorType, Format format, bool isColVec = false); */ - /* LinalgBase(TensorBase* tensor, bool isColVec = false); */ /// [LINALG NOTATION] LinalgAssignment operator=(const LinalgExpr &expr); diff --git a/src/linalg.cpp b/src/linalg.cpp index f607b3e65..bb23a2311 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -8,27 +8,11 @@ using namespace std; namespace taco { -/* LinalgBase::LinalgBase(string name, Type tensorType, bool 
isColVec) : LinalgExpr(TensorVar(name, tensorType), isColVec), name(name), tensorType(tensorType), idxcount(0) { */ -/* } */ - LinalgBase::LinalgBase(string name, Type tensorType, Datatype dtype, std::vector dims, Format format, bool isColVec) : LinalgExpr(TensorVar(name, tensorType, format), isColVec, new TensorBase(name, dtype, dims, format)), name(name), tensorType(tensorType), idxcount(0) { } -//TODO: remove this entirely -/* LinalgBase::LinalgBase(TensorBase* tbase, bool isColVec) : */ -/* LinalgExpr(tbase, isColVec), name(tbase->getName()), */ -/* tensorType(tbase->getTensorVar().getType()), idxcount(0) { */ -/* // Checking if this is used */ -/* cout << "!!!!!! LinalgBase::LinalgBase with tbase arg used" << endl; */ -/* } */ - -/* LinalgBase::LinalgBase(string name, Type tensorType, Format format, bool isColVec) : name(name), tensorType(tensorType), */ -/* idxcount(0), LinalgExpr(TensorVar(name, tensorType, format), isColVec) { */ -/* } */ - - LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); TensorVar var = to(this->get())->tensorVar; @@ -42,9 +26,7 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { LinalgAssignment assignment = LinalgAssignment(var, expr); this->assignment = assignment; - cout << "rewrite here" << endl; this->rewrite(); - cout << "end rewrite" << endl; return assignment; } From e9b2cb5df7fc6323f64f20beb65677cbb648a798 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 16 Dec 2020 14:02:01 -0500 Subject: [PATCH 46/61] Change rewrite to use LinalgNotationRewriter --- include/taco/linalg.h | 2 +- .../taco/linalg_notation/linalg_rewriter.h | 75 +++++ src/linalg.cpp | 264 +++++++++--------- src/linalg_notation/linalg_rewriter.cpp | 225 +++++++++++++++ 4 files changed, 436 insertions(+), 130 deletions(-) create mode 100644 include/taco/linalg_notation/linalg_rewriter.h create mode 100644 src/linalg_notation/linalg_rewriter.cpp diff --git a/include/taco/linalg.h b/include/taco/linalg.h index aedae270d..1a9196b1d 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -23,7 +23,7 @@ class LinalgBase : public LinalgExpr { LinalgAssignment assignment; IndexStmt indexAssignment; - int idxcount; + int idxcount = 0; std::vector indexVarNameList = {"i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"}; IndexExpr rewrite(LinalgExpr linalg, std::vector indices); diff --git a/include/taco/linalg_notation/linalg_rewriter.h b/include/taco/linalg_notation/linalg_rewriter.h new file mode 100644 index 000000000..7d5d30f57 --- /dev/null +++ b/include/taco/linalg_notation/linalg_rewriter.h @@ -0,0 +1,75 @@ +#ifndef TACO_LINALG_REWRITER_H +#define TACO_LINALG_REWRITER_H + +#include +#include +#include +#include +#include + +#include "taco/lower/iterator.h" +#include "taco/util/scopedset.h" +#include "taco/util/uncopyable.h" +#include "taco/ir_tags.h" + +namespace taco { + +class TensorVar; + +class IndexVar; + +class IndexExpr; + +class LinalgBase; + +class LinalgRewriter : public util::Uncopyable { +public: + LinalgRewriter(); + + virtual ~LinalgRewriter() = default; + + /// Lower an index statement to an IR function. 
+ IndexExpr rewrite(LinalgBase linalgBase); + +// void setLiveIndices(std::vector indices); +protected: + + virtual IndexExpr rewriteSub(const LinalgSubNode* sub); + + virtual IndexExpr rewriteAdd(const LinalgAddNode* add); + + virtual IndexExpr rewriteElemMul(const LinalgElemMulNode* elemMul); + + virtual IndexExpr rewriteMatMul(const LinalgMatMulNode* matMul); + + virtual IndexExpr rewriteDiv(const LinalgDivNode* div); + + virtual IndexExpr rewriteNeg(const LinalgNegNode* neg); + + virtual IndexExpr rewriteTranspose(const LinalgTransposeNode* transpose); + + virtual IndexExpr rewriteLiteral(const LinalgLiteralNode* literal); + + virtual IndexExpr rewriteVar(const LinalgVarNode* var); + + virtual IndexExpr rewriteTensorBase(const LinalgTensorBaseNode* node); + + virtual IndexStmt rewriteAssignment(const LinalgAssignmentNode* node); + + IndexExpr rewrite(LinalgExpr linalgExpr); + +private: + std::vector liveIndices; + + int idxcount; + std::vector indexVarNameList = {"i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"}; + + IndexVar getUniqueIndex(); + + class Visitor; + friend class Visitor; + std::shared_ptr visitor; +}; + +} // namespace taco +#endif //TACO_LINALG_REWRITER_H diff --git a/src/linalg.cpp b/src/linalg.cpp index 4bc7720a1..293f9f67e 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -3,6 +3,7 @@ #include "taco/index_notation/index_notation.h" #include "taco/index_notation/index_notation_nodes.h" #include "taco/linalg_notation/linalg_notation_nodes.h" +#include "taco/linalg_notation/linalg_rewriter.h" using namespace std; @@ -94,134 +95,134 @@ IndexVar LinalgBase::getUniqueIndex() { } IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { - if (isa(linalg.get())) { - auto sub = to(linalg.get()); - IndexExpr indexA = rewrite(sub->a, indices); - IndexExpr indexB = rewrite(sub->b, indices); - return new SubNode(indexA, indexB); - } else if (isa(linalg.get())) { - auto add = to(linalg.get()); - IndexExpr indexA = rewrite(add->a, indices); - IndexExpr indexB = rewrite(add->b, indices); - return new AddNode(indexA, indexB); - } else if (isa(linalg.get())) { - auto mul = to(linalg.get()); - IndexExpr indexA = rewrite(mul->a, indices); - IndexExpr indexB = rewrite(mul->b, indices); - return new MulNode(indexA, indexB); - } else if (isa(linalg.get())) { - auto mul = to(linalg.get()); - IndexVar index = getUniqueIndex(); - vector indicesA; - vector indicesB; - if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { - indicesA = {indices[0], index}; - indicesB = {index, indices[1]}; - } - else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { - indicesA = {index}; - indicesB = {index, indices[0]}; - } - else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { - indicesA = {indices[0], index}; - indicesB = {index}; - } - else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { - indicesA = {indices[0]}; - indicesB = {indices[1]}; - } else if (mul->a.getOrder() == 0) { - indicesA = {}; - indicesB = indices; - } else if (mul->b.getOrder() == 0) { - indicesA = indices; - indicesB = {}; - } else { - indicesA = {index}; - indicesB = {index}; - } - IndexExpr indexA = rewrite(mul->a, indicesA); - IndexExpr indexB = rewrite(mul->b, indicesB); - return new MulNode(indexA, indexB); - } else if (isa(linalg.get())) { - auto div = to(linalg.get()); - IndexExpr indexA = rewrite(div->a, indices); - IndexExpr indexB = rewrite(div->b, indices); - return new DivNode(indexA, indexB); - } else if (isa(linalg.get())) { - auto neg = 
to(linalg.get()); - IndexExpr index = rewrite(neg->a, indices); - return new NegNode(index); - } else if (isa(linalg.get())) { - auto transpose = to(linalg.get()); - if (transpose->a.getOrder() == 2) { - return rewrite(transpose->a, {indices[1], indices[0]}); - } - else if (transpose->a.getOrder() == 1) { - return rewrite(transpose->a, {indices[0]}); - } - return rewrite(transpose->a, {}); - } else if (isa(linalg.get())) { - auto lit = to(linalg.get()); - - LiteralNode* value; - switch (lit->getDataType().getKind()) { - case Datatype::Bool: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::UInt8: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::UInt16: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::UInt32: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::UInt64: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::UInt128: - taco_not_supported_yet; - break; - case Datatype::Int8: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Int16: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Int32: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Int64: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Int128: - taco_not_supported_yet; - break; - case Datatype::Float32: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Float64: - value = new LiteralNode(lit->getVal()); - break; - case Datatype::Complex64: - value = new LiteralNode(lit->getVal>()); - break; - case Datatype::Complex128: - value = new LiteralNode(lit->getVal>()); - break; - case Datatype::Undefined: - taco_uerror << "unsupported Datatype"; - break; - } - return value; - } else if (isa(linalg.get())) { - auto var = to(linalg.get()); - return new AccessNode(var->tensorVar, indices); - } else if (isa(linalg.get())) { - /* cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; */ - return linalg.tensorBase->operator()(indices); - } +// if (isa(linalg.get())) { +// auto sub = to(linalg.get()); +// IndexExpr indexA = rewrite(sub->a, indices); +// IndexExpr indexB = rewrite(sub->b, indices); +// return new SubNode(indexA, indexB); +// } else if (isa(linalg.get())) { +// auto add = to(linalg.get()); +// IndexExpr indexA = rewrite(add->a, indices); +// IndexExpr indexB = rewrite(add->b, indices); +// return new AddNode(indexA, indexB); +// } else if (isa(linalg.get())) { +// auto mul = to(linalg.get()); +// IndexExpr indexA = rewrite(mul->a, indices); +// IndexExpr indexB = rewrite(mul->b, indices); +// return new MulNode(indexA, indexB); +// } else if (isa(linalg.get())) { +// auto mul = to(linalg.get()); +// IndexVar index = getUniqueIndex(); +// vector indicesA; +// vector indicesB; +// if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { +// indicesA = {indices[0], index}; +// indicesB = {index, indices[1]}; +// } +// else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { +// indicesA = {index}; +// indicesB = {index, indices[0]}; +// } +// else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { +// indicesA = {indices[0], index}; +// indicesB = {index}; +// } +// else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { +// indicesA = {indices[0]}; +// indicesB = {indices[1]}; +// } else if (mul->a.getOrder() == 0) { +// indicesA = {}; +// indicesB = indices; +// } else if (mul->b.getOrder() == 0) { +// indicesA = indices; +// indicesB = {}; +// } else { +// indicesA = 
{index}; +// indicesB = {index}; +// } +// IndexExpr indexA = rewrite(mul->a, indicesA); +// IndexExpr indexB = rewrite(mul->b, indicesB); +// return new MulNode(indexA, indexB); +// } else if (isa(linalg.get())) { +// auto div = to(linalg.get()); +// IndexExpr indexA = rewrite(div->a, indices); +// IndexExpr indexB = rewrite(div->b, indices); +// return new DivNode(indexA, indexB); +// } else if (isa(linalg.get())) { +// auto neg = to(linalg.get()); +// IndexExpr index = rewrite(neg->a, indices); +// return new NegNode(index); +// } else if (isa(linalg.get())) { +// auto transpose = to(linalg.get()); +// if (transpose->a.getOrder() == 2) { +// return rewrite(transpose->a, {indices[1], indices[0]}); +// } +// else if (transpose->a.getOrder() == 1) { +// return rewrite(transpose->a, {indices[0]}); +// } +// return rewrite(transpose->a, {}); +// } else if (isa(linalg.get())) { +// auto lit = to(linalg.get()); +// +// LiteralNode* value; +// switch (lit->getDataType().getKind()) { +// case Datatype::Bool: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::UInt8: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::UInt16: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::UInt32: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::UInt64: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::UInt128: +// taco_not_supported_yet; +// break; +// case Datatype::Int8: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Int16: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Int32: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Int64: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Int128: +// taco_not_supported_yet; +// break; +// case Datatype::Float32: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Float64: +// value = new LiteralNode(lit->getVal()); +// break; +// case Datatype::Complex64: +// value = new LiteralNode(lit->getVal>()); +// break; +// case Datatype::Complex128: +// value = new LiteralNode(lit->getVal>()); +// break; +// case Datatype::Undefined: +// taco_uerror << "unsupported Datatype"; +// break; +// } +// return value; +// } else if (isa(linalg.get())) { +// auto var = to(linalg.get()); +// return new AccessNode(var->tensorVar, indices); +// } else if (isa(linalg.get())) { +// /* cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; */ +// return linalg.tensorBase->operator()(indices); +// } return IndexExpr(); } @@ -241,8 +242,13 @@ IndexStmt LinalgBase::rewrite() { indices.push_back(getUniqueIndex()); indices.push_back(getUniqueIndex()); } + + Access lhs = Access(tensor, indices); - IndexExpr rhs = rewrite(this->assignment.getRhs(), indices); + + auto linalgRewriter = new LinalgRewriter(); + //linalgRewriter->setLiveIndices(indices); + IndexExpr rhs = linalgRewriter->rewrite(*this); cout << "rhs done here" << endl; if(this->tensorBase != nullptr) { diff --git a/src/linalg_notation/linalg_rewriter.cpp b/src/linalg_notation/linalg_rewriter.cpp new file mode 100644 index 000000000..568ff74f3 --- /dev/null +++ b/src/linalg_notation/linalg_rewriter.cpp @@ -0,0 +1,225 @@ +#include "taco/linalg_notation/linalg_rewriter.h" + +#include "taco/linalg_notation/linalg_notation_nodes.h" +#include "taco/index_notation/index_notation_nodes.h" + +using namespace std; +using namespace taco; + +class 
LinalgRewriter::Visitor : public LinalgNotationVisitorStrict { +public: + Visitor(LinalgRewriter* rewriter ) : rewriter(rewriter) {} + IndexExpr rewrite(LinalgExpr linalgExpr) { + this->expr = IndexExpr(); + LinalgNotationVisitorStrict::visit(linalgExpr); + return this->expr; + } + IndexStmt rewrite(LinalgStmt linalgStmt) { + this->stmt = IndexStmt(); + LinalgNotationVisitorStrict::visit(linalgStmt); + return this->stmt; + } +private: + LinalgRewriter* rewriter; + IndexExpr expr; + IndexStmt stmt; + using LinalgNotationVisitorStrict::visit; + void visit(const LinalgSubNode* node) { expr = rewriter->rewriteSub(node); } + void visit(const LinalgAddNode* node) { expr = rewriter->rewriteAdd(node); } + void visit(const LinalgElemMulNode* node) { expr = rewriter->rewriteElemMul(node); } + void visit(const LinalgMatMulNode* node) { expr = rewriter->rewriteMatMul(node); } + void visit(const LinalgDivNode* node) { expr = rewriter->rewriteDiv(node); } + void visit(const LinalgNegNode* node) { expr = rewriter->rewriteNeg(node); } + void visit(const LinalgTransposeNode* node) { expr = rewriter->rewriteTranspose(node); } + void visit(const LinalgLiteralNode* node) { expr = rewriter->rewriteLiteral(node); } + void visit(const LinalgVarNode* node) { expr = rewriter->rewriteVar(node); } + void visit(const LinalgTensorBaseNode* node) { expr = rewriter->rewriteTensorBase(node); } + void visit(const LinalgAssignmentNode* node) { stmt = rewriter->rewriteAssignment(node); } + +}; + +LinalgRewriter::LinalgRewriter() : visitor(new Visitor(this)) { +} + +IndexExpr LinalgRewriter::rewriteSub(const LinalgSubNode* sub) { + IndexExpr indexA = rewrite(sub->a); + IndexExpr indexB = rewrite(sub->b); + return new SubNode(indexA, indexB); +} + +IndexExpr LinalgRewriter::rewriteAdd(const LinalgAddNode* add) { + IndexExpr indexA = rewrite(add->a); + IndexExpr indexB = rewrite(add->b); + return new AddNode(indexA, indexB); +} + +IndexExpr LinalgRewriter::rewriteElemMul(const LinalgElemMulNode* elemMul) { + IndexExpr indexA = rewrite(elemMul->a); + IndexExpr indexB = rewrite(elemMul->b); + return new MulNode(indexA, indexB); +} + +IndexExpr LinalgRewriter::rewriteMatMul(const LinalgMatMulNode *matMul) { + IndexVar index = getUniqueIndex(); + vector indicesA; + vector indicesB; + if (matMul->a.getOrder() == 2 && matMul->b.getOrder() == 2) { + indicesA = {liveIndices[0], index}; + indicesB = {index, liveIndices[1]}; + } + else if (matMul->a.getOrder() == 1 && matMul->b.getOrder() == 2) { + indicesA = {index}; + indicesB = {index, liveIndices[0]}; + } + else if (matMul->a.getOrder() == 2 && matMul->b.getOrder() == 1) { + indicesA = {liveIndices[0], index}; + indicesB = {index}; + } + else if (matMul->a.getOrder() == 1 && matMul->a.isColVector() && matMul->b.getOrder() == 1) { + indicesA = {liveIndices[0]}; + indicesB = {liveIndices[1]}; + } else if (matMul->a.getOrder() == 0) { + indicesA = {}; + indicesB = liveIndices; + } else if (matMul->b.getOrder() == 0) { + indicesA = liveIndices; + indicesB = {}; + } else { + indicesA = {index}; + indicesB = {index}; + } + liveIndices = indicesA; + IndexExpr indexA = rewrite(matMul->a); + liveIndices = indicesB; + IndexExpr indexB = rewrite(matMul->b); + return new MulNode(indexA, indexB); +} + +IndexExpr LinalgRewriter::rewriteDiv(const LinalgDivNode *div) { + IndexExpr indexA = rewrite(div->a); + IndexExpr indexB = rewrite(div->b); + return new DivNode(indexA, indexB); +} + +IndexExpr LinalgRewriter::rewriteNeg(const LinalgNegNode *neg) { + IndexExpr index = rewrite(neg->a); + return 
new NegNode(index); +} + +IndexExpr LinalgRewriter::rewriteTranspose(const LinalgTransposeNode *transpose) { + if (transpose->a.getOrder() == 2) { + liveIndices = {liveIndices[1], liveIndices[0]}; + return rewrite(transpose->a); + } + else if (transpose->a.getOrder() == 1) { + liveIndices = {liveIndices[0]}; + return rewrite(transpose->a); + } + liveIndices = {}; + return rewrite(transpose->a); +} + +IndexExpr LinalgRewriter::rewriteLiteral(const LinalgLiteralNode *lit) { + LiteralNode* value; + switch (lit->getDataType().getKind()) { + case Datatype::Bool: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::UInt128: + taco_not_supported_yet; + break; + case Datatype::Int8: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int16: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Int128: + taco_not_supported_yet; + break; + case Datatype::Float32: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Float64: + value = new LiteralNode(lit->getVal()); + break; + case Datatype::Complex64: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Complex128: + value = new LiteralNode(lit->getVal>()); + break; + case Datatype::Undefined: + taco_uerror << "unsupported Datatype"; + break; + } + return value; +} + +IndexExpr LinalgRewriter::rewriteVar(const LinalgVarNode *var) { + return new AccessNode(var->tensorVar, liveIndices); +} + +IndexExpr LinalgRewriter::rewriteTensorBase(const LinalgTensorBaseNode *node) { + return node->tensorBase->operator()(liveIndices); +} + +IndexVar LinalgRewriter::getUniqueIndex() { + int loc = idxcount % indexVarNameList.size(); + int num = idxcount / indexVarNameList.size(); + + string indexVarName; + if (num == 0) + indexVarName = indexVarNameList.at(loc); + else + indexVarName = indexVarNameList.at(loc) + to_string(num); + + idxcount += 1; + IndexVar result(indexVarName); + return result; +} + +IndexStmt LinalgRewriter::rewriteAssignment(const LinalgAssignmentNode *node) { + return IndexStmt(); +} + +//void LinalgRewriter::setLiveIndices(std::vector indices) { +// liveIndices = indices; +//} + +IndexExpr LinalgRewriter::rewrite(LinalgExpr linalgExpr) { + return visitor->rewrite(linalgExpr); +} + +IndexExpr LinalgRewriter::rewrite(LinalgBase linalgBase) { + TensorVar tensor = linalgBase.getAssignment().getLhs(); + + vector indices = {}; + if (tensor.getOrder() == 1) { + indices.push_back(getUniqueIndex()); + } else if (tensor.getOrder() == 2) { + indices.push_back(getUniqueIndex()); + indices.push_back(getUniqueIndex()); + } + + liveIndices = indices; + return rewrite(linalgBase.getAssignment().getRhs()); +} \ No newline at end of file From 40101c8fb21cd27d4cf3df3634e42aed3b21d9bf Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Wed, 16 Dec 2020 11:12:36 -0800 Subject: [PATCH 47/61] get ASSERT_TENSOR_EQ working, update tests to use new ScalarAccess methods --- include/taco/linalg.h | 11 ++-- src/linalg.cpp | 1 - test/tests-linalg.cpp | 121 +++++++++++++++++++++++++++++++----------- 3 files changed, 97 insertions(+), 36 deletions(-) diff 
--git a/include/taco/linalg.h b/include/taco/linalg.h index cef60d57d..22ca51f8d 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -38,7 +38,6 @@ class LinalgBase : public LinalgExpr { const IndexStmt getIndexAssignment() const; - IndexStmt rewrite(); typedef LinalgVarNode Node; @@ -73,17 +72,21 @@ class Matrix : public LinalgBase { return LinalgBase::operator=(expr); } - // Support some Read methods + // Read method CType at(int coord_x, int coord_y); - // And a Write method + // Write method void insert(int coord_x, int coord_y, CType value); + // ScalarAccess supports reading/assigning to single element ScalarAccess operator()(int i, int j); - // Access methods for use in IndexExprs + // Access methods const Access operator()(const IndexVar i, const IndexVar j) const; Access operator()(const IndexVar i, const IndexVar j); + + // Allow to be cast to a TensorBase for the sake of ASSERT_TENSOR_EQ + operator TensorBase() const { return *tensorBase; } }; // ------------------------------------------------------------ diff --git a/src/linalg.cpp b/src/linalg.cpp index bb23a2311..90be4422f 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -185,7 +185,6 @@ IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { auto var = to(linalg.get()); return new AccessNode(var->tensorVar, indices); } else if (isa(linalg.get())) { - /* cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; */ return linalg.tensorBase->operator()(indices); } return IndexExpr(); diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index a0dc3c083..70549ac79 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -4,6 +4,36 @@ using namespace taco; +TEST(linalg, reassignment) { + Matrix A("A", {2,2}); + Matrix B1("B1", {2,2}); + Matrix B2("B2", {2,2}); + Matrix B3("B3", {2,2}); + Matrix C1("C1", {2,2}); + Matrix C2("C2", {2,2}); + Matrix C3("C3", {2,2}); + + A = B1 * C1; + + IndexVar i,j,k; + A(i,j) = B2(i,k) * C2(k,j); + + A = B3 * C3; +} + +TEST(linalg, tensor_comparison) { + Matrix A("A", {2,2}); + Tensor B("B", {2,2}); + + A(0,0) = 1; + A(1,1) = 1; + + B(0,0) = 1; + B(1,1) = 1; + + ASSERT_TENSOR_EQ(A,B); +} + TEST(linalg, matrix_constructors) { Matrix A("A"); Matrix B("B", {2, 2}); @@ -32,10 +62,10 @@ TEST(linalg, matmul_index_expr) { IndexVar i, j, k; A(i,j) = B(i,k) * C(k,j); - ASSERT_EQ(A.at(0,0), 4); - ASSERT_EQ(A.at(0,1), 4); - ASSERT_EQ(A.at(1,0), 0); - ASSERT_EQ(A.at(1,1), 2); + ASSERT_EQ((double) A(0,0), 4); + ASSERT_EQ((double) A(0,1), 4); + ASSERT_EQ((double) A(1,0), 0); + ASSERT_EQ((double) A(1,1), 2); } TEST(linalg, vecmat_mul_index_expr) { @@ -54,8 +84,8 @@ TEST(linalg, vecmat_mul_index_expr) { IndexVar i, j; x(i) = b(j) * A(j,i); - ASSERT_EQ(x.at(0), 17); - ASSERT_EQ(x.at(1), 6); + ASSERT_EQ((double) x(0), 17); + ASSERT_EQ((double) x(1), 6); cout << x << endl; @@ -82,7 +112,7 @@ TEST(linalg, inner_mul_index_expr) { cout << x.getIndexAssignment(); - ASSERT_EQ(x, 9); + ASSERT_EQ((double) x, 9); } TEST(linalg, matmul) { @@ -98,15 +128,26 @@ TEST(linalg, matmul) { A = B * C; - ASSERT_EQ(A.at(0,0), 4); - ASSERT_EQ(A.at(0,1), 4); - ASSERT_EQ(A.at(1,0), 0); - ASSERT_EQ(A.at(1,1), 2); + ASSERT_EQ((double) A(0,0), 4); + ASSERT_EQ((double) A(0,1), 4); + ASSERT_EQ((double) A(1,0), 0); + ASSERT_EQ((double) A(1,1), 2); + + // Equivalent Tensor API computation + Tensor tB("B", {2, 2}, dense); + Tensor tC("C", {2, 2}, dense); + Tensor tA("A", {2, 2}, dense); + + tB(0,0) = 2; + tB(1,1) = 1; + tB(0,1) = 2; + tC(0,0) = 2; + tC(1,1) = 2; + + 
IndexVar i,j,k; + tA(i,j) = tB(i,k) * tC(k,j); - //TODO: make this better - cout << "A(1,1) = " << A(1,1) << endl; - double a11 = A(1,1); - cout << "a11 = " << a11 << endl; + ASSERT_TENSOR_EQ(A,tA); } TEST(linalg, matmat_add) { @@ -122,10 +163,10 @@ TEST(linalg, matmat_add) { A = B + C; - ASSERT_EQ(A.at(0,0), 1); - ASSERT_EQ(A.at(0,1), 2); - ASSERT_EQ(A.at(1,0), 3); - ASSERT_EQ(A.at(1,1), 4); + ASSERT_EQ((double) A(0,0), 1); + ASSERT_EQ((double) A(0,1), 2); + ASSERT_EQ((double) A(1,0), 3); + ASSERT_EQ((double) A(1,1), 4); } TEST(linalg, matvec_mul) { @@ -142,8 +183,8 @@ TEST(linalg, matvec_mul) { x = A*b; - ASSERT_EQ(x.at(0), 5); - ASSERT_EQ(x.at(1), 2); + ASSERT_EQ((double) x(0), 5); + ASSERT_EQ((double) x(1), 2); // Should be [5,2] cout << x << endl; @@ -166,8 +207,8 @@ TEST(linalg, vecmat_mul) { // Should be [17, 6] x = b * A; - ASSERT_EQ(x.at(0), 17); - ASSERT_EQ(x.at(1), 6); + ASSERT_EQ((double) x(0), 17); + ASSERT_EQ((double) x(1), 6); cout << x << endl; @@ -192,7 +233,7 @@ TEST(linalg, inner_mul) { cout << x.getIndexAssignment(); - ASSERT_EQ(x, 9); + ASSERT_EQ((double) x, 9); } TEST(linalg, outer_mul) { @@ -240,19 +281,37 @@ TEST(linalg, outer_mul) { /* ASSERT_TRUE(1); */ /* } */ -TEST(linalg, complex_expr) { +TEST(linalg, compound_expr_elemmul_elemadd) { Matrix A("A", 2, 2, dense, dense); Matrix B("B", 2, 2, dense, dense); Matrix C("C", 2, 2, dense, dense); Matrix D("D", 2, 2, dense, dense); - Matrix E("D", 2, 2, dense, dense); -/* A = E*elemMul(B+C, D); */ - A = elemMul(B+C, D); + Tensor tA("A", {2,2}, dense); + Tensor tB("B", {2,2}, dense); + Tensor tC("C", {2,2}, dense); + Tensor tD("D", {2,2}, dense); - cout << A << endl; + A(0,0) = 1; + A(0,1) = 2; + A(0,2) = 3; - cout << A.getIndexAssignment(); + tA(0,0) = 1; + tA(0,1) = 2; + tA(0,2) = 3; - ASSERT_TRUE(1); + D(0,0) = 2; + D(0,1) = 3; + D(0,2) = 4; + + tD(0,0) = 2; + tD(0,1) = 3; + tD(0,2) = 4; + + A = elemMul(B+C, D); + + IndexVar i,j; + tA(i,j) = (tB(i,j) + tC(i,j)) * tD(i,j); + + ASSERT_TENSOR_EQ(A,tA); } From 02188df083270abcde887cb35025cf50a4a58235 Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Wed, 16 Dec 2020 15:25:06 -0500 Subject: [PATCH 48/61] Fixes to rewrite using LinalgNotationRewriter --- .../taco/linalg_notation/linalg_rewriter.h | 6 ++-- src/linalg.cpp | 30 ++----------------- src/linalg_notation/linalg_rewriter.cpp | 25 ++++++++++++---- 3 files changed, 26 insertions(+), 35 deletions(-) diff --git a/include/taco/linalg_notation/linalg_rewriter.h b/include/taco/linalg_notation/linalg_rewriter.h index 7d5d30f57..6f8a37578 100644 --- a/include/taco/linalg_notation/linalg_rewriter.h +++ b/include/taco/linalg_notation/linalg_rewriter.h @@ -29,9 +29,9 @@ class LinalgRewriter : public util::Uncopyable { virtual ~LinalgRewriter() = default; /// Lower an index statement to an IR function. 
- IndexExpr rewrite(LinalgBase linalgBase); + IndexStmt rewrite(LinalgBase linalgBase); -// void setLiveIndices(std::vector indices); + void setLiveIndices(std::vector indices); protected: virtual IndexExpr rewriteSub(const LinalgSubNode* sub); @@ -61,7 +61,7 @@ class LinalgRewriter : public util::Uncopyable { private: std::vector liveIndices; - int idxcount; + int idxcount = 0; std::vector indexVarNameList = {"i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"}; IndexVar getUniqueIndex(); diff --git a/src/linalg.cpp b/src/linalg.cpp index 293f9f67e..954f5efea 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -232,35 +232,11 @@ IndexStmt rewrite(LinalgStmt linalg) { IndexStmt LinalgBase::rewrite() { if (this->assignment.defined()) { - - TensorVar tensor = this->assignment.getLhs(); - - vector indices = {}; - if (tensor.getOrder() == 1) { - indices.push_back(getUniqueIndex()); - } else if (tensor.getOrder() == 2) { - indices.push_back(getUniqueIndex()); - indices.push_back(getUniqueIndex()); - } - - - Access lhs = Access(tensor, indices); - auto linalgRewriter = new LinalgRewriter(); //linalgRewriter->setLiveIndices(indices); - IndexExpr rhs = linalgRewriter->rewrite(*this); - cout << "rhs done here" << endl; - - if(this->tensorBase != nullptr) { - cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; - cout << rhs << endl; - this->tensorBase->operator()(indices) = rhs; - cout << "--- Done assigning RHS to Tensor API ---" << endl; - } - - Assignment indexAssign = Assignment(lhs, rhs); - this->indexAssignment = indexAssign; - return indexAssign; + IndexStmt stmt = linalgRewriter->rewrite(*this); + this->indexAssignment = stmt; + return stmt; } return IndexStmt(); } diff --git a/src/linalg_notation/linalg_rewriter.cpp b/src/linalg_notation/linalg_rewriter.cpp index 568ff74f3..e3bf752b3 100644 --- a/src/linalg_notation/linalg_rewriter.cpp +++ b/src/linalg_notation/linalg_rewriter.cpp @@ -201,15 +201,15 @@ IndexStmt LinalgRewriter::rewriteAssignment(const LinalgAssignmentNode *node) { return IndexStmt(); } -//void LinalgRewriter::setLiveIndices(std::vector indices) { -// liveIndices = indices; -//} +void LinalgRewriter::setLiveIndices(std::vector indices) { + liveIndices = indices; +} IndexExpr LinalgRewriter::rewrite(LinalgExpr linalgExpr) { return visitor->rewrite(linalgExpr); } -IndexExpr LinalgRewriter::rewrite(LinalgBase linalgBase) { +IndexStmt LinalgRewriter::rewrite(LinalgBase linalgBase) { TensorVar tensor = linalgBase.getAssignment().getLhs(); vector indices = {}; @@ -220,6 +220,21 @@ IndexExpr LinalgRewriter::rewrite(LinalgBase linalgBase) { indices.push_back(getUniqueIndex()); } + Access lhs = Access(tensor, indices); + liveIndices = indices; - return rewrite(linalgBase.getAssignment().getRhs()); + auto rhs = rewrite(linalgBase.getAssignment().getRhs()); + + cout << "rhs done here" << endl; + + if(linalgBase.tensorBase != nullptr) { + cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; + cout << lhs << " = "; + cout << rhs << endl; + linalgBase.tensorBase->operator()(indices) = rhs; + cout << "--- Done assigning RHS to Tensor API ---" << endl; + } + + Assignment indexAssign = Assignment(lhs, rhs); + return indexAssign; } \ No newline at end of file From f311d36eeb1c182505f823a192d69b9885f38e0a Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Wed, 16 Dec 2020 16:20:54 -0800 Subject: [PATCH 49/61] more tests cleanup --- test/tests-linalg.cpp | 224 +++++++++++++++++++++++------------------- 1 file changed, 122 insertions(+), 
102 deletions(-) diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index 70549ac79..f58c39903 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -4,50 +4,6 @@ using namespace taco; -TEST(linalg, reassignment) { - Matrix A("A", {2,2}); - Matrix B1("B1", {2,2}); - Matrix B2("B2", {2,2}); - Matrix B3("B3", {2,2}); - Matrix C1("C1", {2,2}); - Matrix C2("C2", {2,2}); - Matrix C3("C3", {2,2}); - - A = B1 * C1; - - IndexVar i,j,k; - A(i,j) = B2(i,k) * C2(k,j); - - A = B3 * C3; -} - -TEST(linalg, tensor_comparison) { - Matrix A("A", {2,2}); - Tensor B("B", {2,2}); - - A(0,0) = 1; - A(1,1) = 1; - - B(0,0) = 1; - B(1,1) = 1; - - ASSERT_TENSOR_EQ(A,B); -} - -TEST(linalg, matrix_constructors) { - Matrix A("A"); - Matrix B("B", {2, 2}); - Matrix C("C", 2, 2, dense, dense); - Matrix D("D", 2, 2); - Matrix E("E", 2, 2, {dense, dense}); - Matrix F("F", {2, 2}, {dense, dense}); - - Vector a("a"); - Vector b("b", 2, false); - Vector c("c", 2, dense); - Vector d("d", 2, {dense}); -} - TEST(linalg, matmul_index_expr) { Tensor B("B", {2,2}); Matrix C("C", 2, 2, dense, dense); @@ -80,16 +36,11 @@ TEST(linalg, vecmat_mul_index_expr) { A(0,1) = 2; A(1,0) = -1; - // Should be [17, 6] IndexVar i, j; x(i) = b(j) * A(j,i); ASSERT_EQ((double) x(0), 17); ASSERT_EQ((double) x(1), 6); - - cout << x << endl; - - cout << x.getIndexAssignment(); } @@ -107,17 +58,12 @@ TEST(linalg, inner_mul_index_expr) { IndexVar i; x = b(i) * a(i); - // Should be 9 - cout << x << endl; - - cout << x.getIndexAssignment(); - ASSERT_EQ((double) x, 9); } TEST(linalg, matmul) { Matrix B("B", 2, 2, dense, dense); - Matrix C("C", 2, 2, dense, dense); + Matrix C("C", 2, 2, sparse, sparse); Matrix A("A", 2, 2, dense, dense); B(0,0) = 2; @@ -185,11 +131,6 @@ TEST(linalg, matvec_mul) { ASSERT_EQ((double) x(0), 5); ASSERT_EQ((double) x(1), 2); - - // Should be [5,2] - cout << x << endl; - - cout << x.getIndexAssignment(); } TEST(linalg, vecmat_mul) { @@ -204,15 +145,10 @@ TEST(linalg, vecmat_mul) { A(0,1) = 2; A(1,0) = -1; - // Should be [17, 6] x = b * A; ASSERT_EQ((double) x(0), 17); ASSERT_EQ((double) x(1), 6); - - cout << x << endl; - - cout << x.getIndexAssignment(); } TEST(linalg, inner_mul) { @@ -228,11 +164,6 @@ TEST(linalg, inner_mul) { x = b * a; - // Should be 9 - cout << x << endl; - - cout << x.getIndexAssignment(); - ASSERT_EQ((double) x, 9); } @@ -249,37 +180,39 @@ TEST(linalg, outer_mul) { X = a * b; - // Should be [-6,-9,10,15] - cout << X << endl; + // Tensor API equivalent + Tensor tX("X", {2, 2}, dense); + Tensor tb("b", {2}, dense); + Tensor ta("a", {2}, dense); - cout << X.getIndexAssignment(); + tb(0) = 2; + tb(1) = 3; - cout << X; + ta(0) = -3; + ta(1) = 5; - ASSERT_TRUE(1); -} - -/* TEST(linalg, rowvec_transpose) { */ -/* Vector b("b", 2, dense, false); */ -/* Matrix A("A", 2, 2, dense, dense); */ -/* Scalar a("a", true); */ + IndexVar i,j; + tX(i,j) = a(i) * b(j); -/* b(0) = 2; */ -/* b(1) = 5; */ + ASSERT_TENSOR_EQ(X,tX); +} -/* A(0,0) = 1; */ -/* A(0,1) = 2; */ -/* A(1,1) = 4; */ +TEST(linalg, rowvec_transpose) { + Vector b("b", 2, dense, true); + Matrix A("A", 2, 2, dense, dense); + Scalar a("a", true); -/* a = transpose(transpose(b) * A * b); */ + b(0) = 2; + b(1) = 5; -/* // Should be 124 */ -/* cout << a << endl; */ + A(0,0) = 1; + A(0,1) = 2; + A(1,1) = 4; -/* cout << a.getIndexAssignment(); */ + a = transpose(transpose(b) * A * b); -/* ASSERT_TRUE(1); */ -/* } */ + ASSERT_EQ((double) a, 124); +} TEST(linalg, compound_expr_elemmul_elemadd) { Matrix A("A", 2, 2, dense, dense); @@ -287,31 +220,118 
@@ TEST(linalg, compound_expr_elemmul_elemadd) { Matrix C("C", 2, 2, dense, dense); Matrix D("D", 2, 2, dense, dense); + A(0,0) = 1; + A(0,1) = 2; + A(0,2) = 3; + + D(0,0) = 2; + D(0,1) = 3; + D(0,2) = 4; + + A = elemMul(B+C, D); + + // Tensor API equivalent Tensor tA("A", {2,2}, dense); Tensor tB("B", {2,2}, dense); Tensor tC("C", {2,2}, dense); Tensor tD("D", {2,2}, dense); - A(0,0) = 1; - A(0,1) = 2; - A(0,2) = 3; - tA(0,0) = 1; tA(0,1) = 2; tA(0,2) = 3; - D(0,0) = 2; - D(0,1) = 3; - D(0,2) = 4; - tD(0,0) = 2; tD(0,1) = 3; tD(0,2) = 4; - A = elemMul(B+C, D); - IndexVar i,j; tA(i,j) = (tB(i,j) + tC(i,j)) * tD(i,j); ASSERT_TENSOR_EQ(A,tA); } + +TEST(linalg, matrix_constructors) { + Matrix A("A"); + Matrix B("B", {2, 2}); + Matrix C("C", 2, 2, dense, dense); + Matrix D("D", 2, 2); + Matrix E("E", 2, 2, {dense, dense}); + Matrix F("F", {2, 2}, {dense, dense}); + + Vector a("a"); + Vector b("b", 2, false); + Vector c("c", 2, dense); + Vector d("d", 2, {dense}); +} + +TEST(linalg, reassignment) { + Matrix A("A", {2,2}); + Matrix B1("B1", {2,2}); + Matrix B2("B2", {2,2}); + Matrix B3("B3", {2,2}); + Matrix C1("C1", {2,2}); + Matrix C2("C2", {2,2}); + Matrix C3("C3", {2,2}); + + B1(0,0) = 1; + B1(0,1) = 2; + B1(1,0) = 3; + B1(1,1) = 4; + C1(0,0) = 1; + C1(0,1) = 2; + C1(1,0) = 3; + C1(1,1) = 4; + + A = B1 * C1; + + ASSERT_EQ((double) A(0,0), 7); + ASSERT_EQ((double) A(0,1), 10); + ASSERT_EQ((double) A(1,0), 15); + ASSERT_EQ((double) A(1,1), 22); + + B2(0,0) = 2; + B2(0,1) = 1; + B2(1,0) = 4; + B2(1,1) = 3; + C2(0,0) = 2; + C2(0,1) = 1; + C2(1,0) = 4; + C2(1,1) = 3; + + IndexVar i,j,k; + A(i,j) = B2(i,k) * C2(k,j); + + ASSERT_EQ((double) A(0,0), 8); + ASSERT_EQ((double) A(0,1), 5); + ASSERT_EQ((double) A(1,0), 20); + ASSERT_EQ((double) A(1,1), 13); + + B3(0,0) = 2; + B3(0,1) = 1; + B3(1,0) = 5; + B3(1,1) = 3; + C3(0,0) = 2; + C3(0,1) = 1; + C3(1,0) = 5; + C3(1,1) = 3; + + A = B3 * C3; + + ASSERT_EQ((double) A(0,0), 9); + ASSERT_EQ((double) A(0,1), 5); + ASSERT_EQ((double) A(1,0), 25); + ASSERT_EQ((double) A(1,1), 14); +} + +TEST(linalg, tensor_comparison) { + Matrix A("A", {2,2}); + Tensor B("B", {2,2}); + + A(0,0) = 1; + A(1,1) = 1; + + B(0,0) = 1; + B(1,1) = 1; + + ASSERT_TENSOR_EQ(A,B); +} From 9efe965c46f60330b818456e351698043d0abd32 Mon Sep 17 00:00:00 2001 From: Matthew Lee Date: Wed, 16 Dec 2020 17:08:45 -0800 Subject: [PATCH 50/61] remove unnecessary couts and large commented-out section for rewrite --- src/linalg.cpp | 132 ------------------------ src/linalg_notation/linalg_notation.cpp | 1 - src/linalg_notation/linalg_rewriter.cpp | 8 +- src/lower/lowerer_impl.cpp | 1 - src/parser/linalg_parser.cpp | 16 --- src/parser/parser.cpp | 8 -- src/tensor.cpp | 6 -- 7 files changed, 1 insertion(+), 171 deletions(-) diff --git a/src/linalg.cpp b/src/linalg.cpp index 433ab1882..00ed6c263 100644 --- a/src/linalg.cpp +++ b/src/linalg.cpp @@ -18,8 +18,6 @@ LinalgAssignment LinalgBase::operator=(const LinalgExpr& expr) { taco_iassert(isa(this->ptr)); TensorVar var = to(this->get())->tensorVar; - cout << var.getOrder() << endl; - cout << expr.getOrder() << endl; taco_uassert(var.getOrder() == expr.getOrder()) << "LHS (" << var.getOrder() << ") and RHS (" << expr.getOrder() << ") of linalg assignment must match order"; if (var.getOrder() == 1) @@ -44,7 +42,6 @@ const IndexStmt LinalgBase::getIndexAssignment() const { vector LinalgBase::getUniqueIndices(size_t order) { vector result; for (int i = idxcount; i < (idxcount + (int)order); i++) { - cout << i << ": "; string name = "i" + to_string(i); 
IndexVar indexVar(name); result.push_back(indexVar); @@ -55,7 +52,6 @@ vector LinalgBase::getUniqueIndices(size_t order) { IndexVar LinalgBase::getUniqueIndex() { int loc = idxcount % indexVarNameList.size(); - cout << "Locatopm" << loc << endl; int num = idxcount / indexVarNameList.size(); string indexVarName; @@ -70,134 +66,6 @@ IndexVar LinalgBase::getUniqueIndex() { } IndexExpr LinalgBase::rewrite(LinalgExpr linalg, vector indices) { -// if (isa(linalg.get())) { -// auto sub = to(linalg.get()); -// IndexExpr indexA = rewrite(sub->a, indices); -// IndexExpr indexB = rewrite(sub->b, indices); -// return new SubNode(indexA, indexB); -// } else if (isa(linalg.get())) { -// auto add = to(linalg.get()); -// IndexExpr indexA = rewrite(add->a, indices); -// IndexExpr indexB = rewrite(add->b, indices); -// return new AddNode(indexA, indexB); -// } else if (isa(linalg.get())) { -// auto mul = to(linalg.get()); -// IndexExpr indexA = rewrite(mul->a, indices); -// IndexExpr indexB = rewrite(mul->b, indices); -// return new MulNode(indexA, indexB); -// } else if (isa(linalg.get())) { -// auto mul = to(linalg.get()); -// IndexVar index = getUniqueIndex(); -// vector indicesA; -// vector indicesB; -// if (mul->a.getOrder() == 2 && mul->b.getOrder() == 2) { -// indicesA = {indices[0], index}; -// indicesB = {index, indices[1]}; -// } -// else if (mul->a.getOrder() == 1 && mul->b.getOrder() == 2) { -// indicesA = {index}; -// indicesB = {index, indices[0]}; -// } -// else if (mul->a.getOrder() == 2 && mul->b.getOrder() == 1) { -// indicesA = {indices[0], index}; -// indicesB = {index}; -// } -// else if (mul->a.getOrder() == 1 && mul->a.isColVector() && mul->b.getOrder() == 1) { -// indicesA = {indices[0]}; -// indicesB = {indices[1]}; -// } else if (mul->a.getOrder() == 0) { -// indicesA = {}; -// indicesB = indices; -// } else if (mul->b.getOrder() == 0) { -// indicesA = indices; -// indicesB = {}; -// } else { -// indicesA = {index}; -// indicesB = {index}; -// } -// IndexExpr indexA = rewrite(mul->a, indicesA); -// IndexExpr indexB = rewrite(mul->b, indicesB); -// return new MulNode(indexA, indexB); -// } else if (isa(linalg.get())) { -// auto div = to(linalg.get()); -// IndexExpr indexA = rewrite(div->a, indices); -// IndexExpr indexB = rewrite(div->b, indices); -// return new DivNode(indexA, indexB); -// } else if (isa(linalg.get())) { -// auto neg = to(linalg.get()); -// IndexExpr index = rewrite(neg->a, indices); -// return new NegNode(index); -// } else if (isa(linalg.get())) { -// auto transpose = to(linalg.get()); -// if (transpose->a.getOrder() == 2) { -// return rewrite(transpose->a, {indices[1], indices[0]}); -// } -// else if (transpose->a.getOrder() == 1) { -// return rewrite(transpose->a, {indices[0]}); -// } -// return rewrite(transpose->a, {}); -// } else if (isa(linalg.get())) { -// auto lit = to(linalg.get()); -// -// LiteralNode* value; -// switch (lit->getDataType().getKind()) { -// case Datatype::Bool: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::UInt8: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::UInt16: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::UInt32: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::UInt64: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::UInt128: -// taco_not_supported_yet; -// break; -// case Datatype::Int8: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::Int16: -// value = new 
LiteralNode(lit->getVal()); -// break; -// case Datatype::Int32: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::Int64: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::Int128: -// taco_not_supported_yet; -// break; -// case Datatype::Float32: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::Float64: -// value = new LiteralNode(lit->getVal()); -// break; -// case Datatype::Complex64: -// value = new LiteralNode(lit->getVal>()); -// break; -// case Datatype::Complex128: -// value = new LiteralNode(lit->getVal>()); -// break; -// case Datatype::Undefined: -// taco_uerror << "unsupported Datatype"; -// break; -// } -// return value; -// } else if (isa(linalg.get())) { -// auto var = to(linalg.get()); -// return new AccessNode(var->tensorVar, indices); -// } else if (isa(linalg.get())) { -// /* cout << "LinalgBase::rewrite -- got a tensorbasenode " << linalg.tensorBase->getName() << endl; */ -// return linalg.tensorBase->operator()(indices); -// } return IndexExpr(); } diff --git a/src/linalg_notation/linalg_notation.cpp b/src/linalg_notation/linalg_notation.cpp index c76c338b3..a90e52dad 100644 --- a/src/linalg_notation/linalg_notation.cpp +++ b/src/linalg_notation/linalg_notation.cpp @@ -192,7 +192,6 @@ LinalgExpr elemMul(const LinalgExpr &lhs, const LinalgExpr &rhs) { } LinalgExpr transpose(const LinalgExpr &lhs) { - cout << "transpose here" << endl; return new LinalgTransposeNode(lhs, !lhs.isColVector()); } diff --git a/src/linalg_notation/linalg_rewriter.cpp b/src/linalg_notation/linalg_rewriter.cpp index e3bf752b3..f8a2aae6f 100644 --- a/src/linalg_notation/linalg_rewriter.cpp +++ b/src/linalg_notation/linalg_rewriter.cpp @@ -225,16 +225,10 @@ IndexStmt LinalgRewriter::rewrite(LinalgBase linalgBase) { liveIndices = indices; auto rhs = rewrite(linalgBase.getAssignment().getRhs()); - cout << "rhs done here" << endl; - if(linalgBase.tensorBase != nullptr) { - cout << "--- Going to use the Tensor API to assign the RHS ---" << endl; - cout << lhs << " = "; - cout << rhs << endl; linalgBase.tensorBase->operator()(indices) = rhs; - cout << "--- Done assigning RHS to Tensor API ---" << endl; } Assignment indexAssign = Assignment(lhs, rhs); return indexAssign; -} \ No newline at end of file +} diff --git a/src/lower/lowerer_impl.cpp b/src/lower/lowerer_impl.cpp index 79d8a3362..e71ac8772 100644 --- a/src/lower/lowerer_impl.cpp +++ b/src/lower/lowerer_impl.cpp @@ -1380,7 +1380,6 @@ Stmt LowererImpl::lowerSuchThat(SuchThat suchThat) { Expr LowererImpl::lowerAccess(Access access) { TensorVar var = access.getTensorVar(); - cout << "Lower access " << var.getName() << endl; if (isScalar(var.getType())) { return getTensorVar(var); diff --git a/src/parser/linalg_parser.cpp b/src/parser/linalg_parser.cpp index 6430edbce..42b4a20fe 100644 --- a/src/parser/linalg_parser.cpp +++ b/src/parser/linalg_parser.cpp @@ -78,19 +78,12 @@ const TensorBase& LinalgParser::getResultTensor() const { LinalgBase LinalgParser::parseAssign() { content->parsingLhs = true; - cout << "parsing lhs" << endl; LinalgBase lhs = parseVar(); - cout << "end parsing lhs" << endl; const TensorVar var = lhs.tensorBase->getTensorVar(); - cout << "Result of parsing LHS" << endl; - cout << var.getName() << endl; content->parsingLhs = false; - cout << "parsing rhs" << endl; consume(Token::eq); LinalgExpr rhs = parseExpr(); - cout << "Result of parsing RHS" << endl; - cout << rhs << endl; lhs = rhs; return lhs; @@ -242,7 +235,6 @@ LinalgBase LinalgParser::parseVar() { 
throw ParseError("Expected linalg name"); } string tensorName = content->lexer.getIdentifier(); - cout << tensorName << endl; consume(Token::identifier); names.push_back(tensorName); @@ -281,8 +273,6 @@ LinalgBase LinalgParser::parseVar() { order = content->tensorDimensions.at(tensorName).size(); } - cout << order << endl; - Format format; if (util::contains(content->formats, tensorName)) { format = content->formats.at(tensorName); @@ -290,7 +280,6 @@ LinalgBase LinalgParser::parseVar() { else { format = Format(std::vector(order, Dense)); } - cout << format << endl; TensorBase tensor; if (util::contains(content->tensors, tensorName)) { @@ -300,12 +289,10 @@ LinalgBase LinalgParser::parseVar() { vector tensorDimensions(order); vector modesWithDefaults(order, false); for (size_t i = 0; i < tensorDimensions.size(); i++) { - cout << i << endl; if (util::contains(content->tensorDimensions, tensorName)) { tensorDimensions[i] = content->tensorDimensions.at(tensorName)[i]; } else { - cout << "default" << endl; tensorDimensions[i] = content->defaultDimension; modesWithDefaults[i] = true; } @@ -332,11 +319,8 @@ LinalgBase LinalgParser::parseVar() { vector LinalgParser::getUniqueIndices(size_t order) { vector result; for (int i = idxcount; i < (idxcount + (int)order); i++) { - cout << i << ": "; string name = "i" + to_string(i); - cout << name << " "; IndexVar indexVar = getIndexVar(name); - cout << indexVar << endl; result.push_back(indexVar); } idxcount += order; diff --git a/src/parser/parser.cpp b/src/parser/parser.cpp index ea7ed6f89..53df97415 100644 --- a/src/parser/parser.cpp +++ b/src/parser/parser.cpp @@ -55,14 +55,6 @@ Parser::Parser(string expression, const map& formats, content->tensors = tensors; content->dataTypes = dataTypes; - cout << "Parser Constructor" << endl; - for (auto& d: tensorDimensions) { - cout << d.first << ": "; - for (auto i: d.second) { - cout << i << ", "<< endl; - } - } - nextToken(); } diff --git a/src/tensor.cpp b/src/tensor.cpp index b61098854..32edd1c17 100644 --- a/src/tensor.cpp +++ b/src/tensor.cpp @@ -104,7 +104,6 @@ static Format initFormat(Format format) { TensorBase::TensorBase(string name, Datatype ctype, vector dimensions, Format format) : content(new Content(name, ctype, dimensions, initFormat(format))) { - cout << name << endl; taco_uassert((size_t)format.getOrder() == dimensions.size()) << "The number of format mode types (" << format.getOrder() << ") " << "must match the tensor order (" << dimensions.size() << ")."; @@ -488,7 +487,6 @@ struct AccessTensorNode : public AccessNode { }; const Access TensorBase::operator()(const std::vector& indices) const { - cout << "Const op() call" << endl; taco_uassert(indices.size() == (size_t)getOrder()) << "A tensor of order " << getOrder() << " must be indexed with " << getOrder() << " variables, but is indexed with: " @@ -497,14 +495,10 @@ const Access TensorBase::operator()(const std::vector& indices) const } Access TensorBase::operator()(const std::vector& indices) { - cout << "Non-Const op() call" << endl; - cout << to_string(getOrder()) << endl; - cout << " after getOrder" << endl; taco_uassert(indices.size() == (size_t)getOrder()) << "A tensor of order " << getOrder() << " must be indexed with " << getOrder() << " variables, but is indexed with: " << util::join(indices); - cout << " after uassert" << endl; return Access(new AccessTensorNode(*this, indices)); } From db3b5c033a810bd0722c48e25832f27fb43ed2dc Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Thu, 17 Dec 2020 18:54:17 -0500 Subject: [PATCH 
51/61] Fix some things with rewriter and Scalar LinalgBase constructor and add new compound test --- include/taco/linalg.h | 4 +- src/linalg_notation/linalg_rewriter.cpp | 8 ++++ test/tests-linalg.cpp | 49 +++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/include/taco/linalg.h b/include/taco/linalg.h index 61cfd5d38..f32c977a9 100644 --- a/include/taco/linalg.h +++ b/include/taco/linalg.h @@ -254,8 +254,8 @@ class Scalar : public LinalgBase { operator CType() const { return tensorBase->at({}); } }; -template -Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {} +//template +//Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {} template Scalar::Scalar(std::string name, bool useTensorBase) : LinalgBase(name, Type(type(), {}) , type(), {}, Format(), false) {} diff --git a/src/linalg_notation/linalg_rewriter.cpp b/src/linalg_notation/linalg_rewriter.cpp index f8a2aae6f..d6f6c6a82 100644 --- a/src/linalg_notation/linalg_rewriter.cpp +++ b/src/linalg_notation/linalg_rewriter.cpp @@ -42,19 +42,25 @@ LinalgRewriter::LinalgRewriter() : visitor(new Visitor(this)) { } IndexExpr LinalgRewriter::rewriteSub(const LinalgSubNode* sub) { + auto originalIndices = liveIndices; IndexExpr indexA = rewrite(sub->a); + liveIndices = originalIndices; IndexExpr indexB = rewrite(sub->b); return new SubNode(indexA, indexB); } IndexExpr LinalgRewriter::rewriteAdd(const LinalgAddNode* add) { + auto originalIndices = liveIndices; IndexExpr indexA = rewrite(add->a); + liveIndices = originalIndices; IndexExpr indexB = rewrite(add->b); return new AddNode(indexA, indexB); } IndexExpr LinalgRewriter::rewriteElemMul(const LinalgElemMulNode* elemMul) { + auto originalIndices = liveIndices; IndexExpr indexA = rewrite(elemMul->a); + liveIndices = originalIndices; IndexExpr indexB = rewrite(elemMul->b); return new MulNode(indexA, indexB); } @@ -96,7 +102,9 @@ IndexExpr LinalgRewriter::rewriteMatMul(const LinalgMatMulNode *matMul) { } IndexExpr LinalgRewriter::rewriteDiv(const LinalgDivNode *div) { + auto originalIndices = liveIndices; IndexExpr indexA = rewrite(div->a); + liveIndices = originalIndices; IndexExpr indexB = rewrite(div->b); return new DivNode(indexA, indexB); } diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp index f58c39903..ea1c4f7d4 100644 --- a/test/tests-linalg.cpp +++ b/test/tests-linalg.cpp @@ -250,6 +250,55 @@ TEST(linalg, compound_expr_elemmul_elemadd) { ASSERT_TENSOR_EQ(A,tA); } +TEST(linalg, compound_sparse_matmul_transpose_outer) { + Matrix A("A", 16, 16, dense, sparse); + Matrix B("B", 16, 16, dense, sparse); + Matrix C("C", 16, 16, dense, sparse); + Matrix D("D", 16, 16, dense, dense); + Vector e("e", 16, sparse); + Vector f("f", 16, sparse); + + A(0,0) = 1; + A(0,1) = 2; + A(0,2) = 3; + B(0,0) = 1; + B(1,1) = 2; + B(2,2) = 3; + C(0, 0) = 8; + D(0,0) = 2; + D(0,1) = 3; + D(0,2) = 4; + + e(0) = 43; + f(1) = 2; + A = ((B*C)*D) + transpose(e*transpose(f)); + + // Tensor API equivalent + Tensor tA("tA", {16,16}, {dense, sparse}); + Tensor tB("tB", {16,16}, {dense, sparse}); + Tensor tC("tC", {16,16}, {dense, sparse}); + Tensor tD("tD", {16,16}, dense); + Tensor te("te", {16}, {sparse}); + Tensor tf("tf", {16}, {sparse}); + tA(0,0) = 1; + tA(0,1) = 2; + tA(0,2) = 3; + tB(0,0) = 1; + tB(1,1) = 2; + tB(2,2) = 3; + tC(0, 0) = 8; + tD(0,0) = 2; + tD(0,1) = 3; + tD(0,2) = 4; + + te(0) = 43; + tf(1) = 2; + IndexVar i,j, k, l, m, n; + tA(i,j) = ((tB(i,k) * tC(k,l)) * tD(l,j)) + (te(j)*tf(i)); + + ASSERT_TENSOR_EQ(tA, A); +} + 
From 470a1c5787614fdc5547356f220776648a72dc3c Mon Sep 17 00:00:00 2001
From: Olivia Hsu
Date: Thu, 17 Dec 2020 19:05:00 -0500
Subject: [PATCH 52/61] Remove bool useTensorBase from Scalar constructor

---
 include/taco/linalg.h | 2 +-
 test/tests-linalg.cpp | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/taco/linalg.h b/include/taco/linalg.h
index f32c977a9..baad9ac14 100644
--- a/include/taco/linalg.h
+++ b/include/taco/linalg.h
@@ -257,7 +257,7 @@ class Scalar : public LinalgBase {
 //template
 //Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {}
 template
-Scalar::Scalar(std::string name, bool useTensorBase) :
+Scalar::Scalar(std::string name) :
   LinalgBase(name, Type(type(), {}) , type(), {}, Format(), false) {}
 
 } // namespace taco

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index ea1c4f7d4..d86654be3 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -45,7 +45,7 @@ TEST(linalg, vecmat_mul_index_expr) {
 
 TEST(linalg, inner_mul_index_expr) {
 
-  Scalar x("x", true);
+  Scalar x("x");
   Vector b("b", 2, dense, false);
   Vector a("a", 2, dense, true);
 
@@ -152,7 +152,7 @@ TEST(linalg, vecmat_mul) {
 }
 
 TEST(linalg, inner_mul) {
-  Scalar x("x", true);
+  Scalar x("x");
   Vector b("b", 2, dense, false);
   Vector a("a", 2, dense, true);
 
@@ -200,7 +200,7 @@ TEST(linalg, outer_mul) {
 TEST(linalg, rowvec_transpose) {
   Vector b("b", 2, dense, true);
   Matrix A("A", 2, 2, dense, dense);
-  Scalar a("a", true);
+  Scalar a("a");
 
   b(0) = 2;
   b(1) = 5;

From 2ebb31f3138b25cfd424c6e8f3cb8474f927ab89 Mon Sep 17 00:00:00 2001
From: Matthew Lee
Date: Thu, 17 Dec 2020 21:35:51 -0800
Subject: [PATCH 53/61] allow for vector ASSERT_TENSOR_EQ too

---
 include/taco/linalg.h | 3 +++
 test/tests-linalg.cpp | 8 ++++++++
 2 files changed, 11 insertions(+)

diff --git a/include/taco/linalg.h b/include/taco/linalg.h
index 61cfd5d38..da9a2d123 100644
--- a/include/taco/linalg.h
+++ b/include/taco/linalg.h
@@ -179,6 +179,9 @@ class Vector : public LinalgBase {
   // Access methods for use in IndexExprs
   const Access operator()(const IndexVar i) const;
   Access operator()(const IndexVar i);
+
+  // Allow to be cast to a TensorBase for the sake of ASSERT_TENSOR_EQ
+  operator TensorBase() const { return *tensorBase; }
 };
 
 // ------------------------------------------------------------

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index f58c39903..0277afcf0 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -334,4 +334,12 @@ TEST(linalg, tensor_comparison) {
   B(1,1) = 1;
 
   ASSERT_TENSOR_EQ(A,B);
+
+  Vector a("a", 2);
+  Tensor ta("ta", {2});
+
+  a(0) = 1;
+  ta(0) = 1;
+
+  ASSERT_TENSOR_EQ(a,ta);
 }

From e8399dd65c4742e2224587eeca0bbb4acb6b8597 Mon Sep 17 00:00:00 2001
From: Matthew Lee
Date: Thu, 17 Dec 2020 23:13:47 -0800
Subject: [PATCH 54/61] add some Scalar tests (and allow for direct assignment of value to Scalar)

---
 include/taco/linalg.h |   7 +++
 test/tests-linalg.cpp | 100 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 107 insertions(+)

diff --git a/include/taco/linalg.h b/include/taco/linalg.h
index 3e356c890..6ee9a9f1b 100644
--- a/include/taco/linalg.h
+++ b/include/taco/linalg.h
@@ -254,7 +254,14 @@ class Scalar : public LinalgBase {
     (*tensorBase) = expr;
   }
 
+  CType operator=(CType x) {
+    tensorBase->insert({}, x);
+    return x;
+  }
+
   operator CType() const { return tensorBase->at({}); }
+
+  operator TensorBase() const { return *tensorBase; }
 };
 
 //template

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index 6d73582d2..90b5dabc6 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -392,3 +392,103 @@ TEST(linalg, tensor_comparison) {
   ASSERT_TENSOR_EQ(a,ta);
 }
+
+TEST(linalg, scalar_assignment) {
+  Scalar x("x");
+  Scalar y("y");
+  Scalar z("z");
+  x = 1;
+  y = x;
+  z = 1;
+  ASSERT_TENSOR_EQ(x,y);
+  ASSERT_TENSOR_EQ(x,z);
+}
+
+TEST(linalg, scalar_coeff_vector) {
+  Scalar x("x");
+  x = 2;
+  Vector y("y", 5);
+  for(int i=0;i<5;i++) {
+    y(i) = i;
+  }
+
+  Vector z("z", 5);
+  for(int i=0;i<5;i++) {
+    z(i) = 2*i;
+  }
+
+  Vector xy("xy", 5);
+  xy = x * y;
+
+  ASSERT_TENSOR_EQ(xy,z);
+}
+
+TEST(linalg, scalar_coeff_matrix) {
+  Scalar x("x");
+  x = 2;
+  Matrix A("A", 5,5);
+  Matrix xA("xA", 5,5);
+
+  A(0,0) = 1;
+  A(2,3) = 2;
+  A(4,0) = 3;
+
+  xA = x * A;
+
+  Matrix B("B", 5,5);
+
+  B(0,0) = 2;
+  B(2,3) = 4;
+  B(4,0) = 6;
+
+  ASSERT_TENSOR_EQ(xA,B);
+}
+
+TEST(linalg, compound_scalar_expr) {
+  Matrix A("A", {3,3});
+  Matrix B("B", {3,3});
+  Vector x("x", 3);
+  Vector y("y", 3, false);
+  Scalar a("a");
+  Scalar b("b");
+  Scalar c("c");
+
+  a = 2;
+  b = 3;
+  c = 4;
+  B(0,0) = 2;
+  B(0,1) = 3;
+  B(1,2) = 4;
+  x(0) = 2;
+  x(1) = 5;
+  x(2) = 1;
+  y(0) = 4;
+  y(1) = 5;
+  y(2) = 1;
+
+  A = a*B + b*x*y*c;
+
+  // Tensor API equivalent
+  Tensor tA("tA", {3,3});
+  Tensor tB("tB", {3,3});
+  Tensor tx("tx", {3});
+  Tensor ty("ty", {3});
+  Tensor ta(2);
+  Tensor tb(3);
+  Tensor tc(4);
+
+  tB(0,0) = 2;
+  tB(0,1) = 3;
+  tB(1,2) = 4;
+  tx(0) = 2;
+  tx(1) = 5;
+  tx(2) = 1;
+  ty(0) = 4;
+  ty(1) = 5;
+  ty(2) = 1;
+
+  IndexVar i,j;
+  tA(i,j) = ta() * tB(i,j) + tb() * tx(i) * ty(j) * tc();
+
+  ASSERT_TENSOR_EQ(A,tA);
+}
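The Scalar additions above (direct value assignment through operator=(CType) plus the TensorBase conversions used by ASSERT_TENSOR_EQ) make scalars usable like ordinary values. A small sketch, assuming the taco/linalg.h header from this series and a double element type; it mirrors the scalar_coeff_vector test rather than introducing new API:

#include <iostream>
#include "taco/linalg.h"
using namespace taco;

int main() {
  Scalar<double> x("x");
  x = 2;                      // direct assignment stores 2 at the scalar's single entry

  Vector<double> y("y", 5);
  for (int i = 0; i < 5; i++) {
    y(i) = i;
  }

  Vector<double> xy("xy", 5);
  xy = x * y;                 // scalar-vector scaling, as in the new tests

  // Vector and Scalar now convert to TensorBase, so results can be printed
  // or compared against the Tensor API (e.g. with ASSERT_TENSOR_EQ).
  std::cout << xy << std::endl;
  return 0;
}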
From 542566ad8168fcef78fb7676c5ca357d93f20cb2 Mon Sep 17 00:00:00 2001
From: Matthew Lee
Date: Fri, 18 Dec 2020 10:30:56 -0800
Subject: [PATCH 55/61] minor cleanup

---
 include/taco/linalg.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/include/taco/linalg.h b/include/taco/linalg.h
index 6ee9a9f1b..f7c2222ad 100644
--- a/include/taco/linalg.h
+++ b/include/taco/linalg.h
@@ -264,8 +264,6 @@ class Scalar : public LinalgBase {
   operator TensorBase() const { return *tensorBase; }
 };
 
-//template
-//Scalar::Scalar(std::string name) : LinalgBase(name, Type(type(), {})) {}
 template
 Scalar::Scalar(std::string name) :
   LinalgBase(name, Type(type(), {}) , type(), {}, Format(), false) {}

From ee18452f14ffc0c62870784062893049dfd6dcc4 Mon Sep 17 00:00:00 2001
From: Olivia Hsu
Date: Mon, 21 Dec 2020 20:09:25 -0500
Subject: [PATCH 56/61] Add in test that sometimes fails

---
 include/taco/linalg.h |  1 -
 test/tests-linalg.cpp | 49 ++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/include/taco/linalg.h b/include/taco/linalg.h
index f7c2222ad..3664547b0 100644
--- a/include/taco/linalg.h
+++ b/include/taco/linalg.h
@@ -244,7 +244,6 @@ class Scalar : public LinalgBase {
   Datatype ctype;
 public:
   explicit Scalar(std::string name);
-  Scalar(std::string name, bool useTensorBase);
   LinalgAssignment operator=(const LinalgExpr &expr) {
     return LinalgBase::operator=(expr);
   }

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index 90b5dabc6..785fa555d 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -293,12 +293,59 @@ TEST(linalg, compound_sparse_matmul_transpose_outer) {
 
   te(0) = 43;
   tf(1) = 2;
-  IndexVar i,j, k, l, m, n;
+  IndexVar i,j, k, l;
   tA(i,j) = ((tB(i,k) * tC(k,l)) * tD(l,j)) + (te(j)*tf(i));
 
   ASSERT_TENSOR_EQ(tA, A);
 }
 
+TEST(linalg, compound_ATCA) {
+  // WORKS
+  Matrix A("A", 16, 16, sparse, sparse); // Works: {dense, sparse} and {dense, dense}
+  Matrix B("B", 16, 16, dense, dense);
+  Matrix C("C", 16, 16, dense, sparse);
+  // Tensor API equivalent
+  Tensor tA("tA", {16,16}, {sparse, sparse});
+  Tensor tB("tB", {16,16}, {dense, dense});
+  Tensor tC("tC", {16,16}, {dense, sparse});
+
+  for (int i = 0; i < 16; i++) {
+    for (int j = 0; j < 16; j++) {
+      C(i, j) = i*j;
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    A(i, i) = i;
+  }
+
+
+  B = (transpose(A) * C) * A;
+  cout << B.getIndexAssignment() << endl;
+  for (int i = 1; i < 16; i++) {
+    for (int j = 1; j < 16; j++) {
+      cout << i << ", " << j << ": ";
+      cout << B(i,j) << endl;
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    for (int j = 0; j < 16; j++) {
+      tC(i, j) = i*j;
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    tA(i, i) = i;
+  }
+  IndexVar i, j, k, l;
+  tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j);
+
+  cout << tB << endl;
+
+  ASSERT_TENSOR_EQ(tB, B);
+}
+
 TEST(linalg, matrix_constructors) {
   Matrix A("A");
   Matrix B("B", {2, 2});
From 0e39c8a3220d8067a3cb236591b65736d1d1400e Mon Sep 17 00:00:00 2001
From: Olivia Hsu
Date: Tue, 5 Jan 2021 16:21:53 -0500
Subject: [PATCH 57/61] WIP debugging linalg and tensor api

---
 src/index_notation/transformations.cpp |  8 +++++++-
 test/tests-linalg.cpp                  | 21 +++++++++++----------
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/src/index_notation/transformations.cpp b/src/index_notation/transformations.cpp
index 955687f46..5eb2467c9 100644
--- a/src/index_notation/transformations.cpp
+++ b/src/index_notation/transformations.cpp
@@ -844,7 +844,13 @@ IndexStmt reorderLoopsTopologically(IndexStmt stmt) {
     varOrderFromTensorLevels(tensorLevelVar.second);
   }
   const auto hardDeps = depsFromVarOrders(tensorVarOrders);
-
+  cout << "Debug ---" << endl;
+  for (auto it = hardDeps.begin(); it != hardDeps.end(); it++) {
+    cout << it->first << ", ";
+    for (auto its = it->second.begin(); its != it->second.end(); its++) {
+      cout << *its << endl;
+    }
+  }
 
   struct CollectSoftDependencies : public IndexNotationVisitor {
     using IndexNotationVisitor::visit;

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index 785fa555d..215a08fd2 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -302,11 +302,11 @@ TEST(linalg, compound_ATCA) {
   // WORKS
   Matrix A("A", 16, 16, sparse, sparse); // Works: {dense, sparse} and {dense, dense}
-  Matrix B("B", 16, 16, dense, dense);
+  Matrix B("B", 16, 16, sparse, sparse);
   Matrix C("C", 16, 16, dense, sparse);
   // Tensor API equivalent
   Tensor tA("tA", {16,16}, {sparse, sparse});
-  Tensor tB("tB", {16,16}, {dense, dense});
+  Tensor tB("tB", {16,16}, {sparse, sparse});
   Tensor tC("tC", {16,16}, {dense, sparse});
 
   for (int i = 0; i < 16; i++) {
@@ -322,12 +322,13 @@ TEST(linalg, compound_ATCA) {
 
   B = (transpose(A) * C) * A;
   cout << B.getIndexAssignment() << endl;
-  for (int i = 1; i < 16; i++) {
-    for (int j = 1; j < 16; j++) {
-      cout << i << ", " << j << ": ";
-      cout << B(i,j) << endl;
-    }
-  }
+  cout << B << endl;
+//  for (int i = 1; i < 16; i++) {
+//    for (int j = 1; j < 16; j++) {
+//      cout << i << ", " << j << ": ";
+//      cout << B(i,j) << endl;
+//    }
+//  }
 
   for (int i = 0; i < 16; i++) {
     for (int j = 0; j < 16; j++) {
@@ -342,8 +343,8 @@ TEST(linalg, compound_ATCA) {
     tA(i, i) = i;
   }
   IndexVar i, j, k, l;
   tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j);
 
   cout << tB << endl;
-
-  ASSERT_TENSOR_EQ(tB, B);
+  //cout << B << endl;
+  //ASSERT_TENSOR_EQ(tB, B);
 }
 
 TEST(linalg, matrix_constructors) {
From da4c9ef0ed9032a7b6df501a08681b1c82d9ca0d Mon Sep 17 00:00:00 2001
From: Matthew Lee
Date: Thu, 14 Jan 2021 10:05:23 -0800
Subject: [PATCH 58/61] Make LinalgTensorBaseNode subclass of LinalgVarNode

Fixes assertion failure in LinalgBase::operator= (compiled in debug mode)
---
 include/taco/linalg_notation/linalg_notation_nodes.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/taco/linalg_notation/linalg_notation_nodes.h b/include/taco/linalg_notation/linalg_notation_nodes.h
index 5c47de49e..44e3342ea 100644
--- a/include/taco/linalg_notation/linalg_notation_nodes.h
+++ b/include/taco/linalg_notation/linalg_notation_nodes.h
@@ -35,18 +35,17 @@ namespace taco {
     TensorVar tensorVar;
   };
 
-  struct LinalgTensorBaseNode : public LinalgExprNode {
+  struct LinalgTensorBaseNode : public LinalgVarNode {
    LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase)
-      : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder()), tensorVar(tensorVar), tensorBase(tensorBase) {}
+      : LinalgVarNode(tensorVar), tensorBase(tensorBase) {}
    LinalgTensorBaseNode(TensorVar tensorVar, TensorBase *tensorBase, bool isColVec)
-      : LinalgExprNode(tensorVar.getType().getDataType(), tensorVar.getOrder(), isColVec), tensorVar(tensorVar), tensorBase(tensorBase) {}
+      : LinalgVarNode(tensorVar, isColVec), tensorBase(tensorBase) {}
    void accept(LinalgExprVisitorStrict* v) const override {
      v->visit(this);
    }

    virtual void setAssignment(const LinalgAssignment& assignment) {}

-    TensorVar tensorVar;
    TensorBase* tensorBase;
  };

From 172624646c1e8fbce1028f59c38bb8c1fcca5ffd Mon Sep 17 00:00:00 2001
From: Matthew Lee
Date: Thu, 14 Jan 2021 11:02:30 -0800
Subject: [PATCH 59/61] test printing explicitly, fix extra newline when printing Linalg

---
 src/index_notation/transformations.cpp |  7 ---
 src/linalg.cpp                         |  2 +-
 test/tests-linalg.cpp                  | 84 ++++++++++++++++++++------
 3 files changed, 66 insertions(+), 27 deletions(-)

diff --git a/src/index_notation/transformations.cpp b/src/index_notation/transformations.cpp
index 5eb2467c9..a769879b9 100644
--- a/src/index_notation/transformations.cpp
+++ b/src/index_notation/transformations.cpp
@@ -844,13 +844,6 @@ IndexStmt reorderLoopsTopologically(IndexStmt stmt) {
     varOrderFromTensorLevels(tensorLevelVar.second);
   }
   const auto hardDeps = depsFromVarOrders(tensorVarOrders);
-  cout << "Debug ---" << endl;
-  for (auto it = hardDeps.begin(); it != hardDeps.end(); it++) {
-    cout << it->first << ", ";
-    for (auto its = it->second.begin(); its != it->second.end(); its++) {
-      cout << *its << endl;
-    }
-  }
 
   struct CollectSoftDependencies : public IndexNotationVisitor {
     using IndexNotationVisitor::visit;

diff --git a/src/linalg.cpp b/src/linalg.cpp
index 00ed6c263..5cf3eb861 100644
--- a/src/linalg.cpp
+++ b/src/linalg.cpp
@@ -89,7 +89,7 @@ std::ostream& operator<<(std::ostream& os, const LinalgBase& linalg) {
 
   // If TensorBase exists, print the storage
   if (linalg.tensorBase != nullptr) {
-    return os << *(linalg.tensorBase) << endl;
+    return os << *(linalg.tensorBase);
   }
 
   if (!assignment.defined()) return os << getNode(linalg)->tensorVar.getName();

diff --git a/test/tests-linalg.cpp b/test/tests-linalg.cpp
index 215a08fd2..1e1cc613e 100644
--- a/test/tests-linalg.cpp
+++ b/test/tests-linalg.cpp
@@ -300,14 +300,9 @@ TEST(linalg, compound_sparse_matmul_transpose_outer) {
 }
 
 TEST(linalg, compound_ATCA) {
-  // WORKS
-  Matrix A("A", 16, 16, sparse, sparse); // Works: {dense, sparse} and {dense, dense}
-  Matrix B("B", 16, 16, sparse, sparse);
-  Matrix C("C", 16, 16, dense, sparse);
-  // Tensor API equivalent
-  Tensor tA("tA", {16,16}, {sparse, sparse});
-  Tensor tB("tB", {16,16}, {sparse, sparse});
-  Tensor tC("tC", {16,16}, {dense, sparse});
+  Matrix A("A", 16, 16, dense, dense);
+  Matrix B("B", 16, 16, dense, dense);
+  Matrix C("C", 16, 16, dense, dense);
 
   for (int i = 0; i < 16; i++) {
     for (int j = 0; j < 16; j++) {
@@ -319,16 +314,60 @@ TEST(linalg, compound_ATCA) {
     A(i, i) = i;
   }
 
+  B = (transpose(A) * C) * A;
+
+  // Tensor API equivalent
+  Tensor tA("tA", {16,16}, {dense, dense});
+  Tensor tB("tB", {16,16}, {dense, dense});
+  Tensor tC("tC", {16,16}, {dense, dense});
+
+  for (int i = 0; i < 16; i++) {
+    for (int j = 0; j < 16; j++) {
+      tC(i, j) = i*j;
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    tA(i, i) = i;
+  }
+
+  IndexVar i, j, k, l;
+  tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j);
+
+  ASSERT_TENSOR_EQ(tB, B);
+}
+
+TEST(linalg, print) {
+  Matrix A("A", 16, 16, dense, dense);
+  Matrix B("B", 16, 16, dense, dense);
+  Matrix C("C", 16, 16, dense, dense);
+
+  for (int i = 0; i < 16; i++) {
+    for (int j = 0; j < 16; j++) {
+      C(i, j) = i*j;
+    }
+  }
+
+  for (int i = 0; i < 16; i++) {
+    A(i, i) = i;
+  }
 
 
   B = (transpose(A) * C) * A;
-  cout << B.getIndexAssignment() << endl;
-  cout << B << endl;
-//  for (int i = 1; i < 16; i++) {
-//    for (int j = 1; j < 16; j++) {
-//      cout << i << ", " << j << ": ";
-//      cout << B(i,j) << endl;
-//    }
-//  }
+
+  std::stringstream linalgBuffer;
+  linalgBuffer << B << endl;
+  for (int i = 1; i < 16; i++) {
+    for (int j = 1; j < 16; j++) {
+      linalgBuffer << i << ", " << j << ": ";
+      linalgBuffer << B(i,j) << endl;
+    }
+  }
+  linalgBuffer << B << endl;
+
+  // Tensor API equivalent
+  Tensor tA("A", {16,16}, {dense, dense});
+  Tensor tB("B", {16,16}, {dense, dense});
+  Tensor tC("C", {16,16}, {dense, dense});
 
   for (int i = 0; i < 16; i++) {
     for (int j = 0; j < 16; j++) {
@@ -342,9 +381,16 @@ TEST(linalg, compound_ATCA) {
     tA(i, i) = i;
   }
   IndexVar i, j, k, l;
   tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j);
 
-  cout << tB << endl;
-  //cout << B << endl;
-  //ASSERT_TENSOR_EQ(tB, B);
+  std::stringstream tensorBuffer;
+  tensorBuffer << tB << endl;
+  for (int i = 1; i < 16; i++) {
+    for (int j = 1; j < 16; j++) {
+      tensorBuffer << i << ", " << j << ": ";
+      tensorBuffer << tB(i,j) << endl;
+    }
+  }
+  tensorBuffer << tB << endl;
+  ASSERT_EQ(tensorBuffer.str(), linalgBuffer.str());
 }
 
 TEST(linalg, matrix_constructors) {
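Since operator<< for LinalgBase no longer appends its own newline, a linalg object and its Tensor API counterpart can be streamed into buffers and compared line for line, which is what the new print test checks. A condensed sketch of that usage, with the double element type assumed:

#include <iostream>
#include <sstream>
#include "taco/linalg.h"
using namespace taco;

int main() {
  Matrix<double> A("A", 4, 4, dense, dense);
  Matrix<double> B("B", 4, 4, dense, dense);
  Matrix<double> C("C", 4, 4, dense, dense);
  for (int i = 0; i < 4; i++) {
    A(i, i) = i;
    C(0, i) = i;
  }

  B = (transpose(A) * C) * A;

  std::stringstream buffer;
  buffer << B << std::endl;   // exactly one trailing newline, supplied by the caller
  std::cout << buffer.str();
  return 0;
}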
tA("tA", {16,16}, {sparse, sparse}); - Tensor tB("tB", {16,16}, {sparse, sparse}); - Tensor tC("tC", {16,16}, {dense, sparse}); + Matrix A("A", 16, 16, dense, dense); + Matrix B("B", 16, 16, dense, dense); + Matrix C("C", 16, 16, dense, dense); for (int i = 0; i < 16; i++) { for (int j = 0; j < 16; j++) { @@ -319,16 +314,60 @@ TEST(linalg, compound_ATCA) { A(i, i) = i; } + B = (transpose(A) * C) * A; + + // Tensor API equivalent + Tensor tA("tA", {16,16}, {dense, dense}); + Tensor tB("tB", {16,16}, {dense, dense}); + Tensor tC("tC", {16,16}, {dense, dense}); + + for (int i = 0; i < 16; i++) { + for (int j = 0; j < 16; j++) { + tC(i, j) = i*j; + } + } + + for (int i = 0; i < 16; i++) { + tA(i, i) = i; + } + + IndexVar i, j, k, l; + tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j); + + ASSERT_TENSOR_EQ(tB, B); +} + +TEST(linalg, print) { + Matrix A("A", 16, 16, dense, dense); + Matrix B("B", 16, 16, dense, dense); + Matrix C("C", 16, 16, dense, dense); + + for (int i = 0; i < 16; i++) { + for (int j = 0; j < 16; j++) { + C(i, j) = i*j; + } + } + + for (int i = 0; i < 16; i++) { + A(i, i) = i; + } B = (transpose(A) * C) * A; - cout << B.getIndexAssignment() << endl; - cout << B << endl; -// for (int i = 1; i < 16; i++) { -// for (int j = 1; j < 16; j++) { -// cout << i << ", " << j << ": "; -// cout << B(i,j) << endl; -// } -// } + + std::stringstream linalgBuffer; + linalgBuffer << B << endl; + for (int i = 1; i < 16; i++) { + for (int j = 1; j < 16; j++) { + linalgBuffer << i << ", " << j << ": "; + linalgBuffer << B(i,j) << endl; + } + } + linalgBuffer << B << endl; + + // Tensor API equivalent + Tensor tA("A", {16,16}, {dense, dense}); + Tensor tB("B", {16,16}, {dense, dense}); + Tensor tC("C", {16,16}, {dense, dense}); for (int i = 0; i < 16; i++) { for (int j = 0; j < 16; j++) { @@ -342,9 +381,16 @@ TEST(linalg, compound_ATCA) { IndexVar i, j, k, l; tB(i, j) = (tA(k, i) * tC(k, l)) * tA(l, j); - cout << tB << endl; - //cout << B << endl; - //ASSERT_TENSOR_EQ(tB, B); + std::stringstream tensorBuffer; + tensorBuffer << tB << endl; + for (int i = 1; i < 16; i++) { + for (int j = 1; j < 16; j++) { + tensorBuffer << i << ", " << j << ": "; + tensorBuffer << tB(i,j) << endl; + } + } + tensorBuffer << tB << endl; + ASSERT_EQ(tensorBuffer.str(), linalgBuffer.str()); } TEST(linalg, matrix_constructors) { From e0fe427ed3852f8f0a57b9f9b8456c3e7794050c Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Thu, 14 Jan 2021 18:28:25 -0500 Subject: [PATCH 60/61] Fix command line flags for linalg to be correct --- tools/taco.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/taco.cpp b/tools/taco.cpp index 9b4c0b669..40a6290f8 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -197,11 +197,12 @@ static void printUsageInfo() { cout << endl; printFlag("linalg", "Specify if the input should be in Linear Algebra (not index) Notation"); cout << endl; - printFlag("k=:,", + printFlag("k=:,", "[LINALG NOTATION ONLY -linalg] Specify the shape of the linear algebra var. " - "Specify the number of dimensions, shape, (0, 1, or 2) and an optional flag of " - "if the var is a column vector for the cases where order == 1 (1 or 0) " + "Specify the number of dimensions, shape (0, 1, or 2), and an optional is col vec" + "flag when order == 1 (1 or 0). 
" "Examples: A:2, A:0, A:1,1, A:1,0"); + cout << endl; } static int reportError(string errorMessage, int errorCode) { From d5fcc3dc767532815cd2f513766dd2de5dad928a Mon Sep 17 00:00:00 2001 From: Olivia Hsu Date: Thu, 14 Jan 2021 18:33:03 -0500 Subject: [PATCH 61/61] Remove print statements in linalg parser --- tools/taco.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/taco.cpp b/tools/taco.cpp index 40a6290f8..f492b8ccf 100644 --- a/tools/taco.cpp +++ b/tools/taco.cpp @@ -922,8 +922,6 @@ int main(int argc, char* argv[]) { try { parser->parse(); tensor = parser->getResultTensor(); - cout << "getResultTensor!!" << endl; - cout << tensor; } catch (parser::ParseError& e) { return reportError(e.getMessage(), 6); }