Build: Remove Eigen patch from code base
Pivotal tracker: 102425066

The Apache license recommends that MPL code not be modified directly in
Apache software. Hence, we move the Eigen patch to a separate repository.

We also update the UseLATEX.cmake file to eliminate some CMake
warnings about missing dependencies.
Rahul Iyer committed Sep 23, 2015
1 parent 92b19b9 commit 7fdf2b8
Showing 7 changed files with 982 additions and 818 deletions.
1,664 changes: 954 additions & 710 deletions cmake/UseLATEX.cmake


2 changes: 1 addition & 1 deletion doc/design/CMakeLists.txt
@@ -32,7 +32,7 @@ if(LATEX_BINARY AND BIBTEX_BINARY AND MAKEINDEX_BINARY)
BIBFILES ../literature.bib
INPUTS
${DESIGN_DOC_MODULES}
- MANGLE_TARGET_NAMES
+ EXCLUDE_FROM_ALL
)
else()
message(STATUS "A complete LaTeX installation could not be found. "
21 changes: 10 additions & 11 deletions src/CMakeLists.txt
@@ -18,6 +18,10 @@ set(BITBUCKET_BASE_URL
"${MADLIB_REDIRECT_PREFIX}https://bitbucket.org"
CACHE STRING
"Base URL for Bitbucket projects. May be overridden for testing purposes.")
+ set(GITHUB_MADLIB_BASE_URL
+ "${MADLIB_REDIRECT_PREFIX}https://github.com/madlib"
+ CACHE STRING
+ "Base URL for MADlib Github projects. May be overridden for testing purposes.")

# Boost might not be present on the system (or simply too old). In this case, we
# download the following version (unless it is already present in
@@ -47,16 +51,16 @@ endif (NOT BOOST_TAR_SOURCE)
# specify an alternative path to the Eigen tarball:
# -DEIGEN_TAR_SOURCE=/path/to/eigen-x.x.x.tar.gz

set(EIGEN_VERSION "3.2.2")
set(EIGEN_TAR_MD5 f8817c258b1495cb474d412e528aedb6)
set(EIGEN_VERSION "branches/3.2")
set(EIGEN_URL "${GITHUB_MADLIB_BASE_URL}/eigen/archive/${EIGEN_VERSION}.tar.gz")
set(EIGEN_TAR_MD5 52cd37725d0a1852325d9325674423fa)
set(EIGEN_MPL2_ONLY TRUE)

- set(EIGEN_TAR "eigen-${EIGEN_VERSION}.tar.gz")
- set(EIGEN_URL "${BITBUCKET_BASE_URL}/eigen/eigen/get/${EIGEN_VERSION}.tar.gz")

if(NOT EIGEN_TAR_SOURCE)
+ STRING(REGEX REPLACE "/" "-" EIGEN_VERSION_NO_SLASH ${EIGEN_VERSION})
+ set(EIGEN_TAR "eigen-${EIGEN_VERSION_NO_SLASH}.tar.gz")
find_file(EIGEN_TAR_SOURCE ${EIGEN_TAR}
- PATHS ${MAD_THIRD_PARTY}/downloads)
+ PATHS ${MAD_THIRD_PARTY}/downloads)
endif(NOT EIGEN_TAR_SOURCE)

if(NOT EIGEN_TAR_SOURCE)
@@ -129,15 +133,10 @@ endif(Boost_FOUND)


# -- Third-party dependencies: Download the C++ linear-algebra library Eigen ---

- # FIXME: Eigen is a third-party source that is patched in-place. Other
- # third-party headers are patched in the patch directory.
ExternalProject_Add(EP_eigen
PREFIX ${MAD_THIRD_PARTY}
DOWNLOAD_DIR ${MAD_THIRD_PARTY}/downloads
URL ${EIGEN_TAR_SOURCE}
URL_MD5 ${EIGEN_TAR_MD5}
- PATCH_COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/patch/Eigen.sh"
CMAKE_COMMAND /usr/bin/env echo Ignored: cmake
BUILD_COMMAND /usr/bin/env echo Ignored: make
INSTALL_COMMAND /usr/bin/env echo Ignored: make
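
With the in-place patch gone, the set(EIGEN_MPL2_ONLY TRUE) setting above presumably translates into defining Eigen's EIGEN_MPL2_ONLY macro for the build. When that macro is defined before any Eigen header is included, Eigen fails compilation as soon as a non-MPL2 (LGPL-licensed) code path is pulled in, which is what keeps the unpatched upstream sources license-clean. A minimal sketch of that guard, assuming only that the Eigen headers are on the include path (this is not MADlib build code):

    // Sketch only (not MADlib code): demonstrates Eigen's EIGEN_MPL2_ONLY guard.
    // The macro must be defined before the first Eigen include; MPL2-licensed
    // modules such as Core and Dense still compile, while including an
    // LGPL-only part of Eigen would produce a compile-time error instead.
    #define EIGEN_MPL2_ONLY
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Matrix2d a;
        a << 1.0, 2.0,
             3.0, 4.0;
        std::cout << "det = " << a.determinant() << std::endl;
        return 0;
    }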
28 changes: 14 additions & 14 deletions src/modules/convex/task/l1.hpp
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------- *//**
- *
+ *
* @file l1.hpp
*
*//* ----------------------------------------------------------------------- */
@@ -24,19 +24,19 @@ class L1 {
typedef Model model_type;

static void gradient(
- const model_type &model,
- const double &lambda,
- model_type &gradient);
+ const model_type &model,
+ const double &lambda,
+ model_type &gradient);

static void clipping(
model_type &incrModel,
const double &lambda,
const int &n_tuples,
const double &stepsize);
model_type &incrModel,
const double &lambda,
const int &n_tuples,
const double &stepsize);

static double loss(
- const model_type &model,
- const double &lambda);
+ const model_type &model,
+ const double &lambda);
};

template <class Model>
@@ -58,7 +58,7 @@ template <class Model>
void
L1<Model>::clipping(
model_type &incrModel,
- const double &lambda,
+ const double &lambda,
const int &n_tuples,
const double &stepsize) {
// implement the Clipping method mentioned in Tsuruoka et al. 2009
@@ -73,13 +73,13 @@ L1<Model>::clipping(
}

template <class Model>
- double
+ double
L1<Model>::loss(
- const model_type &model,
+ const model_type &model,
const double &lambda) {
double s = 0.;
for (Index i = 0; i < model.size(); i++) {
- s += abs(model(i));
+ s += std::abs(model(i));
}
return lambda * s;
}
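
The abs -> std::abs change above matters because calling abs on a double can silently truncate to int when only the C library overload is visible; std::abs selects the floating-point overload. Below is a small standalone sketch of the same two pieces, the L1 penalty loss and a Tsuruoka-et-al.-style clipping step, written against std::vector<double> as a stand-in for the templated model type (the names and the exact shrink amount are illustrative, not MADlib's API):

    // Illustrative sketch, not MADlib code: L1 penalty and a clipped SGD step
    // in the spirit of Tsuruoka et al. (2009), with std::vector<double>
    // standing in for the templated model type.
    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // loss(w) = lambda * sum_i |w_i|
    double l1_loss(const std::vector<double>& model, double lambda) {
        double s = 0.0;
        for (std::size_t i = 0; i < model.size(); ++i) {
            s += std::abs(model[i]);   // std::abs keeps the double overload
        }
        return lambda * s;
    }

    // Shrink each coefficient toward zero by at most lambda * stepsize / n_tuples,
    // never letting it cross zero (the "clipping" variant of L1-regularized SGD).
    void l1_clipping(std::vector<double>& model, double lambda,
                     int n_tuples, double stepsize) {
        const double shrink = lambda * stepsize / n_tuples;
        for (std::size_t i = 0; i < model.size(); ++i) {
            if (model[i] > 0.0) {
                model[i] = std::max(0.0, model[i] - shrink);
            } else if (model[i] < 0.0) {
                model[i] = std::min(0.0, model[i] + shrink);
            }
        }
    }

    int main() {
        std::vector<double> w = {0.8, -0.002, 0.0, -1.5};
        std::cout << "L1 loss before: " << l1_loss(w, 0.1) << "\n";  // 0.1 * 2.302
        l1_clipping(w, /*lambda=*/0.1, /*n_tuples=*/10, /*stepsize=*/0.5);
        std::cout << "L1 loss after:  " << l1_loss(w, 0.1) << "\n";
        return 0;
    }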
7 changes: 3 additions & 4 deletions src/modules/lda/lda.cpp
@@ -378,9 +378,9 @@ AnyType lda_count_topic_sfunc::run(AnyType & args)
// to store a voc_size x (topic_num+1) integer matrix in
// bigint[] (the +1 is for a flag of ceiling the count),
// we need padding if the size is odd.
- // 1. when voc_size * (topic_num + 1) is (2n+1), gives (n+1)
- // 2. when voc_size * (topic_num + 1) is (2n), gives (n)
- int dims[1] = {(voc_size * (topic_num + 1) + 1) * sizeof(int32_t) / sizeof(int64_t)};
+ // 1. when voc_size * (topic_num + 1) is (2n+1), gives (n+1)
+ // 2. when voc_size * (topic_num + 1) is (2n), gives (n)
+ int dims[1] = {static_cast<int>( (voc_size * (topic_num + 1) + 1) * sizeof(int32_t) / sizeof(int64_t) )};
int lbs[1] = {1};
state = madlib_construct_md_array(
NULL, NULL, 1, dims, lbs, INT8TI.oid, INT8TI.len, INT8TI.byval,
@@ -407,7 +407,6 @@ AnyType lda_count_topic_sfunc::run(AnyType & args)
word_index++;
}
}

return state;
}
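
The static_cast added above is presumably there to avoid the narrowing conversion from the unsigned size_t arithmetic down to int; the padding formula itself is unchanged. A short standalone check of that formula, under the assumed reading that N = voc_size * (topic_num + 1) int32 counts are packed into an int64[] array, so an odd N needs one padding slot:

    // Standalone check (not MADlib code) of the padding arithmetic used above:
    // packing N int32 counts into an int64[] array needs ceil(N / 2) elements,
    // which (N + 1) * sizeof(int32_t) / sizeof(int64_t) computes via integer
    // division for both the odd (2n+1) and even (2n) cases.
    #include <cstdint>
    #include <iostream>

    int main() {
        const int64_t ns[] = {5, 6};   // an odd and an even N
        for (int64_t n : ns) {
            int64_t padded =
                (n + 1) * static_cast<int64_t>(sizeof(int32_t)) / sizeof(int64_t);
            std::cout << "N = " << n << " -> " << padded
                      << " int64 elements (ceil(N/2) = " << (n + 1) / 2 << ")\n";
        }
        return 0;
    }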

71 changes: 0 additions & 71 deletions src/patch/Eigen.sh

This file was deleted.

7 changes: 0 additions & 7 deletions src/ports/postgres/modules/linalg/matrix_ops.py_in
@@ -459,7 +459,6 @@ def _validate_block(matrix, matrix_args):
"Matrix error: Invalid block column - array expected")
_assert(not table_is_empty(matrix),
"Matrix error: Input table {0} is empty".format(matrix))
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Transformation operations -------------------------------------------------
@@ -563,7 +562,6 @@ def matrix_densify(schema_madlib, matrix_in, in_args, matrix_out, out_args):
FROM generate_series(1, {col_dim}) AS val
) t2
""".format(**locals()))
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Element-wise operations ---------------------------------------------------
@@ -822,7 +820,6 @@ def matrix_elem_mult(schema_madlib, matrix_a, a_args,
matrix_elem_op(schema_madlib, matrix_a, a_args,
matrix_b, b_args, matrix_out, out_args,
elem_ops)
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Block operations ------------------------------------------------------
@@ -1027,7 +1024,6 @@ def matrix_unblockize(schema_madlib, matrix_in, in_args, matrix_out, out_args):
m4_ifdef(`__POSTGRESQL__', `',
`DISTRIBUTED BY ({out_args[row]})')
""".format(**locals()))
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Visitor operations --------------------------------------------------------
@@ -1145,7 +1141,6 @@ def _matrix_extract_col_dense(schema_madlib, matrix_in, in_args, index=1):
_assert(len(r) == 1,
"Matrix error: Invalid row index ({0}) for matrix {1}".format(index, matrix_in))
return r[0]['res']
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Extreme value operations --------------------------------------------------
@@ -1361,7 +1356,6 @@ def matrix_min(schema_madlib, matrix_in, in_args, dim, matrix_out,
fetch_index=False):
matrix_extremum(schema_madlib, matrix_in, in_args, dim,
matrix_out, 'min', fetch_index)
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Reduction operations ------------------------------------------------------
@@ -1524,7 +1518,6 @@ def matrix_mean(schema_madlib, matrix_in, in_args, dim):
'dense_agg_op': 'avg',
'dense_array_op': 'array_mean'}
return matrix_reduction_op(schema_madlib, matrix_in, in_args, dim, vector_ops)
- # ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# -- Mathematical operations ------------------------------------------------------
