From 004eee026a16f887ea212435b8e6c4065b862959 Mon Sep 17 00:00:00 2001 From: phschiele Date: Fri, 21 Jul 2023 00:35:33 +0200 Subject: [PATCH] Draft --- .gitignore | 1 + cvxpygen/cpg.py | 863 +++++++++++++++++-------------------------- cvxpygen/mappings.py | 68 ++++ cvxpygen/solvers.py | 416 +++++++++++++++++++++ 4 files changed, 824 insertions(+), 524 deletions(-) create mode 100644 cvxpygen/mappings.py create mode 100644 cvxpygen/solvers.py diff --git a/.gitignore b/.gitignore index dd13ba1..aab2b12 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *code/ tests/*/ +.python-version *.so __pycache__ .idea diff --git a/cvxpygen/cpg.py b/cvxpygen/cpg.py index 7e2a591..ff32142 100644 --- a/cvxpygen/cpg.py +++ b/cvxpygen/cpg.py @@ -16,14 +16,16 @@ import shutil import pickle import warnings -import osqp + from cvxpygen import utils +from cvxpygen.mappings import PrimalVariableInfo, DualVariableInfo, ConstraintInfo, ParameterCanon, \ + ParameterInfo +from cvxpygen.solvers import get_interface_class from cvxpygen.utils import C import cvxpy as cp import numpy as np from scipy import sparse from subprocess import call -from platform import system from cvxpy.problems.objective import Maximize from cvxpy.cvxcore.python import canonInterface as cI from cvxpy.expressions.variable import upper_tri_to_full @@ -38,28 +40,7 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi sys.stdout.write('Generating code with CVXPYgen ...\n') - cvxpygen_directory = os.path.dirname(os.path.realpath(__file__)) - solver_code_dir = os.path.join(code_dir, 'c', 'solver_code') - - # adjust problem_name - if prefix != '': - if not prefix[0].isalpha(): - prefix = '_' + prefix - prefix = prefix + '_' - - # create code directory and copy template files - if os.path.isdir(code_dir): - shutil.rmtree(code_dir) - os.mkdir(code_dir) - os.mkdir(os.path.join(code_dir, 'c')) - for d in ['src', 'include', 'build']: - os.mkdir(os.path.join(code_dir, 'c', d)) - os.mkdir(os.path.join(code_dir, 'cpp')) - for d in ['src', 'include']: - os.mkdir(os.path.join(code_dir, 'cpp', d)) - shutil.copy(os.path.join(cvxpygen_directory, 'template', 'CMakeLists.txt'), os.path.join(code_dir, 'c')) - for file in ['setup.py', 'README.html', '__init__.py']: - shutil.copy(os.path.join(cvxpygen_directory, 'template', file), code_dir) + create_folder_structure(code_dir) # problem data solver_opts = {} @@ -73,45 +54,183 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi verbose=False, solver_opts=solver_opts, ) + param_prob = data['param_prob'] - # catch non-supported cone types solver_name = solving_chain.solver.name() - p_prob = data['param_prob'] - if solver_name == 'ECOS': - if p_prob.cone_dims.exp > 0: - raise ValueError('Code generation with ECOS and exponential cones is not supported yet.') - elif solver_name == 'SCS': - if p_prob.cone_dims.exp > 0 or len(p_prob.cone_dims.psd) > 0 or len(p_prob.cone_dims.p3d) > 0: - raise ValueError('Code generation with SCS and exponential, positive semidefinite, or power cones ' - 'is not supported yet.') + interface_class = get_interface_class(solver_name) + + # for cone problems, check if all cones are supported + if hasattr(param_prob, 'cone_dims'): + cone_dims = param_prob.cone_dims + interface_class.check_unsupported_cones(cone_dims) # checks in sparsity - for p in p_prob.parameters: - if p.attributes['sparsity'] is not None: - if p.size == 1: - warnings.warn('Ignoring sparsity pattern for scalar parameter %s!' 
% p.name()) - p.attributes['sparsity'] = None - elif max(p.shape) == p.size: - warnings.warn('Ignoring sparsity pattern for vector parameter %s!' % p.name()) - p.attributes['sparsity'] = None - else: - for coord in p.attributes['sparsity']: - if coord[0] < 0 or coord[1] < 0 or coord[0] >= p.shape[0] or coord[1] >= p.shape[1]: - warnings.warn('Invalid sparsity pattern for parameter %s - out of range! ' - 'Ignoring sparsity pattern.' % p.name()) - p.attributes['sparsity'] = None - break - p.attributes['sparsity'] = list(set(p.attributes['sparsity'])) - elif p.attributes['diag']: - p.attributes['sparsity'] = [(i, i) for i in range(p.shape[0])] - if p.attributes['sparsity'] is not None and p.value is not None: - for i in range(p.shape[0]): - for j in range(p.shape[1]): - if (i, j) not in p.attributes['sparsity'] and p.value[i, j] != 0: - warnings.warn('Ignoring nonzero value outside of sparsity pattern for parameter %s!' % p.name()) - p.value[i, j] = 0 + handle_sparsity(param_prob) # variable information + variable_info = get_variable_info(problem, inverse_data) + + # dual variable information + dual_variable_info = get_dual_variable_info(inverse_data, solver_name) + + # user parameters + parameter_info = get_parameter_info(param_prob) + + # dimensions and information specific to solver + solver_interface = interface_class(data, param_prob) # noqa + + constraint_info = get_constraint_info(solver_interface) + + adjacency, parameter_canon = process_canonical_parameters(constraint_info, param_prob, + parameter_info, solver_interface, + solver_name) + + cvxpygen_directory = os.path.dirname(os.path.realpath(__file__)) + solver_code_dir = os.path.join(code_dir, 'c', 'solver_code') + solver_interface.generate_code(code_dir, solver_code_dir, cvxpygen_directory, parameter_canon) + + param_name_to_canon_outdated = { + user_p_name: [solver_interface.canon_p_ids[j] for j in np.nonzero(adjacency[:, i])[0]] + for i, user_p_name in enumerate(parameter_info.names)} + + # avoid non-alphanumeric first character in problem name + prefix = adjust_problem_name(prefix) + + # summarize information on options, codegen, user parameters / variables, + # canonicalization in dictionaries + info_opt = {C.CODE_DIR: code_dir, + C.SOLVER_NAME: solver_name, + C.UNROLL: unroll, + C.PREFIX: prefix} + + info_cg = {C.RET_PRIM_FUNC_EXISTS: solver_interface.ret_prim_func_exists(variable_info), + C.RET_DUAL_FUNC_EXISTS: solver_interface.ret_dual_func_exists(dual_variable_info), + C.NONZERO_D: parameter_canon.nonzero_d, + C.IS_MAXIMIZATION: type(problem.objective) == Maximize} + + info_usr = {C.P_WRITABLE: parameter_info.writable, + C.P_FLAT_USP: parameter_info.flat_usp, + C.P_COL_TO_NAME_USP: parameter_info.col_to_name_usp, + C.P_NAME_TO_SHAPE: parameter_info.name_to_shape, + C.P_NAME_TO_SIZE: parameter_info.name_to_size_usp, + C.P_NAME_TO_CANON_OUTDATED: param_name_to_canon_outdated, + C.P_NAME_TO_SPARSITY: parameter_info.name_to_sparsity, + C.P_NAME_TO_SPARSITY_TYPE: parameter_info.name_to_sparsity_type, + C.V_NAME_TO_INDICES: variable_info.name_to_indices, + C.V_NAME_TO_SIZE: variable_info.name_to_size, + C.V_NAME_TO_SHAPE: variable_info.name_to_shape, + C.V_NAME_TO_INIT: variable_info.name_to_init, + C.V_NAME_TO_SYM: variable_info.name_to_sym, + C.V_NAME_TO_OFFSET: variable_info.name_to_offset, + C.D_NAME_TO_INIT: dual_variable_info.name_to_init, + C.D_NAME_TO_VEC: dual_variable_info.name_to_vec, + C.D_NAME_TO_OFFSET: dual_variable_info.name_to_offset, + C.D_NAME_TO_SIZE: dual_variable_info.name_to_size, + C.D_NAME_TO_SHAPE: 
dual_variable_info.name_to_shape, + C.D_NAME_TO_INDICES: dual_variable_info.name_to_indices} + + info_can = {C.P: parameter_canon.p, + C.P_ID_TO_SIZE: parameter_canon.p_id_to_size, + C.P_ID_TO_CHANGES: parameter_canon.p_id_to_changes, + C.P_ID_TO_MAPPING: parameter_canon.p_id_to_mapping, + C.CONSTANTS: solver_interface.canon_constants, + C.SETTINGS_NAMES_TO_TYPE: solver_interface.settings_names_to_type, + C.SETTINGS_NAMES_TO_DEFAULT: solver_interface.settings_names_to_default, + C.SETTINGS_LIST: solver_interface.settings_names} + + write_c_code(code_dir, info_opt, info_cg, info_can, info_usr, problem) + + sys.stdout.write('CVXPYgen finished generating code.\n') + + if wrapper: + compile_python_module(code_dir) + + +def process_canonical_parameters(constraint_info, param_prob, parameter_info, solver_interface, solver_name): + adjacency = np.zeros(shape=(len(solver_interface.canon_p_ids), parameter_info.num), dtype=bool) + parameter_canon = ParameterCanon() + # compute affine mapping for each canonical parameter + for i, p_id in enumerate(solver_interface.canon_p_ids): + + affine_map = solver_interface.get_affine_map(p_id, param_prob, constraint_info) + + if p_id in solver_interface.canon_p_ids_constr_vec: + affine_map = update_to_dense_mapping(affine_map, param_prob, solver_interface) + + if p_id == 'd': + parameter_canon.nonzero_d = affine_map.mapping.nnz > 0 + + adjacency = update_adjacency_matrix(adjacency, i, parameter_info, affine_map.mapping) + + # take sparsity into account + affine_map.mapping = affine_map.mapping[:, parameter_info.sparsity_mask] + + # compute default values of canonical parameters + affine_map, parameter_canon = set_default_values(affine_map, p_id, parameter_canon, parameter_info, solver_interface, solver_name) + + parameter_canon.p_id_to_mapping[p_id] = affine_map.mapping.tocsr() + parameter_canon.p_id_to_changes[p_id] = affine_map.mapping[:, :-1].nnz > 0 + parameter_canon.p_id_to_size[p_id] = affine_map.mapping.shape[0] + return adjacency, parameter_canon + + +def update_to_dense_mapping(affine_map, param_prob, solver_interface): + mapping_to_sparse = solver_interface.sign_constr_vec * param_prob.reduced_A.reduced_mat[ + affine_map.mapping_rows] + mapping_to_dense = sparse.lil_matrix( + np.zeros((affine_map.shape[0], mapping_to_sparse.shape[1]))) + for i_data in range(mapping_to_sparse.shape[0]): + mapping_to_dense[affine_map.indices[i_data], :] = mapping_to_sparse[i_data, :] + affine_map.mapping = sparse.csc_matrix(mapping_to_dense) + return affine_map + + +def set_default_values(affine_map, p_id, parameter_canon, parameter_info, solver_interface, + solver_name): + if p_id.isupper(): + rows_nonzero, _ = affine_map.mapping.nonzero() + canon_p_data_nonzero = np.sort(np.unique(rows_nonzero)) + affine_map.mapping = affine_map.mapping[canon_p_data_nonzero, :] + canon_p_data = affine_map.mapping @ parameter_info.flat_usp + # compute 'indptr' to construct sparse matrix from 'canon_p_data' and 'indices' + if solver_name in ['OSQP', 'SCS']: + if p_id == 'P': + affine_map.indptr = solver_interface.indptr_obj + elif p_id == 'A': + affine_map.indptr = solver_interface.indptr_constr[:-1] + elif solver_name == 'ECOS': + indptr_original = solver_interface.indptr_constr[:-1] + affine_map.indptr = 0 * indptr_original + for r in affine_map.mapping_rows: + for c in range(affine_map.shape[1]): + if indptr_original[c] <= r < indptr_original[c + 1]: + affine_map.indptr[c + 1:] += 1 + break + # compute 'indices_usp' and 'indptr_usp' + indices_usp = 
affine_map.indices[canon_p_data_nonzero] + indptr_usp = 0 * affine_map.indptr + for r in canon_p_data_nonzero: + for c in range(affine_map.shape[1]): + if affine_map.indptr[c] <= r < affine_map.indptr[c + 1]: + indptr_usp[c + 1:] += 1 + break + csc_mat = sparse.csc_matrix((canon_p_data, indices_usp, indptr_usp), + shape=affine_map.shape) + if solver_name == 'OSQP': + parameter_canon.p_csc[p_id] = csc_mat + parameter_canon.p[p_id] = utils.csc_to_dict(csc_mat) + else: + canon_p_data = affine_map.mapping @ parameter_info.flat_usp + if solver_name == 'OSQP' and p_id == 'l': + parameter_canon.p[p_id] = np.concatenate( + (canon_p_data, -np.inf * np.ones(solver_interface.n_ineq)), axis=0) + else: + parameter_canon.p[p_id] = canon_p_data + + return affine_map, parameter_canon + + +def get_variable_info(problem, inverse_data) -> PrimalVariableInfo: variables = problem.variables() var_names = [var.name() for var in variables] var_ids = [var.id for var in variables] @@ -119,7 +238,8 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi var_name_to_offset = {n: o for n, o in zip(var_names, var_offsets)} var_shapes = [var.shape for var in variables] var_sizes = [var.size for var in variables] - var_sym = [var.attributes['symmetric'] or var.attributes['PSD'] or var.attributes['NSD'] for var in variables] + var_sym = [var.attributes['symmetric'] or var.attributes['PSD'] or var.attributes['NSD'] for var + in variables] var_name_to_sym = {n: s for n, s in zip(var_names, var_sym)} var_name_to_indices = {} for var_name, offset, shape, sym in zip(var_names, var_offsets, var_shapes, var_sym): @@ -128,7 +248,7 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi (_, col) = fill_coefficient.nonzero() var_name_to_indices[var_name] = offset + col else: - var_name_to_indices[var_name] = np.arange(offset, offset+np.prod(shape)) + var_name_to_indices[var_name] = np.arange(offset, offset + np.prod(shape)) var_name_to_size = {var.name(): var.size for var in variables} var_name_to_shape = {var.name(): var.shape for var in variables} var_name_to_init = dict() @@ -138,7 +258,13 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi else: var_name_to_init[var.name()] = np.zeros(shape=var.shape) - # dual variable information + variable_info = PrimalVariableInfo(var_name_to_offset, var_name_to_indices, var_name_to_size, + var_sizes, var_name_to_shape, var_name_to_init, + var_name_to_sym, var_sym) + return variable_info + + +def get_dual_variable_info(inverse_data, solver_name) -> DualVariableInfo: # get chain of constraint id maps for 'CvxAttr2Constr' and 'Canonicalization' objects dual_id_maps = [] if solver_name == 'OSQP': @@ -166,10 +292,10 @@ def generate_code(problem, code_dir='CPG_code', solver=None, unroll=False, prefi con_canon_dict = {c.id: c for c in con_canon} d_canon_offsets = np.cumsum([0] + [c.args[0].size for c in con_canon[:-1]]) if solver_name in ['OSQP', 'SCS']: - d_vectors = ['y']*len(d_canon_offsets) + d_vectors = ['y'] * len(d_canon_offsets) else: n_split_yz = len(inverse_data[-1][ECOS.EQ_CONSTR]) - d_vectors = ['y']*n_split_yz + ['z']*(len(d_canon_offsets)-n_split_yz) + d_vectors = ['y'] * n_split_yz + ['z'] * (len(d_canon_offsets) - n_split_yz) d_canon_offsets[n_split_yz:] -= d_canon_offsets[n_split_yz] d_canon_offsets_dict = {c.id: off for c, off in zip(con_canon, d_canon_offsets)} # select for user-defined constraints @@ -192,510 +318,76 @@ def generate_code(problem, code_dir='CPG_code', solver=None, 
unroll=False, prefi else: d_name_to_init[name] = np.zeros(shape=shape) - # user parameters - user_p_num = len(p_prob.parameters) - user_p_names = [par.name() for par in p_prob.parameters] - user_p_ids = list(p_prob.param_id_to_col.keys()) - user_p_id_to_col = p_prob.param_id_to_col - user_p_id_to_size = p_prob.param_id_to_size - user_p_id_to_param = p_prob.id_to_param - user_p_total_size = p_prob.total_param_size - user_p_name_to_shape = {user_p_id_to_param[p_id].name(): user_p_id_to_param[p_id].shape - for p_id in user_p_id_to_size.keys()} - user_p_name_to_size_usp = {user_p_id_to_param[p_id].name(): size for p_id, size in user_p_id_to_size.items()} - user_p_name_to_sparsity = {} - user_p_name_to_sparsity_type = {} - user_p_sparsity_mask = np.ones(user_p_total_size + 1, dtype=bool) - for p in p_prob.parameters: - if p.attributes['sparsity'] is not None: - user_p_name_to_size_usp[p.name()] = len(p.attributes['sparsity']) - user_p_name_to_sparsity[p.name()] = np.sort([coord[0]+p.shape[0]*coord[1] - for coord in p.attributes['sparsity']]) - if p.attributes['diag']: - user_p_name_to_sparsity_type[p.name()] = 'diag' - else: - user_p_name_to_sparsity_type[p.name()] = 'general' - user_p_sparsity_mask[user_p_id_to_col[p.id]:user_p_id_to_col[p.id]+user_p_id_to_size[p.id]] = False - user_p_sparsity_mask[user_p_id_to_col[p.id] + user_p_name_to_sparsity[p.name()]] = True - user_p_col_to_name_usp = {} - cum_sum = 0 - for name, size in user_p_name_to_size_usp.items(): - user_p_col_to_name_usp[cum_sum] = name - cum_sum += size - user_p_writable = dict() - for p_name, p in zip(user_p_names, p_prob.parameters): - if p.value is None: - p.project_and_assign(np.random.randn(*p.shape)) - if type(p.value) is sparse.dia_matrix: - p.value = p.value.toarray() - if len(p.shape) < 2: - # dealing with scalar or vector - user_p_writable[p_name] = p.value - else: - # dealing with matrix - if p_name in user_p_name_to_sparsity.keys(): - dense_value = p.value.flatten(order='F') - sparse_value = np.zeros(len(user_p_name_to_sparsity[p_name])) - for i, idx in enumerate(user_p_name_to_sparsity[p_name]): - sparse_value[i] = dense_value[idx] - user_p_writable[p_name] = sparse_value - else: - user_p_writable[p_name] = p.value.flatten(order='F') - - def user_p_value(user_p_id): - return np.array(user_p_id_to_param[user_p_id].value) - user_p_flat = cI.get_parameter_vector(user_p_total_size, user_p_id_to_col, user_p_id_to_size, user_p_value) - user_p_flat_usp = user_p_flat[user_p_sparsity_mask] - - canon_p = {} - canon_p_csc = {} - canon_p_id_to_mapping = {} - canon_p_id_to_changes = {} - canon_p_id_to_size = {} - nonzero_d = True - - # dimensions and information specific to solver - - if solver_name == 'OSQP': - - canon_p_ids = ['P', 'q', 'd', 'A', 'l', 'u'] - canon_p_ids_constr_vec = ['l', 'u'] - sign_constr_vec = -1 - n_var = data['n_var'] - n_eq = data['n_eq'] - n_ineq = data['n_ineq'] - - indices_obj, indptr_obj, shape_obj = p_prob.reduced_P.problem_data_index - indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index - - canon_constants = {} - - elif solver_name == 'SCS': - - canon_p_ids = ['c', 'd', 'A', 'b'] - canon_p_ids_constr_vec = ['b'] - sign_constr_vec = 1 - n_var = p_prob.x.size - n_eq = data['A'].shape[0] - n_ineq = 0 - - indices_obj, indptr_obj, shape_obj = None, None, None - indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index - - canon_constants = {'n': n_var, 'm': n_eq, 'z': p_prob.cone_dims.zero, 'l': p_prob.cone_dims.nonneg, - 'q': 
np.array(p_prob.cone_dims.soc), 'qsize': len(p_prob.cone_dims.soc)} - - elif solver_name == 'ECOS': - - canon_p_ids = ['c', 'd', 'A', 'b', 'G', 'h'] - canon_p_ids_constr_vec = ['b', 'h'] - sign_constr_vec = 1 - n_var = p_prob.x.size - n_eq = p_prob.cone_dims.zero - n_ineq = data['G'].shape[0] - - indices_obj, indptr_obj, shape_obj = None, None, None - indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index + dual_variable_info = DualVariableInfo(d_name_to_offset, d_name_to_indices, d_name_to_size, + d_sizes, d_name_to_shape, d_name_to_init, d_name_to_vec) + return dual_variable_info - canon_constants = {'n': n_var, 'm': n_ineq, 'p': n_eq, 'l': p_prob.cone_dims.nonneg, - 'n_cones': len(p_prob.cone_dims.soc), 'q': np.array(p_prob.cone_dims.soc), - 'e': p_prob.cone_dims.exp} - else: - raise ValueError("Problem class cannot be addressed by the OSQP, SCS, or ECOS solver!") - - n_data_constr = len(indices_constr) - n_data_constr_vec = indptr_constr[-1] - indptr_constr[-2] +def get_constraint_info(solver_interface) -> ConstraintInfo: + n_data_constr = len(solver_interface.indices_constr) + n_data_constr_vec = solver_interface.indptr_constr[-1] - solver_interface.indptr_constr[-2] n_data_constr_mat = n_data_constr - n_data_constr_vec - mapping_rows_eq = np.nonzero(indices_constr < n_eq)[0] - mapping_rows_ineq = np.nonzero(indices_constr >= n_eq)[0] - - adjacency = np.zeros(shape=(len(canon_p_ids), user_p_num), dtype=bool) - - for i, p_id in enumerate(canon_p_ids): - - # compute affine mapping for each canonical parameter - mapping_rows = [] - mapping = [] - indices = [] - indptr = [] - shape = () - - if solver_name == 'OSQP': - - if p_id == 'P': - mapping = p_prob.reduced_P.reduced_mat - indices = indices_obj - shape = (n_var, n_var) - elif p_id == 'q': - mapping = p_prob.q[:-1] - elif p_id == 'd': - mapping = p_prob.q[-1] - elif p_id == 'A': - mapping = p_prob.reduced_A.reduced_mat[:n_data_constr_mat] - indices = indices_constr[:n_data_constr_mat] - shape = (n_eq+n_ineq, n_var) - elif p_id == 'l': - mapping_rows_eq = np.nonzero(indices_constr < n_eq)[0] - mapping_rows = mapping_rows_eq[mapping_rows_eq >= n_data_constr_mat] # mapping to the finite part of l - indices = indices_constr[mapping_rows] - shape = (n_eq, 1) - elif p_id == 'u': - mapping_rows = np.arange(n_data_constr_mat, n_data_constr) - indices = indices_constr[mapping_rows] - shape = (n_eq+n_ineq, 1) - else: - raise ValueError('Unknown OSQP parameter name: "%s"' % p_id) - - elif solver_name in ['SCS', 'ECOS']: - - if p_id == 'c': - mapping = p_prob.c[:-1] - elif p_id == 'd': - mapping = p_prob.c[-1] - elif p_id == 'A': - mapping_rows = mapping_rows_eq[mapping_rows_eq < n_data_constr_mat] - shape = (n_eq, n_var) - elif p_id == 'G': - mapping_rows = mapping_rows_ineq[mapping_rows_ineq < n_data_constr_mat] - shape = (n_ineq, n_var) - elif p_id == 'b': - mapping_rows = mapping_rows_eq[mapping_rows_eq >= n_data_constr_mat] - shape = (n_eq, 1) - elif p_id == 'h': - mapping_rows = mapping_rows_ineq[mapping_rows_ineq >= n_data_constr_mat] - shape = (n_ineq, 1) - else: - raise ValueError('Unknown %s parameter name: "%s"' % (solver_name, p_id)) - - if p_id in ['A', 'b']: - indices = indices_constr[mapping_rows] - elif p_id in ['G', 'h']: - indices = indices_constr[mapping_rows] - n_eq - - if p_id.isupper(): - mapping = -p_prob.reduced_A.reduced_mat[mapping_rows] - - if p_id in canon_p_ids_constr_vec: - mapping_to_sparse = sign_constr_vec*p_prob.reduced_A.reduced_mat[mapping_rows] - mapping_to_dense = 
sparse.lil_matrix(np.zeros((shape[0], mapping_to_sparse.shape[1]))) - for i_data in range(mapping_to_sparse.shape[0]): - mapping_to_dense[indices[i_data], :] = mapping_to_sparse[i_data, :] - mapping = sparse.csc_matrix(mapping_to_dense) - - if p_id == 'd': - nonzero_d = mapping.nnz > 0 - - # compute adjacency matrix - for j in range(user_p_num): - column_slice = slice(user_p_id_to_col[user_p_ids[j]], user_p_id_to_col[user_p_ids[j + 1]]) - if mapping[:, column_slice].nnz > 0: - adjacency[i, j] = True - - # take sparsity into account - mapping = mapping[:, user_p_sparsity_mask] - - # compute default values of canonical parameters - if p_id.isupper(): - rows_nonzero, _ = mapping.nonzero() - canon_p_data_nonzero = np.sort(np.unique(rows_nonzero)) - mapping = mapping[canon_p_data_nonzero, :] - canon_p_data = mapping @ user_p_flat_usp - # compute 'indptr' to construct sparse matrix from 'canon_p_data' and 'indices' - if solver_name in ['OSQP', 'SCS']: - if p_id == 'P': - indptr = indptr_obj - elif p_id == 'A': - indptr = indptr_constr[:-1] - elif solver_name == 'ECOS': - indptr_original = indptr_constr[:-1] - indptr = 0 * indptr_original - for r in mapping_rows: - for c in range(shape[1]): - if indptr_original[c] <= r < indptr_original[c + 1]: - indptr[c + 1:] += 1 - break - # compute 'indices_usp' and 'indptr_usp' - indices_usp = indices[canon_p_data_nonzero] - indptr_usp = 0 * indptr - for r in canon_p_data_nonzero: - for c in range(shape[1]): - if indptr[c] <= r < indptr[c + 1]: - indptr_usp[c + 1:] += 1 - break - csc_mat = sparse.csc_matrix((canon_p_data, indices_usp, indptr_usp), shape=shape) - if solver_name == 'OSQP': - canon_p_csc[p_id] = csc_mat - canon_p[p_id] = utils.csc_to_dict(csc_mat) - else: - canon_p_data = mapping @ user_p_flat_usp - if solver_name == 'OSQP' and p_id == 'l': - canon_p[p_id] = np.concatenate((canon_p_data, -np.inf * np.ones(n_ineq)), axis=0) - else: - canon_p[p_id] = canon_p_data - - canon_p_id_to_mapping[p_id] = mapping.tocsr() - canon_p_id_to_changes[p_id] = mapping[:, :-1].nnz > 0 - canon_p_id_to_size[p_id] = mapping.shape[0] - - if solver_name == 'OSQP': - - # solver settings - settings_names = ['rho', 'max_iter', 'eps_abs', 'eps_rel', 'eps_prim_inf', 'eps_dual_inf', 'alpha', - 'scaled_termination', 'check_termination', 'warm_start'] - settings_types = ['c_float', 'c_int', 'c_float', 'c_float', 'c_float', 'c_float', 'c_float', 'c_int', 'c_int', - 'c_int'] - settings_defaults = [] - - # OSQP codegen - osqp_obj = osqp.OSQP() - osqp_obj.setup(P=canon_p_csc['P'], q=canon_p['q'], A=canon_p_csc['A'], l=canon_p['l'], u=canon_p['u']) - if system() == 'Windows': - cmake_generator = 'MinGW Makefiles' - elif system() == 'Linux' or system() == 'Darwin': - cmake_generator = 'Unix Makefiles' - else: - raise OSError('Unknown operating system!') - osqp_obj.codegen(os.path.join(code_dir, 'c', 'solver_code'), project_type=cmake_generator, - parameters='matrices', force_rewrite=True) - - # copy license files - shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'osqp-python', 'LICENSE'), - os.path.join(solver_code_dir, 'LICENSE')) - shutil.copy(os.path.join(cvxpygen_directory, 'template', 'LICENSE'), code_dir) - - elif solver_name == 'SCS': - - # solver settings - settings_names = ['normalize', 'scale', 'adaptive_scale', 'rho_x', 'max_iters', 'eps_abs', 'eps_rel', - 'eps_infeas', 'alpha', 'time_limit_secs', 'verbose', 'warm_start', 'acceleration_lookback', - 'acceleration_interval', 'write_data_filename', 'log_csv_filename'] - settings_types = ['c_int', 'c_float', 
'c_int', 'c_float', 'c_int', 'c_float', 'c_float', 'c_float', 'c_float', - 'c_float', 'c_int', 'c_int', 'c_int', 'c_int', 'const char*', 'const char*'] - settings_defaults = ['1', '0.1', '1', '1e-6', '1e5', '1e-4', '1e-4', '1e-7', '1.5', '0', '0', '0', '0', '1', - 'SCS_NULL', 'SCS_NULL'] - - # copy sources - if os.path.isdir(solver_code_dir): - shutil.rmtree(solver_code_dir) - os.mkdir(solver_code_dir) - dirs_to_copy = ['src', 'include', 'linsys', 'cmake'] - for dtc in dirs_to_copy: - shutil.copytree(os.path.join(cvxpygen_directory, 'solvers', 'scs', dtc), os.path.join(solver_code_dir, dtc)) - files_to_copy = ['scs.mk', 'CMakeLists.txt', 'LICENSE.txt'] - for fl in files_to_copy: - shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'scs', fl), - os.path.join(solver_code_dir, fl)) - shutil.copy(os.path.join(cvxpygen_directory, 'template', 'LICENSE'), code_dir) - - # disable BLAS and LAPACK - with open(os.path.join(code_dir, 'c', 'solver_code', 'scs.mk'), 'r') as f: - scs_mk_data = f.read() - scs_mk_data = scs_mk_data.replace('USE_LAPACK = 1', 'USE_LAPACK = 0') - with open(os.path.join(code_dir, 'c', 'solver_code', 'scs.mk'), 'w') as f: - f.write(scs_mk_data) - - # modify CMakeLists.txt - with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'r') as f: - cmake_data = f.read() - cmake_data = cmake_data.replace(' include/', ' ${CMAKE_CURRENT_SOURCE_DIR}/include/') - cmake_data = cmake_data.replace(' src/', ' ${CMAKE_CURRENT_SOURCE_DIR}/src/') - cmake_data = cmake_data.replace(' ${LINSYS}/', ' ${CMAKE_CURRENT_SOURCE_DIR}/${LINSYS}/') - with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'w') as f: - f.write(cmake_data) - - # adjust top-level CMakeLists.txt - with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'r') as f: - cmake_data = f.read() - indent = ' ' * 6 - sdir = '${CMAKE_CURRENT_SOURCE_DIR}/solver_code/' - cmake_data = cmake_data.replace(sdir + 'include', - sdir + 'include\n' + - indent + sdir + 'linsys') - with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'w') as f: - f.write(cmake_data) - - # adjust setup.py - with open(os.path.join(code_dir, 'setup.py'), 'r') as f: - setup_text = f.read() - indent = ' ' * 30 - setup_text = setup_text.replace("os.path.join('c', 'solver_code', 'include'),", - "os.path.join('c', 'solver_code', 'include'),\n" + - indent + "os.path.join('c', 'solver_code', 'linsys'),") - with open(os.path.join(code_dir, 'setup.py'), 'w') as f: - f.write(setup_text) - - elif solver_name == 'ECOS': - - # solver settings - settings_names = ['feastol', 'abstol', 'reltol', 'feastol_inacc', 'abstol_inacc', 'reltol_inacc', 'maxit'] - settings_types = ['c_float', 'c_float', 'c_float', 'c_float', 'c_float', 'c_float', 'c_int'] - settings_defaults = ['1e-8', '1e-8', '1e-8', '1e-4', '5e-5', '5e-5', '100'] - - # copy sources - if os.path.isdir(solver_code_dir): - shutil.rmtree(solver_code_dir) - os.mkdir(solver_code_dir) - dirs_to_copy = ['src', 'include', 'external', 'ecos_bb'] - for dtc in dirs_to_copy: - shutil.copytree(os.path.join(cvxpygen_directory, 'solvers', 'ecos', dtc), os.path.join(solver_code_dir, dtc)) - shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'CMakeLists.txt'), - os.path.join(solver_code_dir, 'CMakeLists.txt')) - shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'COPYING'), - os.path.join(solver_code_dir, 'COPYING')) - shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'COPYING'), - os.path.join(code_dir, 'COPYING')) - - # adjust print level - with 
open(os.path.join(code_dir, 'c', 'solver_code', 'include', 'glblopts.h'), 'r') as f: - glbl_opts_data = f.read() - glbl_opts_data = glbl_opts_data.replace('#define PRINTLEVEL (2)', '#define PRINTLEVEL (0)') - with open(os.path.join(code_dir, 'c', 'solver_code', 'include', 'glblopts.h'), 'w') as f: - f.write(glbl_opts_data) - - # adjust top-level CMakeLists.txt - with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'r') as f: - cmake_data = f.read() - indent = ' ' * 6 - sdir = '${CMAKE_CURRENT_SOURCE_DIR}/solver_code/' - cmake_data = cmake_data.replace(sdir + 'include', - sdir + 'include\n' + - indent + sdir + 'external/SuiteSparse_config\n' + - indent + sdir + 'external/amd/include\n' + - indent + sdir + 'external/ldl/include') - with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'w') as f: - f.write(cmake_data) - - # remove library target from ECOS CMakeLists.txt - with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'r') as f: - lines = f.readlines() - with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'w') as f: - for line in lines: - if '# ECOS library' in line: - break - f.write(line) - - # adjust setup.py - with open(os.path.join(code_dir, 'setup.py'), 'r') as f: - setup_text = f.read() - indent = ' ' * 30 - setup_text = setup_text.replace("os.path.join('c', 'solver_code', 'include'),", - "os.path.join('c', 'solver_code', 'include'),\n" + - indent+"os.path.join('c', 'solver_code', 'external', 'SuiteSparse_config'),\n" + - indent+"os.path.join('c', 'solver_code', 'external', 'amd', 'include'),\n" + - indent+"os.path.join('c', 'solver_code', 'external', 'ldl', 'include'),") - setup_text = setup_text.replace("license='Apache 2.0'", "license='GPL 3.0'") - with open(os.path.join(code_dir, 'setup.py'), 'w') as f: - f.write(setup_text) - - else: - raise ValueError("Problem class cannot be addressed by the OSQP, SCS, or ECOS solver!") - - user_p_name_to_canon_outdated = {user_p_name: [canon_p_ids[j] for j in np.nonzero(adjacency[:, i])[0]] - for i, user_p_name in enumerate(user_p_names)} - settings_names_to_type = {name: typ for name, typ in zip(settings_names, settings_types)} - settings_names_to_default = {name: typ for name, typ in zip(settings_names, settings_defaults)} - - ret_prim_func_exists = any(var_sym) or any([s == 1 for s in var_sizes]) or solver_name == 'ECOS' - ret_dual_func_exists = any([s == 1 for s in d_sizes]) or solver_name == 'ECOS' + mapping_rows_eq = np.nonzero(solver_interface.indices_constr < solver_interface.n_eq)[0] + mapping_rows_ineq = np.nonzero(solver_interface.indices_constr >= solver_interface.n_eq)[0] - # summarize information on options, codegen, user parameters / variables, canonicalization in dictionaries + return ConstraintInfo(n_data_constr, n_data_constr_mat, mapping_rows_eq, mapping_rows_ineq) - info_opt = {C.CODE_DIR: code_dir, - C.SOLVER_NAME: solver_name, - C.UNROLL: unroll, - C.PREFIX: prefix} - info_cg = {C.RET_PRIM_FUNC_EXISTS: ret_prim_func_exists, - C.RET_DUAL_FUNC_EXISTS: ret_dual_func_exists, - C.NONZERO_D: nonzero_d, - C.IS_MAXIMIZATION: type(problem.objective) == Maximize} +def update_adjacency_matrix(adjacency, i, parameter_info, mapping) -> np.ndarray: + # compute adjacency matrix + for j in range(parameter_info.num): + column_slice = slice(parameter_info.id_to_col[parameter_info.ids[j]], + parameter_info.id_to_col[parameter_info.ids[j + 1]]) + if mapping[:, column_slice].nnz > 0: + adjacency[i, j] = True + return adjacency - info_usr = {C.P_WRITABLE: user_p_writable, - C.P_FLAT_USP: 
user_p_flat_usp, - C.P_COL_TO_NAME_USP: user_p_col_to_name_usp, - C.P_NAME_TO_SHAPE: user_p_name_to_shape, - C.P_NAME_TO_SIZE: user_p_name_to_size_usp, - C.P_NAME_TO_CANON_OUTDATED: user_p_name_to_canon_outdated, - C.P_NAME_TO_SPARSITY: user_p_name_to_sparsity, - C.P_NAME_TO_SPARSITY_TYPE: user_p_name_to_sparsity_type, - C.V_NAME_TO_INDICES: var_name_to_indices, - C.V_NAME_TO_SIZE: var_name_to_size, - C.V_NAME_TO_SHAPE: var_name_to_shape, - C.V_NAME_TO_INIT: var_name_to_init, - C.V_NAME_TO_SYM: var_name_to_sym, - C.V_NAME_TO_OFFSET: var_name_to_offset, - C.D_NAME_TO_INIT: d_name_to_init, - C.D_NAME_TO_VEC: d_name_to_vec, - C.D_NAME_TO_OFFSET: d_name_to_offset, - C.D_NAME_TO_SIZE: d_name_to_size, - C.D_NAME_TO_SHAPE: d_name_to_shape, - C.D_NAME_TO_INDICES: d_name_to_indices} - - info_can = {C.P: canon_p, - C.P_ID_TO_SIZE: canon_p_id_to_size, - C.P_ID_TO_CHANGES: canon_p_id_to_changes, - C.P_ID_TO_MAPPING: canon_p_id_to_mapping, - C.CONSTANTS: canon_constants, - C.SETTINGS_NAMES_TO_TYPE: settings_names_to_type, - C.SETTINGS_NAMES_TO_DEFAULT: settings_names_to_default, - C.SETTINGS_LIST: settings_names} +def write_c_code(code_dir: str, info_opt: dict, info_cg: dict, info_can: dict, info_usr: dict, + problem: cp.Problem) -> None: # 'workspace' prototypes with open(os.path.join(code_dir, 'c', 'include', 'cpg_workspace.h'), 'w') as f: utils.write_workspace_prot(f, info_opt, info_usr, info_can) - # 'workspace' definitions with open(os.path.join(code_dir, 'c', 'src', 'cpg_workspace.c'), 'w') as f: utils.write_workspace_def(f, info_opt, info_usr, info_can) - # 'solve' prototypes with open(os.path.join(code_dir, 'c', 'include', 'cpg_solve.h'), 'w') as f: utils.write_solve_prot(f, info_opt, info_cg, info_usr, info_can) - # 'solve' definitions with open(os.path.join(code_dir, 'c', 'src', 'cpg_solve.c'), 'w') as f: utils.write_solve_def(f, info_opt, info_cg, info_usr, info_can) - # 'example' definitions with open(os.path.join(code_dir, 'c', 'src', 'cpg_example.c'), 'w') as f: utils.write_example_def(f, info_opt, info_usr) - # adapt top-level CMakeLists.txt with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'r') as f: cmake_data = f.read() cmake_data = utils.replace_cmake_data(cmake_data, info_opt) with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'w') as f: f.write(cmake_data) - # adapt solver CMakeLists.txt with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'a') as f: utils.write_canon_cmake(f, info_opt) - # binding module prototypes with open(os.path.join(code_dir, 'cpp', 'include', 'cpg_module.hpp'), 'w') as f: utils.write_module_prot(f, info_opt, info_usr) - # binding module definition with open(os.path.join(code_dir, 'cpp', 'src', 'cpg_module.cpp'), 'w') as f: utils.write_module_def(f, info_opt, info_usr, info_can) - # adapt setup.py with open(os.path.join(code_dir, 'setup.py'), 'r') as f: setup_data = f.read() setup_data = utils.replace_setup_data(setup_data) with open(os.path.join(code_dir, 'setup.py'), 'w') as f: f.write(setup_data) - # custom CVXPY solve method with open(os.path.join(code_dir, 'cpg_solver.py'), 'w') as f: utils.write_method(f, info_opt, info_usr) - # serialize problem formulation with open(os.path.join(code_dir, 'problem.pickle'), 'wb') as f: pickle.dump(cp.Problem(problem.objective, problem.constraints), f) - # html documentation file with open(os.path.join(code_dir, 'README.html'), 'r') as f: html_data = f.read() @@ -703,13 +395,136 @@ def user_p_value(user_p_id): with open(os.path.join(code_dir, 'README.html'), 'w') as f: f.write(html_data) 
- sys.stdout.write('CVXPYgen finished generating code.\n') - # compile python module - if wrapper: - sys.stdout.write('Compiling python wrapper with CVXPYgen ... \n') - p_dir = os.getcwd() - os.chdir(code_dir) - call([sys.executable, 'setup.py', '--quiet', 'build_ext', '--inplace']) - os.chdir(p_dir) - sys.stdout.write("CVXPYgen finished compiling python wrapper.\n") +def adjust_problem_name(prefix): + if prefix != '': + if not prefix[0].isalpha(): + prefix = '_' + prefix + prefix = prefix + '_' + return prefix + + +def get_parameter_info(p_prob) -> ParameterInfo: + user_p_num = len(p_prob.parameters) + user_p_names = [par.name() for par in p_prob.parameters] + user_p_ids = list(p_prob.param_id_to_col.keys()) + user_p_id_to_col = p_prob.param_id_to_col + user_p_id_to_size = p_prob.param_id_to_size + user_p_id_to_param = p_prob.id_to_param + user_p_total_size = p_prob.total_param_size + user_p_name_to_shape = {user_p_id_to_param[p_id].name(): user_p_id_to_param[p_id].shape + for p_id in user_p_id_to_size.keys()} + user_p_name_to_size_usp = {user_p_id_to_param[p_id].name(): size for p_id, size in + user_p_id_to_size.items()} + user_p_name_to_sparsity = {} + user_p_name_to_sparsity_type = {} + user_p_sparsity_mask = np.ones(user_p_total_size + 1, dtype=bool) + for p in p_prob.parameters: + if p.attributes['sparsity'] is not None: + user_p_name_to_size_usp[p.name()] = len(p.attributes['sparsity']) + user_p_name_to_sparsity[p.name()] = np.sort([coord[0] + p.shape[0] * coord[1] + for coord in p.attributes['sparsity']]) + if p.attributes['diag']: + user_p_name_to_sparsity_type[p.name()] = 'diag' + else: + user_p_name_to_sparsity_type[p.name()] = 'general' + user_p_sparsity_mask[ + user_p_id_to_col[p.id]:user_p_id_to_col[p.id] + user_p_id_to_size[p.id]] = False + user_p_sparsity_mask[user_p_id_to_col[p.id] + user_p_name_to_sparsity[p.name()]] = True + user_p_col_to_name_usp = {} + cum_sum = 0 + for name, size in user_p_name_to_size_usp.items(): + user_p_col_to_name_usp[cum_sum] = name + cum_sum += size + user_p_writable = dict() + for p_name, p in zip(user_p_names, p_prob.parameters): + if p.value is None: + p.project_and_assign(np.random.randn(*p.shape)) + if type(p.value) is sparse.dia_matrix: + p.value = p.value.toarray() + if len(p.shape) < 2: + # dealing with scalar or vector + user_p_writable[p_name] = p.value + else: + # dealing with matrix + if p_name in user_p_name_to_sparsity.keys(): + dense_value = p.value.flatten(order='F') + sparse_value = np.zeros(len(user_p_name_to_sparsity[p_name])) + for i, idx in enumerate(user_p_name_to_sparsity[p_name]): + sparse_value[i] = dense_value[idx] + user_p_writable[p_name] = sparse_value + else: + user_p_writable[p_name] = p.value.flatten(order='F') + + def user_p_value(user_p_id): + """ + Returns the value of the parameter with the given ID. 
+ """ + return np.array(user_p_id_to_param[user_p_id].value) + + user_p_flat = cI.get_parameter_vector(user_p_total_size, user_p_id_to_col, user_p_id_to_size, + user_p_value) + user_p_flat_usp = user_p_flat[user_p_sparsity_mask] + parameter_info = ParameterInfo(user_p_col_to_name_usp, user_p_flat_usp, user_p_id_to_col, + user_p_ids, user_p_name_to_shape, user_p_name_to_size_usp, + user_p_name_to_sparsity, user_p_name_to_sparsity_type, + user_p_names, user_p_num, user_p_sparsity_mask, user_p_writable) + return parameter_info + + +def handle_sparsity(p_prob: cp.Problem) -> None: + for p in p_prob.parameters: + if p.attributes['sparsity'] is not None: + if p.size == 1: + warnings.warn('Ignoring sparsity pattern for scalar parameter %s!' % p.name()) + p.attributes['sparsity'] = None + elif max(p.shape) == p.size: + warnings.warn('Ignoring sparsity pattern for vector parameter %s!' % p.name()) + p.attributes['sparsity'] = None + else: + for coord in p.attributes['sparsity']: + if coord[0] < 0 or coord[1] < 0 or coord[0] >= p.shape[0] or coord[1] >= \ + p.shape[1]: + warnings.warn('Invalid sparsity pattern for parameter %s - out of range! ' + 'Ignoring sparsity pattern.' % p.name()) + p.attributes['sparsity'] = None + break + p.attributes['sparsity'] = list(set(p.attributes['sparsity'])) + elif p.attributes['diag']: + p.attributes['sparsity'] = [(i, i) for i in range(p.shape[0])] + if p.attributes['sparsity'] is not None and p.value is not None: + for i in range(p.shape[0]): + for j in range(p.shape[1]): + if (i, j) not in p.attributes['sparsity'] and p.value[i, j] != 0: + warnings.warn( + 'Ignoring nonzero value outside of sparsity pattern for parameter %s!' % p.name()) + p.value[i, j] = 0 + + +def compile_python_module(code_dir: str): + sys.stdout.write('Compiling python wrapper with CVXPYgen ... 
\n') + p_dir = os.getcwd() + os.chdir(code_dir) + call([sys.executable, 'setup.py', '--quiet', 'build_ext', '--inplace']) + os.chdir(p_dir) + sys.stdout.write("CVXPYgen finished compiling python wrapper.\n") + + +def create_folder_structure(code_dir: str): + cvxpygen_directory = os.path.dirname(os.path.realpath(__file__)) + + # create code directory and copy template files + if os.path.isdir(code_dir): + shutil.rmtree(code_dir) + os.mkdir(code_dir) + os.mkdir(os.path.join(code_dir, 'c')) + for d in ['src', 'include', 'build']: + os.mkdir(os.path.join(code_dir, 'c', d)) + os.mkdir(os.path.join(code_dir, 'cpp')) + for d in ['src', 'include']: + os.mkdir(os.path.join(code_dir, 'cpp', d)) + shutil.copy(os.path.join(cvxpygen_directory, 'template', 'CMakeLists.txt'), + os.path.join(code_dir, 'c')) + for file in ['setup.py', 'README.html', '__init__.py']: + shutil.copy(os.path.join(cvxpygen_directory, 'template', file), code_dir) + return cvxpygen_directory diff --git a/cvxpygen/mappings.py b/cvxpygen/mappings.py new file mode 100644 index 0000000..91b6238 --- /dev/null +++ b/cvxpygen/mappings.py @@ -0,0 +1,68 @@ +from dataclasses import dataclass, field + +import scipy.sparse as sp +import numpy as np + + +@dataclass +class AffineMap: + mapping_rows: list = field(default_factory=list) + mapping: list = field(default_factory=list) + indices: list = field(default_factory=list) + indptr: list = field(default_factory=list) + shape = () + + +@dataclass +class ParameterCanon: + p: dict = field(default_factory=dict) + p_csc: dict[str, sp.csc_matrix] = field(default_factory=dict) + p_id_to_mapping: dict[str, sp.csr_matrix] = field(default_factory=dict) + p_id_to_changes: dict[str, bool] = field(default_factory=dict) + p_id_to_size: dict[str, int] = field(default_factory=dict) + nonzero_d = True + + +@dataclass +class ParameterInfo: + col_to_name_usp: dict[int, str] + flat_usp: np.ndarray + id_to_col: dict[int, int] + ids: list[int] + name_to_shape: dict[str, tuple] + name_to_size_usp: dict[str, int] + name_to_sparsity: dict[str, np.ndarray] + name_to_sparsity_type: dict[str, str] + names: list[str] + num: int + sparsity_mask: np.ndarray + writable: dict[str, np.ndarray] + + +@dataclass +class VariableInfo: + name_to_offset: dict[str, int] + name_to_indices: dict[str, np.ndarray] + name_to_size: dict[str, int] + sizes: list[int] + name_to_shape: dict[str, tuple] + name_to_init: dict[str, np.ndarray] + + +@dataclass +class PrimalVariableInfo(VariableInfo): + name_to_sym: dict[str, bool] + sym: list[bool] + + +@dataclass +class DualVariableInfo(VariableInfo): + name_to_vec: dict[str, str] + + +@dataclass +class ConstraintInfo: + n_data_constr: int + n_data_constr_mat: int + mapping_rows_eq: np.ndarray + mapping_rows_ineq: np.ndarray diff --git a/cvxpygen/solvers.py b/cvxpygen/solvers.py new file mode 100644 index 0000000..8fe3236 --- /dev/null +++ b/cvxpygen/solvers.py @@ -0,0 +1,416 @@ +import os +import shutil +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from platform import system + +import numpy as np + +from cvxpygen.mappings import PrimalVariableInfo, DualVariableInfo, ConstraintInfo, AffineMap, \ + ParameterCanon + + +def get_interface_class(solver_name: str) -> "SolverInterface": + mapping = { + 'OSQP': OSQPInterface, + 'SCS': SCSInterface, + 'ECOS': ECOSInterface, + } + interface = mapping.get(solver_name, None) + if interface is None: + raise ValueError(f'Unsupported solver: {solver_name}.') + return interface + + +class SolverInterface(ABC): + + def 
__init__(self, n_var, n_eq, n_ineq, indices_obj, indptr_obj, shape_obj, indices_constr, + indptr_constr, shape_constr, canon_constants): + self.n_var = n_var + self.n_eq = n_eq + self.n_ineq = n_ineq + self.indices_obj = indices_obj + self.indptr_obj = indptr_obj + self.shape_obj = shape_obj + self.indices_constr = indices_constr + self.indptr_constr = indptr_constr + self.shape_constr = shape_constr + self.canon_constants = canon_constants + + @property + @abstractmethod + def canon_p_ids(self): + pass + + @property + @abstractmethod + def canon_p_ids_constr_vec(self): + pass + + @property + @abstractmethod + def sign_constr_vec(self): + pass + + @property + @abstractmethod + def settings_names(self): + pass + + @property + @abstractmethod + def settings_types(self): + pass + + @property + @abstractmethod + def settings_defaults(self): + pass + + @staticmethod + def ret_prim_func_exists(variable_info: PrimalVariableInfo) -> bool: + return any(variable_info.sym) or any([s == 1 for s in variable_info.sizes]) + + @staticmethod + def ret_dual_func_exists(dual_variable_info: DualVariableInfo) -> bool: + return any([s == 1 for s in dual_variable_info.sizes]) + + def get_affine_map(self, p_id, param_prob, constraint_info: ConstraintInfo) -> AffineMap: + affine_map = AffineMap() + + if p_id == 'c': + affine_map.mapping = param_prob.c[:-1] + elif p_id == 'd': + affine_map.mapping = param_prob.c[-1] + elif p_id == 'A': + affine_map.mapping_rows = constraint_info.mapping_rows_eq[ + constraint_info.mapping_rows_eq < constraint_info.n_data_constr_mat] + affine_map.shape = (self.n_eq, self.n_var) + elif p_id == 'G': + affine_map.mapping_rows = constraint_info.mapping_rows_ineq[ + constraint_info.mapping_rows_ineq < constraint_info.n_data_constr_mat] + affine_map.shape = (self.n_ineq, self.n_var) + elif p_id == 'b': + affine_map.mapping_rows = constraint_info.mapping_rows_eq[ + constraint_info.mapping_rows_eq >= constraint_info.n_data_constr_mat] + affine_map.shape = (self.n_eq, 1) + elif p_id == 'h': + affine_map.mapping_rows = constraint_info.mapping_rows_ineq[ + constraint_info.mapping_rows_ineq >= constraint_info.n_data_constr_mat] + affine_map.shape = (self.n_ineq, 1) + else: + raise ValueError(f'Unknown parameter name: {p_id}.') + + if p_id in ['A', 'b']: + affine_map.indices = self.indices_constr[affine_map.mapping_rows] + elif p_id in ['G', 'h']: + affine_map.indices = self.indices_constr[affine_map.mapping_rows] - self.n_eq + + if p_id.isupper(): + affine_map.mapping = -param_prob.reduced_A.reduced_mat[affine_map.mapping_rows] + + return affine_map + + @property + def settings_names_to_type(self): + return {name: typ for name, typ in zip(self.settings_names, self.settings_types)} + + @property + def settings_names_to_default(self): + return {name: typ for name, typ in zip(self.settings_names, self.settings_defaults)} + + @staticmethod + def check_unsupported_cones(cone_dims: "ConeDims") -> None: + pass + + @abstractmethod + def generate_code(self, code_dir, solver_code_dir, cvxpygen_directory, + parameter_canon: ParameterCanon) -> None: + pass + + +class OSQPInterface(SolverInterface): + canon_p_ids = ['P', 'q', 'd', 'A', 'l', 'u'] + canon_p_ids_constr_vec = ['l', 'u'] + sign_constr_vec = -1 + + # solver settings + settings_names = ['rho', 'max_iter', 'eps_abs', 'eps_rel', 'eps_prim_inf', 'eps_dual_inf', + 'alpha', + 'scaled_termination', 'check_termination', 'warm_start'] + settings_types = ['c_float', 'c_int', 'c_float', 'c_float', 'c_float', 'c_float', 'c_float', + 'c_int', 'c_int', + 
'c_int'] + settings_defaults = [] + + def __init__(self, data, p_prob): + n_var = data['n_var'] + n_eq = data['n_eq'] + n_ineq = data['n_ineq'] + + indices_obj, indptr_obj, shape_obj = p_prob.reduced_P.problem_data_index + indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index + + canon_constants = {} + + super().__init__(n_var, n_eq, n_ineq, indices_obj, indptr_obj, shape_obj, + indices_constr, + indptr_constr, shape_constr, canon_constants) + + def generate_code(self, code_dir, solver_code_dir, cvxpygen_directory, + parameter_canon: ParameterCanon) -> None: + import osqp + + # OSQP codegen + osqp_obj = osqp.OSQP() + osqp_obj.setup(P=parameter_canon.p_csc['P'], q=parameter_canon.p['q'], + A=parameter_canon.p_csc['A'], l=parameter_canon.p['l'], + u=parameter_canon.p['u']) + if system() == 'Windows': + cmake_generator = 'MinGW Makefiles' + elif system() == 'Linux' or system() == 'Darwin': + cmake_generator = 'Unix Makefiles' + else: + raise ValueError(f'Unsupported OS {system()}.') + + osqp_obj.codegen(os.path.join(code_dir, 'c', 'solver_code'), project_type=cmake_generator, + parameters='matrices', force_rewrite=True) + + # copy license files + shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'osqp-python', 'LICENSE'), + os.path.join(solver_code_dir, 'LICENSE')) + shutil.copy(os.path.join(cvxpygen_directory, 'template', 'LICENSE'), code_dir) + + def get_affine_map(self, p_id, param_prob, constraint_info: ConstraintInfo) -> AffineMap: + affine_map = AffineMap() + + if p_id == 'P': + affine_map.mapping = param_prob.reduced_P.reduced_mat + affine_map.indices = self.indices_obj + affine_map.shape = (self.n_var, self.n_var) + elif p_id == 'q': + affine_map.mapping = param_prob.q[:-1] + elif p_id == 'd': + affine_map.mapping = param_prob.q[-1] + elif p_id == 'A': + affine_map.mapping = param_prob.reduced_A.reduced_mat[ + :constraint_info.n_data_constr_mat] + affine_map.indices = self.indices_constr[:constraint_info.n_data_constr_mat] + affine_map.shape = (self.n_eq + self.n_ineq, self.n_var) + elif p_id == 'l': + mapping_rows_eq = np.nonzero(self.indices_constr < self.n_eq)[0] + affine_map.mapping_rows = mapping_rows_eq[ + mapping_rows_eq >= constraint_info.n_data_constr_mat] # mapping to the finite part of l + affine_map.indices = self.indices_constr[affine_map.mapping_rows] + affine_map.shape = (self.n_eq, 1) + elif p_id == 'u': + affine_map.mapping_rows = np.arange(constraint_info.n_data_constr_mat, + constraint_info.n_data_constr) + affine_map.indices = self.indices_constr[affine_map.mapping_rows] + affine_map.shape = (self.n_eq + self.n_ineq, 1) + else: + raise ValueError(f'Unknown OSQP parameter name {p_id}.') + + return affine_map + + +class SCSInterface(SolverInterface): + canon_p_ids = ['c', 'd', 'A', 'b'] + canon_p_ids_constr_vec = ['b'] + sign_constr_vec = 1 + + # solver settings + settings_names = ['normalize', 'scale', 'adaptive_scale', 'rho_x', 'max_iters', 'eps_abs', + 'eps_rel', + 'eps_infeas', 'alpha', 'time_limit_secs', 'verbose', 'warm_start', + 'acceleration_lookback', + 'acceleration_interval', 'write_data_filename', 'log_csv_filename'] + settings_types = ['c_int', 'c_float', 'c_int', 'c_float', 'c_int', 'c_float', 'c_float', + 'c_float', 'c_float', + 'c_float', 'c_int', 'c_int', 'c_int', 'c_int', 'const char*', 'const char*'] + settings_defaults = ['1', '0.1', '1', '1e-6', '1e5', '1e-4', '1e-4', '1e-7', '1.5', '0', '0', + '0', '0', '1', + 'SCS_NULL', 'SCS_NULL'] + + def __init__(self, data, p_prob): + n_var = p_prob.x.size + n_eq = 
data['A'].shape[0] + n_ineq = 0 + + indices_obj, indptr_obj, shape_obj = None, None, None + + indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index + + canon_constants = {'n': n_var, 'm': n_eq, 'z': p_prob.cone_dims.zero, + 'l': p_prob.cone_dims.nonneg, + 'q': np.array(p_prob.cone_dims.soc), + 'qsize': len(p_prob.cone_dims.soc)} + + super().__init__(n_var, n_eq, n_ineq, indices_obj, indptr_obj, shape_obj, + indices_constr, + indptr_constr, shape_constr, canon_constants) + + @staticmethod + def check_unsupported_cones(cone_dims: "ConeDims") -> None: + if cone_dims.exp > 0 or len(cone_dims.psd) > 0 or len(cone_dims.p3d) > 0: + raise ValueError( + 'Code generation with SCS and exponential, positive semidefinite, or power cones ' + 'is not supported yet.') + + def generate_code(self, code_dir, solver_code_dir, cvxpygen_directory, + parameter_canon: ParameterCanon) -> None: + + # copy sources + if os.path.isdir(solver_code_dir): + shutil.rmtree(solver_code_dir) + os.mkdir(solver_code_dir) + dirs_to_copy = ['src', 'include', 'linsys', 'cmake'] + for dtc in dirs_to_copy: + shutil.copytree(os.path.join(cvxpygen_directory, 'solvers', 'scs', dtc), + os.path.join(solver_code_dir, dtc)) + files_to_copy = ['scs.mk', 'CMakeLists.txt', 'LICENSE.txt'] + for fl in files_to_copy: + shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'scs', fl), + os.path.join(solver_code_dir, fl)) + shutil.copy(os.path.join(cvxpygen_directory, 'template', 'LICENSE'), code_dir) + + # disable BLAS and LAPACK + with open(os.path.join(code_dir, 'c', 'solver_code', 'scs.mk'), 'r') as f: + scs_mk_data = f.read() + scs_mk_data = scs_mk_data.replace('USE_LAPACK = 1', 'USE_LAPACK = 0') + with open(os.path.join(code_dir, 'c', 'solver_code', 'scs.mk'), 'w') as f: + f.write(scs_mk_data) + + # modify CMakeLists.txt + with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'r') as f: + cmake_data = f.read() + cmake_data = cmake_data.replace(' include/', ' ${CMAKE_CURRENT_SOURCE_DIR}/include/') + cmake_data = cmake_data.replace(' src/', ' ${CMAKE_CURRENT_SOURCE_DIR}/src/') + cmake_data = cmake_data.replace(' ${LINSYS}/', ' ${CMAKE_CURRENT_SOURCE_DIR}/${LINSYS}/') + with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'w') as f: + f.write(cmake_data) + + # adjust top-level CMakeLists.txt + with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'r') as f: + cmake_data = f.read() + indent = ' ' * 6 + sdir = '${CMAKE_CURRENT_SOURCE_DIR}/solver_code/' + cmake_data = cmake_data.replace(sdir + 'include', + sdir + 'include\n' + + indent + sdir + 'linsys') + with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'w') as f: + f.write(cmake_data) + + # adjust setup.py + with open(os.path.join(code_dir, 'setup.py'), 'r') as f: + setup_text = f.read() + indent = ' ' * 30 + setup_text = setup_text.replace("os.path.join('c', 'solver_code', 'include'),", + "os.path.join('c', 'solver_code', 'include'),\n" + + indent + "os.path.join('c', 'solver_code', 'linsys'),") + with open(os.path.join(code_dir, 'setup.py'), 'w') as f: + f.write(setup_text) + + +class ECOSInterface(SolverInterface): + canon_p_ids = ['c', 'd', 'A', 'b', 'G', 'h'] + canon_p_ids_constr_vec = ['b', 'h'] + sign_constr_vec = 1 + + # solver settings + settings_names = ['feastol', 'abstol', 'reltol', 'feastol_inacc', 'abstol_inacc', + 'reltol_inacc', 'maxit'] + settings_types = ['c_float', 'c_float', 'c_float', 'c_float', 'c_float', 'c_float', 'c_int'] + settings_defaults = ['1e-8', '1e-8', '1e-8', '1e-4', '5e-5', 
'5e-5', '100'] + + def __init__(self, data, p_prob): + n_var = p_prob.x.size + n_eq = p_prob.cone_dims.zero + n_ineq = data['G'].shape[0] + + indices_obj, indptr_obj, shape_obj = None, None, None + + indices_constr, indptr_constr, shape_constr = p_prob.reduced_A.problem_data_index + + canon_constants = {'n': n_var, 'm': n_ineq, 'p': n_eq, + 'l': p_prob.cone_dims.nonneg, + 'n_cones': len(p_prob.cone_dims.soc), + 'q': np.array(p_prob.cone_dims.soc), + 'e': p_prob.cone_dims.exp} + + super().__init__(n_var, n_eq, n_ineq, indices_obj, indptr_obj, shape_obj, + indices_constr, indptr_constr, shape_constr, canon_constants) + + @staticmethod + def check_unsupported_cones(cone_dims: "ConeDims") -> None: + if cone_dims.exp > 0: + raise ValueError( + 'Code generation with ECOS and exponential cones is not supported yet.') + + @staticmethod + def ret_prim_func_exists(variable_info: PrimalVariableInfo) -> bool: + return True + + @staticmethod + def ret_dual_func_exists(dual_variable_info: DualVariableInfo) -> bool: + return True + + def generate_code(self, code_dir, solver_code_dir, cvxpygen_directory, + parameter_canon: ParameterCanon) -> None: + + # copy sources + if os.path.isdir(solver_code_dir): + shutil.rmtree(solver_code_dir) + os.mkdir(solver_code_dir) + dirs_to_copy = ['src', 'include', 'external', 'ecos_bb'] + for dtc in dirs_to_copy: + shutil.copytree(os.path.join(cvxpygen_directory, 'solvers', 'ecos', dtc), + os.path.join(solver_code_dir, dtc)) + shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'CMakeLists.txt'), + os.path.join(solver_code_dir, 'CMakeLists.txt')) + shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'COPYING'), + os.path.join(solver_code_dir, 'COPYING')) + shutil.copyfile(os.path.join(cvxpygen_directory, 'solvers', 'ecos', 'COPYING'), + os.path.join(code_dir, 'COPYING')) + + # adjust print level + with open(os.path.join(code_dir, 'c', 'solver_code', 'include', 'glblopts.h'), 'r') as f: + glbl_opts_data = f.read() + glbl_opts_data = glbl_opts_data.replace('#define PRINTLEVEL (2)', '#define PRINTLEVEL (0)') + with open(os.path.join(code_dir, 'c', 'solver_code', 'include', 'glblopts.h'), 'w') as f: + f.write(glbl_opts_data) + + # adjust top-level CMakeLists.txt + with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'r') as f: + cmake_data = f.read() + indent = ' ' * 6 + sdir = '${CMAKE_CURRENT_SOURCE_DIR}/solver_code/' + cmake_data = cmake_data.replace(sdir + 'include', + sdir + 'include\n' + + indent + sdir + 'external/SuiteSparse_config\n' + + indent + sdir + 'external/amd/include\n' + + indent + sdir + 'external/ldl/include') + with open(os.path.join(code_dir, 'c', 'CMakeLists.txt'), 'w') as f: + f.write(cmake_data) + + # remove library target from ECOS CMakeLists.txt + with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'r') as f: + lines = f.readlines() + with open(os.path.join(code_dir, 'c', 'solver_code', 'CMakeLists.txt'), 'w') as f: + for line in lines: + if '# ECOS library' in line: + break + f.write(line) + + # adjust setup.py + with open(os.path.join(code_dir, 'setup.py'), 'r') as f: + setup_text = f.read() + indent = ' ' * 30 + setup_text = setup_text.replace("os.path.join('c', 'solver_code', 'include'),", + "os.path.join('c', 'solver_code', 'include'),\n" + + indent + "os.path.join('c', 'solver_code', 'external', 'SuiteSparse_config'),\n" + + indent + "os.path.join('c', 'solver_code', 'external', 'amd', 'include'),\n" + + indent + "os.path.join('c', 'solver_code', 'external', 'ldl', 'include'),") + 
setup_text = setup_text.replace("license='Apache 2.0'", "license='GPL 3.0'") + with open(os.path.join(code_dir, 'setup.py'), 'w') as f: + f.write(setup_text)
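
This draft moves all solver-specific logic out of generate_code() and into the new SolverInterface hierarchy in cvxpygen/solvers.py (OSQPInterface, SCSInterface, ECOSInterface), with the shared data containers collected in cvxpygen/mappings.py. generate_code() now only dispatches on the solver name via get_interface_class() and delegates cone checks, affine-map construction, and solver code emission to the selected interface. Below is a minimal usage sketch of the refactored entry point; the toy problem is illustrative, and the only assumption is the public generate_code() signature shown in this patch.

import cvxpy as cp
import numpy as np
from cvxpygen import cpg

# Small parametric least-squares QP; choosing OSQP means the new
# OSQPInterface backend handles the canonical parameters and codegen.
m, n = 3, 2
A = cp.Parameter((m, n), name='A')
b = cp.Parameter(m, name='b')
x = cp.Variable(n, name='x')
problem = cp.Problem(cp.Minimize(cp.sum_squares(A @ x - b)), [x >= 0])

# Assign parameter values before codegen; otherwise get_parameter_info()
# fills them with random values via project_and_assign().
A.value = np.random.randn(m, n)
b.value = np.random.randn(m)

# Dispatches to OSQPInterface via get_interface_class('OSQP'), writes the
# generated C code to 'CPG_code', and compiles the Python wrapper module.
cpg.generate_code(problem, code_dir='CPG_code', solver='OSQP', wrapper=True)

Because the per-solver settings tables, cone checks, and get_affine_map() logic now live on the interface classes, supporting another conic solver only requires a new SolverInterface subclass plus an entry in the get_interface_class() mapping, without further changes to generate_code() itself.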