mpi - OpenMDAO1+ : variable trees in parallel

Tags: mpi, openmdao

I have a large set of model parameters controlling several different components. The model is run in parallel, and the model parameters stay constant during a run. The problem is that when running in parallel I have to add an IndepVarComp() for every model parameter, even though I would like to pass them by object. I need to be able to edit the values in my run script before running the model (between setup and run). Is there a good way to do this? I recognize the data-passing issues that come from running under MPI without a "source" for the parameters.

If I add an IndepVarComp() for every model parameter it works, as long as I don't pass by object. That makes sense: if I tell OpenMDAO that I want to be able to change a value and track how the model responds, passing by object is contradictory. However, I need to be able to set parameter values after setup, and under MPI I can't do that without creating an IndepVarComp() for every model parameter.

I have attached an example, based on the Sellar problem from the OpenMDAO docs, that illustrates what I want to do. The example runs in parallel if you uncomment line 28, comment out line 27, and uncomment line 139 of src.py (i.e. switch varTree:leaf1 to pass_by_obj=False and give it an IndepVarComp source), as shown in the snippet just below.
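For reference, those line numbers correspond to the varTree:leaf1 declarations that appear (commented out) in src.py further down; the parallel-capable variant is simply the pass-by-value parameter plus an IndepVarComp source for it:

# in SellarDis1.__init__ (src.py lines 27/28): pass by value instead of by object
self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=False)

# in SellarDerivativesSuperGroup.__init__ (src.py line 139): give the parameter a source
self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize)), promotes=['*'])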

Run with: $ mpirun -np 4 python call.py

call.py
from __future__ import print_function

from openmdao.api import Problem, ScipyOptimizer

from src import SellarDerivativesSuperGroup

import numpy as np

if __name__ == "__main__":

    ######################### for MPI functionality #########################
    from openmdao.core.mpi_wrap import MPI

    if MPI:  # pragma: no cover
        # if you called this script with 'mpirun', then use the petsc data passing
        from openmdao.core.petsc_impl import PetscImpl as impl
    else:
        # if you didn't use 'mpirun', then use the numpy data passing
        from openmdao.api import BasicImpl as impl

    def mpi_print(prob, *args):
        """ helper function to only print on rank 0 """
        if prob.root.comm.rank == 0:
            print(*args)

    ##################
    nProblems = 4
    datasize = 10
    top = Problem(impl=impl)
    top.root = SellarDerivativesSuperGroup(nProblems=nProblems, datasize=datasize)

    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['tol'] = 1.0e-8

    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                         upper=np.array([10.0, 10.0]))
    top.driver.add_desvar('x', lower=0.0, upper=10.0)

    top.driver.add_objective('obj')
    top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup(check=True)

    # Setting initial values for design variables
    top['x'] = 1.0
    top['z'] = np.array([5.0, 2.0])
    top['varTree:leaf1'] = np.ones(datasize)

    top.run()

    if top.root.comm.rank == 0:
        print("\n")
        print("Minimum found at (%f, %f, %f)" % (top['z'][0],
                                                 top['z'][1],
                                                 top['x']))
        print("Coupling vars: %f, %f" % (top['y1_0'], top['y2_0']))
        print("Minimum objective: ", top['obj']/nProblems)

src.py

from __future__ import print_function

from openmdao.api import ExecComp, IndepVarComp, Group, NLGaussSeidel, \
                         Component, ParallelGroup, ScipyGMRES

import numpy as np


class SellarDis1(Component):
    """Component containing Discipline 1."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDis1, self).__init__()

        self.problem_id = problem_id

        # Global Design Variable
        self.add_param('z', val=np.zeros(2))

        # Local Design Variable
        self.add_param('x', val=0.)

        # Coupling parameter
        self.add_param('y2_%i' % problem_id, val=1.0)

        # Dummy variable tree element
        self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True)
        # self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=False)

        # Coupling output
        self.add_output('y1_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y1 = z1**2 + z2 + x1 - 0.2*y2"""

        problem_id = self.problem_id

        z1 = params['z'][0]
        z2 = params['z'][1]
        x1 = params['x']
        y2 = params['y2_%i' % problem_id]

        unknowns['y1_%i' % problem_id] = z1**2 + z2 + x1 - 0.2*y2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 1."""

        problem_id = self.problem_id

        J = {}

        J['y1_%i' % problem_id, 'y2_%i' % problem_id] = -0.2
        J['y1_%i' % problem_id, 'z'] = np.array([[2*params['z'][0], 1.0]])
        J['y1_%i' % problem_id, 'x'] = 1.0

        return J


class SellarDis2(Component):
    """Component containing Discipline 2."""

    def __init__(self, problem_id=0):
        super(SellarDis2, self).__init__()

        self.problem_id = problem_id

        # Global Design Variable
        self.add_param('z', val=np.zeros(2))

        # Coupling parameter
        self.add_param('y1_%i' % problem_id, val=1.0)

        # Coupling output
        self.add_output('y2_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y2 = y1**(.5) + z1 + z2"""

        problem_id = self.problem_id

        z1 = params['z'][0]
        z2 = params['z'][1]
        y1 = params['y1_%i' % problem_id]

        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        y1 = abs(y1)

        unknowns['y2_%i' % problem_id] = y1**.5 + z1 + z2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 2."""

        problem_id = self.problem_id

        J = {}

        J['y2_%i' % problem_id, 'y1_%i' % problem_id] = .5*params['y1_%i' % problem_id]**-.5
        J['y2_%i' % problem_id, 'z'] = np.array([[1.0, 1.0]])

        return J


class SellarDerivativesSubGroup(Group):

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivativesSubGroup, self).__init__()

        self.add('d1', SellarDis1(problem_id=problem_id, datasize=datasize), promotes=['*'])
        self.add('d2', SellarDis2(problem_id=problem_id), promotes=['*'])

        self.nl_solver = NLGaussSeidel()
        self.nl_solver.options['atol'] = 1.0e-12

        self.ln_solver = ScipyGMRES()


class SellarDerivatives(Group):
    """ Group containing the Sellar MDA. This version uses the disciplines
    with derivatives."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivatives, self).__init__()

        self.add('d', SellarDerivativesSubGroup(problem_id=problem_id, datasize=datasize), promotes=['*'])


class SellarDerivativesSuperGroup(Group):

    def __init__(self, nProblems=0, datasize=0):

        super(SellarDerivativesSuperGroup, self).__init__()

        self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
        # self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize)), promotes=['*'])

        pg = self.add('manySellars', ParallelGroup(), promotes=['*'])
        print(nProblems)
        for problem_id in np.arange(0, nProblems):
            pg.add('Sellar%i' % problem_id, SellarDerivatives(problem_id=problem_id, datasize=datasize), promotes=['*'])

        self.add('obj_cmp', ExecComp('obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
                                     '(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
                                     z=np.array([0.0, 0.0]), x=0.0,
                                     y1_0=0.0, y2_0=0.0,
                                     y1_1=0.0, y2_1=0.0,
                                     y1_2=0.0, y2_2=0.0,
                                     y1_3=0.0, y2_3=0.0),
                 promotes=['*'])

        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
        self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])

Best answer

If these parameters will never be used as optimization design variables, you don't have to declare them as OpenMDAO variables at all. You can declare them as regular Python attributes in the __init__ method, then write a small method that loops over the hierarchy and sets those attributes to whatever values you want.

That is probably a bit simpler than adding IndepVarComps with pass-by-object, although the solution you proposed yourself would also work.
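A minimal sketch of that suggestion, assuming OpenMDAO 1.x's components(recurse=True) iterator on the root group (the helper name set_model_param and the leaf1 attribute are illustrative, not part of the original code): store the constant parameters as plain Python attributes and set them from the run script between setup() and run().

def set_model_param(root, attr_name, value):
    """Walk the component hierarchy and set a plain Python attribute wherever it exists.

    Assumes the Group.components(recurse=True) iterator from OpenMDAO 1.x;
    a hand-written recursion over the subsystems would work the same way.
    """
    for comp in root.components(recurse=True):
        if hasattr(comp, attr_name):
            setattr(comp, attr_name, value)

# In SellarDis1.__init__, the constant parameter becomes a plain attribute
# instead of an add_param('varTree:leaf1', ...) call:
#     self.leaf1 = np.zeros(datasize)
#
# In call.py, between setup() and run():
#     top.setup(check=True)
#     set_model_param(top.root, 'leaf1', np.ones(datasize))
#     top.run()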

For mpi - OpenMDAO1+ : variable trees in parallel, see the original question on Stack Overflow: https://stackoverflow.com/questions/35805007/
