{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "29dae327", "metadata": {}, "outputs": [], "source": [ "import shutil\n", "import os\n", "import platform\n", "from pathlib import Path\n", "from cadet import Cadet\n", "\n", "import os\n", "\n", "from IPython.core.display import display, HTML, clear_output\n", "#display(HTML(\"\"))\n", "\n", "from IPython.display import Image\n", "\n", "# python numeric library\n", "import numpy as np\n", "\n", "# scientific library for python\n", "import scipy\n", "\n", "# addict is a library that makes it easier to create nested dictionaries\n", "from addict import Dict\n", "\n", "# json is a standard text based format and it used in CADETMatch for the configuration file\n", "import json\n", "\n", "# python plotting library\n", "import matplotlib.pyplot as plt\n", "%config InlineBackend.figure_format='svg'\n", "%matplotlib inline\n", "\n", "# jupyter widget support\n", "from ipywidgets import interact, interactive\n", "import ipywidgets as widgets\n", "\n", "# Temporary files for simulation objects\n", "import tempfile\n", "tempfile.tempdir = os.path.join(Path.home())\n", "\n", "import subprocess\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": 2, "id": "45e9e8cb", "metadata": {}, "outputs": [], "source": [ "# # Convert lab result to accepted CSV file\n", "# lab_result = pd.read_excel('./u=2.7.xlsx')\n", "# c_ini = 4.762\n", "\n", "# time_min = lab_result['min']\n", "# c = lab_result['c']\n", "\n", "# # Extract targeted time index\n", "# index_list = np.concatenate(([0], np.arange(29, len(time_min)+1, 30)))\n", "\n", "# time_min_select = np.array(time_min[index_list])\n", "# c_select = np.array(c[index_list])\n", "\n", "# # Replace index 0 with 0 ICs\n", "# time_min_select[0] = 0\n", "# c_select[0] = 0\n", "# c_select = c_select.clip(min=0)\n", "# # Convert mins to seconds\n", "# time_sec = time_min_select * 60\n", "\n", "# # denormalize c\n", "# c_sec = c_select*c_ini\n", "\n", "# # Create csv file for Cadet Match\n", "# result_df = pd.DataFrame()\n", "# result_df['Time'] = time_sec\n", "# result_df['c'] = c_sec\n", "\n", "# result_df.to_csv('./u=2.7.csv', index=False, header=False)" ] }, { "cell_type": "code", "execution_count": 3, "id": "8a9feb60", "metadata": {}, "outputs": [], "source": [ "def get_cadet_template(n_units=3, split_components_data=False):\n", " cadet_template = Cadet()\n", " \n", " cadet_template.root.input.model.nunits = n_units\n", " \n", " # Store solution\n", " cadet_template.root.input['return'].split_components_data = split_components_data\n", " cadet_template.root.input['return'].split_ports_data = 0\n", " cadet_template.root.input['return'].unit_000.write_solution_inlet = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_outlet = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_bulk = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_particle = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_solid = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_flux = 1\n", " cadet_template.root.input['return'].unit_000.write_solution_volume = 1\n", " cadet_template.root.input['return'].unit_000.write_coordinates = 1\n", " cadet_template.root.input['return'].unit_000.write_sens_outlet = 1\n", " \n", " for unit in range(n_units):\n", " cadet_template.root.input['return']['unit_{0:03d}'.format(unit)] = cadet_template.root.input['return'].unit_000\n", " \n", " # Tolerances for the time integrator\n", " 
cadet_template.root.input.solver.time_integrator.abstol = 1e-6\n", " cadet_template.root.input.solver.time_integrator.algtol = 1e-10\n", " cadet_template.root.input.solver.time_integrator.reltol = 1e-6\n", " cadet_template.root.input.solver.time_integrator.init_step_size = 1e-6\n", " cadet_template.root.input.solver.time_integrator.max_steps = 1000000\n", " \n", " # Solver settings\n", " cadet_template.root.input.model.solver.gs_type = 1\n", " cadet_template.root.input.model.solver.max_krylov = 0\n", " cadet_template.root.input.model.solver.max_restarts = 10\n", " cadet_template.root.input.model.solver.schur_safety = 1e-8\n", "\n", " # Run the simulation on single thread\n", " cadet_template.root.input.solver.nthreads = 1\n", " \n", " return cadet_template\n", "\n", "def set_discretization(model, n_bound=None, n_col=20, n_par_types=1):\n", " columns = {'GENERAL_RATE_MODEL', 'LUMPED_RATE_MODEL_WITH_PORES', 'LUMPED_RATE_MODEL_WITHOUT_PORES'}\n", " \n", " \n", " for unit_name, unit in model.root.input.model.items():\n", " if 'unit_' in unit_name and unit.unit_type in columns:\n", " unit.discretization.ncol = n_col\n", " unit.discretization.npar = 5\n", " unit.discretization.npartype = n_par_types\n", " \n", " if n_bound is None:\n", " n_bound = unit.ncomp*[0]\n", " unit.discretization.nbound = n_bound\n", " \n", " unit.discretization.par_disc_type = 'EQUIDISTANT_PAR'\n", " unit.discretization.use_analytic_jacobian = 1\n", " unit.discretization.reconstruction = 'WENO'\n", " unit.discretization.gs_type = 1\n", " unit.discretization.max_krylov = 0\n", " unit.discretization.max_restarts = 10\n", " unit.discretization.schur_safety = 1.0e-8\n", "\n", " unit.discretization.weno.boundary_model = 0\n", " unit.discretization.weno.weno_eps = 1e-10\n", " unit.discretization.weno.weno_order = 3\n", " \n", "def run_simulation(cadet, file_name=None):\n", " if file_name is None:\n", " f = next(tempfile._get_candidate_names())\n", " cadet.filename = os.path.join(tempfile.tempdir, f + '.h5')\n", " else:\n", " cadet.filename = file_name\n", " # save the simulation\n", " cadet.save()\n", "\n", " # run the simulation and load results\n", " data = cadet.run()\n", " cadet.load()\n", " \n", " # Remove files \n", " if file_name is None:\n", " os.remove(os.path.join(tempfile.tempdir, f + '.h5'))\n", "\n", " # Raise error if simulation fails\n", " if data.returncode == 0:\n", " print(\"Simulation completed successfully\")\n", " else:\n", " print(data)\n", " raise Exception(\"Simulation failed\")" ] }, { "cell_type": "code", "execution_count": 4, "id": "68bc856c", "metadata": {}, "outputs": [], "source": [ "def create_langmuir_model(L, u, e, Da, ka, kd, qmax, c0, tmax, col_step, time_step):\n", "\n", " langmuir_model = get_cadet_template(n_units=3)\n", "\n", " # INLET\n", " langmuir_model.root.input.model.unit_000.unit_type = 'INLET'\n", " langmuir_model.root.input.model.unit_000.ncomp = 1\n", " langmuir_model.root.input.model.unit_000.inlet_type = 'PIECEWISE_CUBIC_POLY'\n", "\n", " # Column\n", " langmuir_model.root.input.model.unit_001.unit_type = 'LUMPED_RATE_MODEL_WITHOUT_PORES'\n", " langmuir_model.root.input.model.unit_001.ncomp = 1\n", "\n", " langmuir_model.root.input.model.unit_001.col_length = L\n", " langmuir_model.root.input.model.unit_001.velocity = u / e\n", " langmuir_model.root.input.model.unit_001.total_porosity = e\n", " langmuir_model.root.input.model.unit_001.col_dispersion = Da\n", " \n", " langmuir_model.root.input.model.unit_001.adsorption_model = 'MULTI_COMPONENT_LANGMUIR'\n", "\n", " 
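# Multi-component Langmuir binding (one component here): dq/dt = ka*c*(qmax - q) - kd*q\n", " # IS_KINETIC = 1 solves this rate equation; with IS_KINETIC = 0 binding is assumed at equilibrium (keq = ka/kd)\n", " 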
langmuir_model.root.input.model.unit_001.adsorption.is_kinetic = True\n", " langmuir_model.root.input.model.unit_001.adsorption.mcl_ka = [ka,]\n", " langmuir_model.root.input.model.unit_001.adsorption.mcl_kd = [kd,]\n", " langmuir_model.root.input.model.unit_001.adsorption.mcl_qmax = [qmax,]\n", "\n", " langmuir_model.root.input.model.unit_001.init_c = [0.0,]\n", " langmuir_model.root.input.model.unit_001.init_q = [0.0,]\n", "\n", " ## Outlet\n", " langmuir_model.root.input.model.unit_002.ncomp = 1\n", " langmuir_model.root.input.model.unit_002.unit_type = 'OUTLET'\n", " \n", " set_discretization(langmuir_model, n_bound=[1,], n_col=col_step)\n", "\n", " # Sections and connections\n", " langmuir_model.root.input.solver.sections.nsec = 1\n", " langmuir_model.root.input.solver.sections.section_times = [0.0, tmax]\n", " langmuir_model.root.input.solver.sections.section_continuity = [0,]\n", " \n", " ## Inlet Profile\n", " langmuir_model.root.input.model.unit_000.sec_000.const_coeff = [c0,]\n", " \n", " Q = langmuir_model.root.input.model.unit_001.velocity\n", " \n", " ## Switches\n", " langmuir_model.root.input.model.connections.nswitches = 1\n", " langmuir_model.root.input.model.connections.switch_000.section = 0\n", " langmuir_model.root.input.model.connections.switch_000.connections = [\n", " 0, 1, -1, -1, Q,\n", " 1, 2, -1, -1, Q\n", " ]\n", "\n", " # set the times that the simulator writes out data for\n", " langmuir_model.root.input.solver.user_solution_times = np.linspace(0, tmax, time_step) \n", "\n", " return langmuir_model" ] }, { "cell_type": "code", "execution_count": 5, "id": "5b6c00c9", "metadata": {}, "outputs": [], "source": [ "from addict import Dict\n", "\n", "base_dir = Path('./').absolute()\n", "\n", "match_config = Dict()\n", "match_config.CADETPath = Cadet.cadet_path\n", "match_config.baseDir = base_dir.as_posix()\n", "match_config.resultsDir = 'results'" ] }, { "cell_type": "code", "execution_count": 6, "id": "a2eaaffa", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
0.00.0.1
06.00.000000
112.00.000000
218.00.000000
324.00.000000
430.00.000000
.........
11456876.04.642390
11466882.04.642946
11476888.04.643960
11486894.04.644739
11496900.04.645557
\n", "

1150 rows × 2 columns

\n", "
" ], "text/plain": [ " 0.0 0.0.1\n", "0 6.0 0.000000\n", "1 12.0 0.000000\n", "2 18.0 0.000000\n", "3 24.0 0.000000\n", "4 30.0 0.000000\n", "... ... ...\n", "1145 6876.0 4.642390\n", "1146 6882.0 4.642946\n", "1147 6888.0 4.643960\n", "1148 6894.0 4.644739\n", "1149 6900.0 4.645557\n", "\n", "[1150 rows x 2 columns]" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Get tmax and time_step\n", "result_df = pd.read_csv('./u=2.7.csv')\n", "result_df" ] }, { "cell_type": "code", "execution_count": 7, "id": "ca48f059", "metadata": {}, "outputs": [], "source": [ "# Known parameters\n", "L = 0.18\n", "u = 2.7 / 3600\n", "c0 = 4.762\n", "\n", "# Unknown parameters --- Initial guess\n", "e = 0.5\n", "Da = 2e-5\n", "ka = 1e-3\n", "kd = 1e-3\n", "qmax = 100" ] }, { "cell_type": "code", "execution_count": 8, "id": "d3ffc5b8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "20\n", "1150\n", "6900.0\n" ] } ], "source": [ "# get time_step\n", "time_step = len(result_df['0.0'])\n", "tmax = np.array(result_df['0.0'])[-1]\n", "col_step = 20\n", "\n", "print(col_step)\n", "print(time_step)\n", "print(tmax)" ] }, { "cell_type": "code", "execution_count": 9, "id": "511e4c99", "metadata": {}, "outputs": [], "source": [ "Cadet.cadet_path = 'C:/Users/yuan_yunhao/Miniconda3/envs/tf2/bin/cadet-cli.exe'\n", "\n", "from addict import Dict\n", "\n", "base_dir = Path('./').absolute()\n", "\n", "match_config = Dict()\n", "match_config.CADETPath = Cadet.cadet_path\n", "match_config.baseDir = base_dir.as_posix()\n", "match_config.resultsDir = 'results'" ] }, { "cell_type": "code", "execution_count": 10, "id": "96760787", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Simulation completed successfully\n" ] } ], "source": [ "langumir_model = create_langmuir_model(L, u, e, Da, ka, kd, qmax, c0, tmax, col_step, time_step)\n", "run_simulation(langumir_model, 'langmuir_reference.h5')" ] }, { "cell_type": "code", "execution_count": 11, "id": "2c5f2408", "metadata": {}, "outputs": [], "source": [ "# e\n", "parameter1 = Dict()\n", "parameter1.location = '/input/model/unit_001/COL_POROSITY'\n", "parameter1.min = 0.5\n", "parameter1.max = 0.9\n", "parameter1.component = -1\n", "parameter1.bound = -1\n", "parameter1.transform = 'null'\n", "\n", "# Da\n", "parameter2 = Dict()\n", "parameter2.location = '/input/model/unit_001/COL_DISPERSION'\n", "parameter2.min = 1e-9\n", "parameter2.max = 1e-2\n", "parameter2.component = -1\n", "parameter2.bound = -1\n", "parameter2.transform = 'auto'\n", "\n", "# ka\n", "parameter3 = Dict()\n", "parameter3.transform = 'auto'\n", "parameter3.component = 0\n", "parameter3.bound = 0\n", "parameter3.location = '/input/model/unit_001/adsorption/MCL_KA'\n", "parameter3.min = 1e-5\n", "parameter3.max = 1e5\n", "\n", "# kd\n", "parameter4 = Dict()\n", "parameter4.transform = 'auto'\n", "parameter4.component = 0\n", "parameter4.bound = 0\n", "parameter4.location = '/input/model/unit_001/adsorption/MCL_KD'\n", "parameter4.min = 1e-5\n", "parameter4.max = 1e5\n", "\n", "# qmax\n", "parameter5 = Dict()\n", "parameter5.transform = 'auto'\n", "parameter5.component = 0\n", "parameter5.bound = 0\n", "parameter5.location = '/input/model/unit_001/adsorption/MCL_QMAX'\n", "parameter5.min = 0.5\n", "parameter5.max = 200\n", "\n", "\n", "match_config.parameters = [parameter1, parameter2, parameter3, parameter4, parameter5]" ] }, { "cell_type": "code", "execution_count": 12, "id": "7a6d9004", 
"metadata": {}, "outputs": [], "source": [ "experiment1 = Dict()\n", "experiment1.csv = './u=2.7.csv'\n", "experiment1.output_path = '/output/solution/unit_002/SOLUTION_OUTLET_COMP_000'\n", "experiment1.HDF5 = 'langmuir_reference.h5'\n", "experiment1.name = 'main'\n", "\n", "feature1 = Dict()\n", "feature1.name = 'Pulse'\n", "feature1.type = 'SSE'\n", "\n", "experiment1.features = [feature1,]\n", "\n", "match_config.experiments = [experiment1,]\n", "\n", "match_config.searchMethod = 'NSGA3'\n", "match_config.population = 12\n", "match_config.stallGenerations = 10\n", "match_config.finalGradRefinement = True\n", "match_config.gradVector = True" ] }, { "cell_type": "code", "execution_count": 13, "id": "1134f06c", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\importlib\\_bootstrap.py:219: RuntimeWarning: numpy.ndarray size changed, may indicate binary incompatibility. Expected 16 from C header, got 88 from PyObject\n", " return f(*args, **kwds)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "2023-04-10 16:07:48,915 match.py print_version 122 CADETMatch starting up version: 0.8.16\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 attrs version: 22.1.0 tested with 21.2.0\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 joblib version: 1.2.0 tested with 1.0.1\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 addict version: 2.4.0 tested with 2.4.0\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 corner version: 2.2.1 tested with 2.2.1\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 emcee version: 3.1.4 tested with 3.0.2\n", "\n", "2023-04-10 16:07:48,915 match.py print_version 154 SALib version: 1.4.5 tested with 1.3.11\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 psutil version: 5.9.4 tested with 5.8.0\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 numpy version: 1.21.5 tested with 1.21.1\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 openpyxl version: 3.1.2 tested with 3.0.7\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 scipy version: 1.7.3 tested with 1.7.0\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 matplotlib version: 3.5.3 tested with 3.4.2\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 pandas version: 1.3.5 tested with 1.3.0\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 h5py version: 3.6.0 tested with 3.3.0\n", "\n", "2023-04-10 16:07:48,930 match.py print_version 154 cadet-python version: 0.14 tested with 0.11\n", "\n", "2023-04-10 16:07:48,946 match.py print_version 154 seaborn version: 0.12.2 tested with 0.11.1\n", "\n", "2023-04-10 16:07:48,946 match.py print_version 154 scikit-learn version: 1.0.2 tested with 0.24.2\n", "\n", "2023-04-10 16:07:48,946 match.py print_version 154 jstyleson version: 0.0.2 tested with 0.2.0\n", "\n", "2023-04-10 16:07:48,946 match.py print_version 154 filelock version: 3.8.2 tested with 3.0.12\n", "\n", "2023-04-10 16:07:48,946 match.py print_version 154 pymoo version: 0.6.0.1 tested with 0.6.0.1\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 parameter /input/model/unit_001/COL_DISPERSION log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 parameter /input/model/unit_001/COL_DISPERSION log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 parameter /input/model/unit_001/adsorption/MCL_KA log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 
parameter /input/model/unit_001/adsorption/MCL_KA log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 parameter /input/model/unit_001/adsorption/MCL_KD log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 181 parameter /input/model/unit_001/adsorption/MCL_KD log\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 185 parameter /input/model/unit_001/adsorption/MCL_QMAX linear\n", "\n", "2023-04-10 16:07:50,593 auto.py getHeaders 185 parameter /input/model/unit_001/adsorption/MCL_QMAX linear\n", "\n", "2023-04-10 16:07:50,648 util.py setupSimulation 1259 langmuir_reference.h5 abstol=1e-06 reltol=1e-06\n", "\n", "2023-04-10 16:07:50,801 match.py setupTemplates 284 simulation took 0.1525583267211914\n", "\n", "2023-04-10 16:07:50,955 match.py setupTemplates 341 simulation final took 0.13499116897583008\n", "\n", "2023-04-10 16:07:51,133 gradFD.py create_template 50 grad simulation took 0.13098430633544922\n", "\n", "2023-04-10 16:07:51,133 gradFD.py create_template 56 grad D:/yuan_yunhao/Process modeling/LKM model/Inverse_mapping/Cadet Match/results/misc/template_main_grad.h5 abstol=1e-06 reltol=1e-06\n", "\n", "2023-04-10 16:07:51,142 loggerwriter.py write 10 Traceback (most recent call last):\n", "\n", "\n", "\n", "2023-04-10 16:07:51,142 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\CADETMatch\\match.py\", line 363, in \n", "\n", "\n", "\n", "2023-04-10 16:07:51,142 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,142 loggerwriter.py write 10 main(map_function=map_function)\n", "\n", "2023-04-10 16:07:51,142 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\CADETMatch\\match.py\", line 28, in main\n", "\n", "\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 hof = evo.run(cache)\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\CADETMatch\\evo.py\", line 153, in run\n", "\n", "\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 return cache.search[searchMethod].run(cache)\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\CADETMatch\\search\\nsga3.py\", line 9, in run\n", "\n", "\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 return CADETMatch.pymoo_config.run(cache, 'nsga3')\n", "\n", "2023-04-10 16:07:51,143 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\CADETMatch\\pymoo_config.py\", line 185, in run\n", "\n", "\n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 algorithm.next()\n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\algorithm.py\", line 157, in next\n", "\n", "\n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 infills = self.infill()\n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\algorithm.py\", line 193, in infill\n", "\n", "\n", "\n", "2023-04-10 16:07:51,144 loggerwriter.py write 10 \n", 
"\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 infills = self._infill()\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\algorithms\\base\\genetic.py\", line 85, in _infill\n", "\n", "\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 off = self.mating.do(self.problem, self.pop, self.n_offsprings, algorithm=self)\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\infill.py\", line 38, in do\n", "\n", "\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 _off = self._do(problem, pop, n_remaining, **kwargs)\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\mating.py\", line 31, in _do\n", "\n", "\n", "\n", "2023-04-10 16:07:51,145 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 off = self.crossover(problem, parents, **kwargs)\n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\operator.py\", line 27, in __call__\n", "\n", "\n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 out = self.do(problem, elem, *args, **kwargs)\n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\core\\crossover.py\", line 48, in do\n", "\n", "\n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 Q = self._do(problem, X, **kwargs)\n", "\n", "2023-04-10 16:07:51,146 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\operators\\crossover\\sbx.py\", line 114, in _do\n", "\n", "\n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 Q = cross_sbx(X.astype(float), problem.xl, problem.xu, eta, prob_var, prob_bin)\n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 File \"C:\\Users\\yuan_yunhao\\Miniconda3\\envs\\tf2\\lib\\site-packages\\pymoo\\operators\\crossover\\sbx.py\", line 26, in cross_sbx\n", "\n", "\n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 \n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 cross[:, xl == xu] = False\n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 IndexError\n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 : \n", "\n", "2023-04-10 16:07:51,147 loggerwriter.py write 10 boolean index did not match indexed array along dimension 1; dimension is 2 but corresponding boolean dimension is 5\n", "\n", "2023-04-10 16:07:51,147 util.py info 54 process shutting down\n", "\n" ] } ], "source": [ "from CADETMatch.jupyter import Match\n", "\n", "match_file = base_dir / 'langmuir.json'\n", "\n", "with open(match_file, 'w') as json_file:\n", " json.dump(match_config.to_dict(), json_file, indent='\\t')\n", "\n", "match = Match(match_file)\n", "match.start_sim()" ] }, { "cell_type": "code", "execution_count": null, "id": "e84c0b3d", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": 
"python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.15" } }, "nbformat": 4, "nbformat_minor": 5 }