@article{LizarazuHarirchianShaiketal., author = {Lizarazu, Jorge and Harirchian, Ehsan and Shaik, Umar Arif and Shareef, Mohammed and Antoni-Zdziobek, Annie and Lahmer, Tom}, title = {Application of machine learning-based algorithms to predict the stress-strain curves of additively manufactured mild steel out of its microstructural characteristics}, series = {Results in Engineering}, volume = {2023}, journal = {Results in Engineering}, number = {Volume 20 (2023)}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2023.101587}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65028}, pages = {1 -- 12}, abstract = {The study presents a Machine Learning (ML)-based framework designed to forecast the stress-strain relationship of arc-direct energy deposited mild steel. Based on microstructural characteristics previously extracted using microscopy and X-ray diffraction, approximately 1000 new parameter sets are generated by applying the Latin Hypercube Sampling Method (LHSM). For each parameter set, a Representative Volume Element (RVE) is synthetically created via Voronoi Tessellation. Input raw data for ML-based algorithms comprises these parameter sets or RVE-images, while output raw data includes their corresponding stress-strain relationships calculated after a Finite Element (FE) procedure. Input data undergoes preprocessing involving standardization, feature selection, and image resizing. Similarly, the stress-strain curves, initially unsuitable for training traditional ML algorithms, are preprocessed using cubic splines and occasionally Principal Component Analysis (PCA). The later part of the study focuses on employing multiple ML algorithms, utilizing two main models. The first model predicts stress-strain curves based on microstructural parameters, while the second model does so solely from RVE images. The most accurate prediction yields a Root Mean Squared Error of around 5 MPa, approximately 1\% of the yield stress. This outcome suggests that ML models offer precise and efficient methods for characterizing dual-phase steels, establishing a framework for accurate results in material analysis.}, subject = {Maschinelles Lernen}, language = {en} } @article{AbdelnourZabel, author = {Abdelnour, Mena and Zabel, Volkmar}, title = {Modal identification of structures with a dynamic behaviour characterised by global and local modes at close frequencies}, series = {Acta Mechanica}, volume = {2023}, journal = {Acta Mechanica}, publisher = {Springer}, address = {Wien}, doi = {10.1007/s00707-023-03598-z}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230525-63822}, pages = {1 -- 21}, abstract = {Identification of modal parameters of a space frame structure is a complex assignment due to a large number of degrees of freedom, close natural frequencies, and different vibrating mechanisms. Research has been carried out on the modal identification of rather simple truss structures. So far, less attention has been given to complex three-dimensional truss structures. This work develops a vibration-based methodology for determining modal information of three-dimensional space truss structures. The method uses a relatively complex space truss structure for its verification. Numerical modelling of the system gives modal information about the expected vibration behaviour. The identification process involves closely spaced modes that are characterised by local and global vibration mechanisms. 
To distinguish between local and global vibrations of the system, modal strain energies are used as an indicator. The experimental validation, which incorporated a modal analysis employing the stochastic subspace identification method, has confirmed that considering relatively high model orders is required to identify specific mode shapes. Especially in the case of the determination of local deformation modes of space truss members, higher model orders have to be taken into account than in the modal identification of most other types of structures.}, subject = {Fachwerkbau}, language = {en} } @article{AlemuHabteLahmeretal., author = {Alemu, Yohannes L. and Habte, Bedilu and Lahmer, Tom and Urgessa, Girum}, title = {Topologically preoptimized ground structure (TPOGS) for the optimization of 3D RC buildings}, series = {Asian Journal of Civil Engineering}, volume = {2023}, journal = {Asian Journal of Civil Engineering}, publisher = {Springer International Publishing}, address = {Cham}, doi = {10.1007/s42107-023-00640-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230517-63677}, pages = {1 -- 11}, abstract = {As an optimization that starts from a randomly selected structure generally does not guarantee reasonable optimality, the use of a systemic approach, named the ground structure, is widely accepted in steel-made truss and frame structural design. However, in the case of reinforced concrete (RC) structural optimization, because of the orthogonal orientation of structural members, randomly chosen or architect-sketched framing is used. Such a one-time fixed layout trend, in addition to its lack of a systemic approach, does not necessarily guarantee optimality. In this study, an approach for generating a candidate ground structure to be used for cost or weight minimization of 3D RC building structures with included slabs is developed. A multiobjective function at the floor optimization stage and a single objective function at the frame optimization stage are considered. A particle swarm optimization (PSO) method is employed for selecting the optimal ground structure. This method enables generating a simple, yet potential, real-world representation of topologically preoptimized ground structure while both structural and main architectural requirements are considered. This is supported by a case study for different floor domain sizes.}, subject = {Bodenmechanik}, language = {en} } @article{AlaladeReichertKoehnetal., author = {Alalade, Muyiwa and Reichert, Ina and K{\"o}hn, Daniel and Wuttke, Frank and Lahmer, Tom}, title = {A Cyclic Multi-Stage Implementation of the Full-Waveform Inversion for the Identification of Anomalies in Dams}, series = {Infrastructures}, volume = {2022}, journal = {Infrastructures}, number = {Volume 7, issue 12, article 161}, editor = {Qu, Chunxu and Gao, Chunxu and Zhang, Rui and Jia, Ziguang and Li, Jiaxiang}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures7120161}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221201-48396}, pages = {19}, abstract = {For the safe and efficient operation of dams, frequent monitoring and maintenance are required. These are usually expensive, time consuming, and cumbersome. To alleviate these issues, we propose applying a wave-based scheme for the location and quantification of damages in dams. 
To obtain high-resolution "interpretable" images of the damaged regions, we drew inspiration from non-linear full-multigrid methods for inverse problems and applied a new cyclic multi-stage full-waveform inversion (FWI) scheme. Our approach is less susceptible to the stability issues faced by the standard FWI scheme when dealing with ill-posed problems. In this paper, we first selected an optimal acquisition setup and then applied synthetic data to demonstrate the capability of our approach in identifying a series of anomalies in dams by a mixture of reflection and transmission tomography. The results showed sufficient robustness, indicating the prospects of application in the field of non-destructive testing of dams.}, subject = {Damm}, language = {en} } @phdthesis{Hanna, author = {Hanna, John}, title = {Computational Fracture Modeling and Design of Encapsulation-Based Self-Healing Concrete Using XFEM and Cohesive Surface Technique}, doi = {10.25643/bauhaus-universitaet.4746}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221124-47467}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {125}, abstract = {Encapsulation-based self-healing concrete (SHC) is the most promising technique for providing a self-healing mechanism to concrete. This is due to its capacity to heal fractures effectively without human intervention, extending the operational life and lowering maintenance costs. The healing mechanism is created by embedding capsules containing the healing agent inside the concrete. The healing agent is released once the capsules are fractured, and the healing occurs in the vicinity of the damaged part. The healing efficiency of SHC is still not clear and depends on several factors; in the case of microcapsule-based SHC, the fracture of the microcapsules is the most important aspect in releasing the healing agents and hence healing the cracks. This study contributes to verifying the healing efficiency of SHC and the fracture mechanism of the microcapsules. The extended finite element method (XFEM) is a flexible and powerful discrete crack method that allows crack propagation without the requirement for re-meshing and has shown high accuracy for modeling fracture in concrete. In this thesis, a computational fracture modeling approach for encapsulation-based SHC is proposed based on the XFEM and the cohesive surface technique (CS) to study the healing efficiency as well as the potential for fracture and debonding of the microcapsules or the solidified healing agents from the concrete matrix. The concrete matrix and the microcapsule shell are both modeled by the XFEM and coupled by the CS. The effects of the healed-crack length, the interfacial fracture properties, and the microcapsule size on the load-carrying capability and fracture pattern of the SHC have been studied. The obtained results are compared to those of the zero-thickness cohesive element approach to demonstrate the accuracy and validity of the proposed simulation. The present fracture simulation is extended to study the influence of capsular clustering on the fracture mechanism by varying the contact surface area of the CS between the microcapsule shell and the concrete matrix. The proposed fracture simulation is expanded to 3D simulations to validate the 2D computational simulations and to estimate the accuracy difference ratio between 2D and 3D simulations.
In addition, a design method is proposed to determine the size of the microcapsules in consideration of a sufficient volume of healing agent to heal the expected crack width. This method is based on the configuration of the unit cell (UC), the Representative Volume Element (RVE), and Periodic Boundary Conditions (PBC), and associates them with the volume fraction (Vf) and the crack width as variables. The proposed microcapsule design is verified through computational fracture simulations.}, subject = {Beton}, language = {en} } @phdthesis{Jenabidehkordi, author = {Jenabidehkordi, Ali}, title = {An Efficient Adaptive PD Formulation for Complex Microstructures}, doi = {10.25643/bauhaus-universitaet.4742}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221124-47422}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {118}, abstract = {The computational costs of newly developed numerical simulations play a critical role in their acceptance within both academic use and industrial employment. Normally, the refinement of a method in the area of interest reduces the computational cost. This is unfortunately not true for most nonlocal simulations, since refinement typically increases the size of the material point neighborhood. Reducing the discretization size while keeping the neighborhood size will often require extra consideration. Peridynamics (PD) is a newly developed numerical method with a nonlocal nature. Its straightforward integral-form equation of motion allows simulating dynamic problems without any extra considerations. The formation and propagation of cracks are natural to peridynamics. This means that discontinuity is a result of the simulation and does not demand any post-processing. As with other nonlocal methods, PD is considered an expensive method. Refining the nodal spacing while keeping the neighborhood size (i.e., the horizon radius) constant gives rise to several nonphysical phenomena. This research aims to reduce the peridynamic computational and implementation costs. A novel refinement approach is introduced. The proposed approach takes advantage of the flexibility of PD in choosing the shape of the horizon by introducing multiple domains (with no intersections) for the nodes of the refinement zone. It will be shown that no ghost forces will be created when changing the horizon sizes in both subdomains. The approach is applied to both bond-based and state-based peridynamics and verified for a simple wave propagation refinement problem, illustrating the efficiency of the method. Further development of the method for higher dimensions proves to have a direct relationship with the mesh sensitivity of the PD. A method for solving the mesh sensitivity of the PD is introduced. The application of the method will be examined by solving a crack propagation problem similar to those reported in the literature. A new software architecture is proposed considering both academic and industrial use. The available simulation tools for employing PD will be collected, and their advantages and drawbacks will be addressed. The challenges of implementing any node-based nonlocal method while maximizing the software's flexibility for further development and modification will be discussed and addressed. A software package named Relation-Based Simulator (RBS) is developed for examining the proposed architecture. The exceptional capabilities of RBS will be explored by simulating three distinguished models. RBS is publicly available and open to further development.
The industrial acceptance of the RBS will be tested by targeting its performance on one Mac and two Linux distributions.}, subject = {Peridynamik}, language = {en} } @phdthesis{Jenabidehkordi, author = {Jenabidehkordi, Ali}, title = {An efficient adaptive PD formulation for complex microstructures}, doi = {10.25643/bauhaus-universitaet.4738}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221116-47389}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {118}, abstract = {The computational costs of newly developed numerical simulations play a critical role in their acceptance within both academic use and industrial employment. Normally, the refinement of a method in the area of interest reduces the computational cost. This is unfortunately not true for most nonlocal simulations, since refinement typically increases the size of the material point neighborhood. Reducing the discretization size while keeping the neighborhood size will often require extra consideration. Peridynamics (PD) is a newly developed numerical method with a nonlocal nature. Its straightforward integral-form equation of motion allows simulating dynamic problems without any extra considerations. The formation and propagation of cracks are natural to peridynamics. This means that discontinuity is a result of the simulation and does not demand any post-processing. As with other nonlocal methods, PD is considered an expensive method. Refining the nodal spacing while keeping the neighborhood size (i.e., the horizon radius) constant gives rise to several nonphysical phenomena. This research aims to reduce the peridynamic computational and implementation costs. A novel refinement approach is introduced. The proposed approach takes advantage of the flexibility of PD in choosing the shape of the horizon by introducing multiple domains (with no intersections) for the nodes of the refinement zone. It will be shown that no ghost forces will be created when changing the horizon sizes in both subdomains. The approach is applied to both bond-based and state-based peridynamics and verified for a simple wave propagation refinement problem, illustrating the efficiency of the method. Further development of the method for higher dimensions proves to have a direct relationship with the mesh sensitivity of the PD. A method for solving the mesh sensitivity of the PD is introduced. The application of the method will be examined by solving a crack propagation problem similar to those reported in the literature. A new software architecture is proposed considering both academic and industrial use. The available simulation tools for employing PD will be collected, and their advantages and drawbacks will be addressed. The challenges of implementing any node-based nonlocal method while maximizing the software's flexibility for further development and modification will be discussed and addressed. A software package named Relation-Based Simulator (RBS) is developed for examining the proposed architecture. The exceptional capabilities of RBS will be explored by simulating three distinguished models. RBS is publicly available and open to further development.
The industrial acceptance of the RBS will be tested by targeting its performance on one Mac and two Linux distributions.}, subject = {Peridynamik}, language = {en} } @phdthesis{Zacharias, author = {Zacharias, Christin}, title = {Numerical Simulation Models for Thermoelastic Damping Effects}, doi = {10.25643/bauhaus-universitaet.4735}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221116-47352}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {191}, abstract = {Finite Element Simulations of dynamically excited structures are mainly influenced by the mass, stiffness, and damping properties of the system, as well as external loads. The prediction quality of dynamic simulations of vibration-sensitive components depends significantly on the use of appropriate damping models. Damping phenomena have a decisive influence on the vibration amplitude and the frequencies of the vibrating structure. However, developing realistic damping models is challenging due to the multiple sources that cause energy dissipation, such as material damping, different types of friction, or various interactions with the environment. This thesis focuses on thermoelastic damping, which is the main cause of material damping in homogeneous materials. The effect is caused by temperature changes due to mechanical strains. In vibrating structures, temperature gradients arise in adjacent tension and compression areas. Depending on the vibration frequency, they result in heat flows, leading to increased entropy and the irreversible transformation of mechanical energy into thermal energy. The central objective of this thesis is the development of efficient simulation methods to incorporate thermoelastic damping in finite element analyses based on modal superposition. The thermoelastic loss factor is derived from the structure's mechanical mode shapes and eigenfrequencies. In subsequent analyses that are performed in the time and frequency domain, it is applied as modal damping. Two approaches are developed to determine the thermoelastic loss in thin-walled plate structures, as well as three-dimensional solid structures. The realistic representation of the dissipation effects is verified by comparing the simulation results with experimentally determined data. Therefore, an experimental setup is developed to measure material damping, excluding other sources of energy dissipation. The three-dimensional solid approach is based on the determination of the generated entropy and therefore the generated heat per vibration cycle, which is a measure for thermoelastic loss in relation to the total strain energy. For thin plate structures, the amount of bending energy in a modal deformation is calculated and summarized in the so-called Modal Bending Factor (MBF). The highest amount of thermoelastic loss occurs in the state of pure bending. Therefore, the MBF enables a quantitative classification of the mode shapes concerning the thermoelastic damping potential. The results of the developed simulations are in good agreement with the experimental results and are appropriate to predict thermoelastic loss factors. Both approaches are based on modal superposition with the advantage of a high computational efficiency. 
Overall, the modeling of thermoelastic damping represents an important component in a comprehensive damping model, which is necessary to perform realistic simulations of vibration processes.}, subject = {Werkstoffd{\"a}mpfung}, language = {en} } @article{ChowdhuryZabel, author = {Chowdhury, Sharmistha and Zabel, Volkmar}, title = {Influence of loading sequence on wind induced fatigue assessment of bolts in TV-tower connection block}, series = {Results in Engineering}, volume = {2022}, journal = {Results in Engineering}, number = {Volume 16, article 100603}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2022.100603}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221028-47303}, pages = {1 -- 18}, abstract = {Bolted connections are widely employed in structures like transmission poles, wind turbines, and television (TV) towers. The behaviour of bolted connections is often complex and plays a significant role in the overall dynamic characteristics of the structure. The goal of this work is to conduct a fatigue lifecycle assessment of such a bolted connection block of a 193 m tall TV tower, for which 205 days of real measurement data have been obtained from the installed monitoring devices. Based on the recorded data, the best-fit stochastic wind distribution for 50 years, the decisive wind action, and the locations to carry out the fatigue analysis have been decided. A 3D beam model of the entire tower is developed to extract the nodal forces corresponding to the connection block location under various mean wind speeds, which is later coupled with a detailed complex finite element model of the connection block, with over three million degrees of freedom, for acquiring stress histories on some pre-selected bolts. The random stress histories are analysed using the rainflow counting algorithm (RCA) and the damage is estimated using Palmgren-Miner's damage accumulation law. A modification is proposed to integrate the loading sequence effect into the RCA, which otherwise is ignored, and the differences between the two RCAs are investigated in terms of the accumulated damage.}, subject = {Schadensakkumulation}, language = {en} } @article{HarirchianIsik, author = {Harirchian, Ehsan and Isik, Ercan}, title = {A Comparative Probabilistic Seismic Hazard Analysis for Eastern Turkey (Bitlis) Based on Updated Hazard Map and Its Effect on Regular RC Structures}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 10, article 1573}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12101573}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221028-47283}, pages = {1 -- 19}, abstract = {Determining the earthquake hazard of any settlement is one of the primary studies for reducing earthquake damage. Therefore, earthquake hazard maps used for this purpose must be renewed over time. The Turkey Earthquake Hazard Map has been used instead of the Turkey Earthquake Zones Map since 2019. A probabilistic seismic hazard analysis was performed using these last two maps and different attenuation relationships for Bitlis Province (Eastern Turkey), located in the Lake Van Basin, which has a high seismic risk. The earthquake parameters were determined by considering all districts and neighborhoods in the province. Probabilistic seismic hazard analyses were carried out for these settlements using seismic sources and four different attenuation relationships.
The obtained values are compared with the design spectrum stated in the last two earthquake maps. Significant differences exist between the design spectra obtained according to the different exceedance probabilities. In this study, adaptive pushover analyses of sample reinforced concrete buildings were performed using the design ground motion level. Structural analyses were carried out using three different design spectra, as given in the last two seismic design codes and the mean spectrum obtained from attenuation relationships. Different design spectra significantly change the target displacements predicted for the performance levels of the buildings.}, subject = {Erdbeben}, language = {en} } @phdthesis{Zhang, author = {Zhang, Yongzheng}, title = {A Nonlocal Operator Method for Quasi-static and Dynamic Fracture Modeling}, doi = {10.25643/bauhaus-universitaet.4732}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221026-47321}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Material failure can be tackled by so-called nonlocal models, which introduce an intrinsic length scale into the formulation and, in the case of material failure, restore the well-posedness of the underlying boundary value problem or initial boundary value problem. Among nonlocal models, peridynamics (PD) has attracted a lot of attention as it allows the natural transition from continuum to discontinuum and thus allows modeling of discrete cracks without the need to describe and track the crack topology, which has been a major obstacle in traditional discrete crack approaches. This is achieved by replacing the divergence of the Cauchy stress tensor with an integral over so-called bond forces, which account for the interaction of particles. A quasi-continuum approach is then used to calibrate the material parameters of the bond forces, i.e., equating the PD energy with the energy of a continuum. One major issue for the application of PD to general complex problems is that it is limited to fairly simple material behavior and pure mechanical problems based on explicit time integration. PD has been extended to other applications but simultaneously loses its simplicity and ease in modeling material failure. Furthermore, conventional PD suffers from instability and hourglass modes that require stabilization. It also requires the use of constant horizon sizes, which drastically reduces its computational efficiency. The latter issue was resolved by the so-called dual-horizon peridynamics (DH-PD) formulation and the introduction of the duality of horizons. Within the nonlocal operator method (NOM), the concept of nonlocality is further extended and can be considered a generalization of DH-PD. Combined with the energy functionals of various physical models, the nonlocal forms based on the dual-support concept can be derived. In addition, the variation of the energy functional allows implicit formulations of the nonlocal theory. While traditional integral equations are formulated in an integral domain, the dual-support approaches are based on dual integral domains. One prominent feature of NOM is its compatibility with variational and weighted residual methods. The NOM yields a direct numerical implementation based on the weighted residual method for many physical problems without the need for shape functions. Only the definition of the energy or the boundary value problem is needed, which drastically facilitates the implementation.
The nonlocal operator plays an equivalent role to the derivatives of the shape functions in meshless methods and finite element methods (FEM). Based on the variational principle, the residual and the tangent stiffness matrix can be obtained with ease by a series of matrix multiplications. In addition, NOM can be used to derive many nonlocal models in strong form. The principal contributions of this dissertation are the implementation and application of NOM, and also the development of approaches for dealing with fractures within the NOM, mostly for dynamic fractures. The primary coverage and results of the dissertation are as follows: -The first/higher-order implicit NOM and explicit NOM, including a detailed description of the implementation, are presented. The NOM is based on so-called support, dual-support, nonlocal operators, and an operator energy functional ensuring stability. The nonlocal operator is a generalization of the conventional differential operators. Combined with the method of weighted residuals and variational principles, NOM establishes the residual and tangent stiffness matrix of the operator energy functional through simple matrix operations, without the need for shape functions as in other classical computational methods such as FEM. NOM only requires the definition of the energy, drastically simplifying its implementation. For the sake of conciseness, the implementation in this chapter is focused on linear elastic solids only, though the NOM can handle more complex nonlinear problems. An explicit nonlocal operator method for the dynamic analysis of elastic solid problems is also presented. The explicit NOM avoids the calculation of the tangent stiffness matrix as in the implicit NOM model. The explicit scheme comprises the Verlet-velocity algorithm. The NOM can be very flexible and efficient for solving partial differential equations (PDEs). It is also quite easy for readers to use the NOM and extend it to solve other complicated physical phenomena described by one or a set of PDEs. Several numerical examples are presented to show the capabilities of this method. -A nonlocal operator method for the dynamic analysis of (thin) Kirchhoff plates is proposed. The nonlocal Hessian operator is derived from a second-order Taylor series expansion. NOM is higher-order continuous, which is exploited for thin plate analysis that requires \$C^1\$ continuity. The nonlocal dynamic governing formulation and operator energy functional for Kirchhoff plates are derived from a variational principle. The Verlet-velocity algorithm is used for time discretization. After confirming the accuracy of the nonlocal Hessian operator, several numerical examples are simulated by the nonlocal dynamic Kirchhoff plate formulation. -A nonlocal fracture modeling approach is developed and applied to the simulation of quasi-static and dynamic fractures using the NOM. The phase field's nonlocal weak and associated strong forms are derived from a variational principle. The NOM requires only the definition of energy. We present both a nonlocal implicit phase field model and a nonlocal explicit phase field model for fracture; the first approach is better suited for quasi-static fracture problems, while the key application of the latter is dynamic fracture.
To demonstrate the performance of the underlying approach, several benchmark examples for quasi-static and dynamic fracture are solved.}, subject = {Variationsprinzip}, language = {en} } @phdthesis{Yousefi, author = {Yousefi, Hassan}, title = {Discontinuous propagating fronts: linear and nonlinear systems}, doi = {10.25643/bauhaus-universitaet.4717}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220922-47178}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {356}, abstract = {The aim of this study is controlling of spurious oscillations developing around discontinuous solutions of both linear and non-linear wave equations or hyperbolic partial differential equations (PDEs). The equations include both first-order and second-order (wave) hyperbolic systems. In these systems even smooth initial conditions, or smoothly varying source (load) terms could lead to discontinuous propagating solutions (fronts). For the first order hyperbolic PDEs, the concept of central high resolution schemes is integrated with the multiresolution-based adaptation to capture properly both discontinuous propagating fronts and effects of fine-scale responses on those of larger scales in the multiscale manner. This integration leads to using central high resolution schemes on non-uniform grids; however, such simulation is unstable, as the central schemes are originally developed to work properly on uniform cells/grids. Hence, the main concern is stable collaboration of central schemes and multiresoltion-based cell adapters. Regarding central schemes, the considered approaches are: 1) Second order central and central-upwind schemes; 2) Third order central schemes; 3) Third and fourth order central weighted non-oscillatory schemes (central-WENO or CWENO); 4) Piece-wise parabolic methods (PPMs) obtained with two different local stencils. For these methods, corresponding (nonlinear) stability conditions are studied and modified, as well. Based on these stability conditions several limiters are modified/developed as follows: 1) Several second-order limiters with total variation diminishing (TVD) feature, 2) Second-order uniformly high order accurate non-oscillatory (UNO) limiters, 3) Two third-order nonlinear scaling limiters, 4) Two new limiters for PPMs. Numerical results show that adaptive solvers lead to cost-effective computations (e.g., in some 1-D problems, number of adapted grid points are less than 200 points during simulations, while in the uniform-grid case, to have the same accuracy, using of 2049 points is essential). Also, in some cases, it is confirmed that fine scale responses have considerable effects on higher scales. In numerical simulation of nonlinear first order hyperbolic systems, the two main concerns are: convergence and uniqueness. The former is important due to developing of the spurious oscillations, the numerical dispersion and the numerical dissipation. Convergence in a numerical solution does not guarantee that it is the physical/real one (the uniqueness feature). Indeed, a nonlinear systems can converge to several numerical results (which mathematically all of them are true). In this work, the convergence and uniqueness are directly studied on non-uniform grids/cells by the concepts of local numerical truncation error and numerical entropy production, respectively. Also, both of these concepts have been used for cell/grid adaptations. So, the performance of these concepts is also compared by the multiresolution-based method. 
Several 1-D and 2-D numerical examples are examined to confirm the efficiency of the adaptive solver. Examples involve problems with convex and non-convex fluxes. In the latter case, due to the development of complex waves, proper capturing of the real solutions needs more attention. For this purpose, the use of method adaptation seems to be essential (in parallel to the cell/grid adaptation). This new type of adaptation is also performed in the framework of the multiresolution analysis. Regarding second order hyperbolic PDEs (mechanical waves), the regularization concept is used to cure artificial (numerical) oscillation effects, especially for high-gradient or discontinuous solutions. There, oscillations are removed by the regularization concept acting as a post-processor. Simulations will be performed directly on the second-order form of wave equations. It should be mentioned that it is possible to rewrite second order wave equations as a system of first-order waves, and then simulate the new system by high resolution schemes. However, this approach leads to an increase in the number of variables (especially for 3D problems). The numerical discretization is performed by the compact finite difference (FD) formulation with desired features, e.g., methods with spectral-like or optimized-error properties. These FD methods are developed to handle high frequency waves (such as waves near earthquake sources). The performance of several regularization approaches is studied (both theoretically and numerically); finally, a proper regularization approach controlling the Gibbs phenomenon is recommended. At the end, some numerical results are provided to confirm the efficiency of numerical solvers enhanced by the regularization concept. In this part, shock-like responses due to local and abrupt changes of physical properties, and also stress wave propagation in stochastic-like domains, are studied.}, subject = {Partielle Differentialgleichung}, language = {en} } @article{AlYasiriMutasharGuerlebecketal., author = {Al-Yasiri, Zainab Riyadh Shaker and Mutashar, Hayder Majid and G{\"u}rlebeck, Klaus and Lahmer, Tom}, title = {Damage Sensitive Signals for the Assessment of the Conditions of Wind Turbine Rotor Blades Using Electromagnetic Waves}, series = {Infrastructures}, volume = {2022}, journal = {Infrastructures}, number = {Volume 7, Issue 8 (August 2022), article 104}, editor = {Shafiullah, GM}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures7080104}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220831-47093}, pages = {18}, abstract = {One of the most important renewable energy technologies used nowadays is the wind power turbine. In this paper, we are interested in identifying the operating status of wind turbines, especially rotor blades, by means of multiphysical models. It is a state-of-the-art technology to test mechanical structures with ultrasonic-based methods. However, due to the density and the required high resolution, the testing is performed with high-frequency waves, which cannot penetrate the structure in depth. Therefore, there is a need to adopt techniques in the fields of multiphysical model-based inversion schemes or data-driven structural health monitoring. Before investing effort in the development of such approaches, further insights and approaches are necessary to make the techniques applicable to structures such as wind power plants (blades).
Among the expected developments, further accelerations of the so-called "forward codes" for a more efficient implementation of the wave equation could be envisaged. Here, we employ electromagnetic waves for the early detection of cracks. Because in many practical situations it is not possible to apply techniques from tomography (characterized by multiple sources and sensor pairs), we focus here on the question of whether the existence of cracks can be determined by using only one source for the sent waves.}, subject = {Windkraftwerk}, language = {en} } @article{GuoZhuangChenetal., author = {Guo, Hongwei and Zhuang, Xiaoying and Chen, Pengwan and Alajlan, Naif and Rabczuk, Timon}, title = {Analysis of three-dimensional potential problems in non-homogeneous media with physics-informed deep collocation method using material transfer learning and sensitivity analysis}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, doi = {10.1007/s00366-022-01633-6}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220811-46764}, pages = {1 -- 22}, abstract = {In this work, we present a deep collocation method (DCM) for three-dimensional potential problems in non-homogeneous media. This approach utilizes a physics-informed neural network with material transfer learning, reducing the solution of the non-homogeneous partial differential equations to an optimization problem. We tested different configurations of the physics-informed neural network including smooth activation functions, sampling methods for collocation point generation, and combined optimizers. A material transfer learning technique is utilized for non-homogeneous media with different material gradations and parameters, which enhances the generality and robustness of the proposed method. In order to identify the most influential parameters of the network configuration, we carried out a global sensitivity analysis. Finally, we provide a convergence proof of our DCM. The approach is validated through several benchmark problems, also testing different material variations.}, subject = {Deep learning}, language = {en} } @article{ChakrabortyAnitescuZhuangetal., author = {Chakraborty, Ayan and Anitescu, Cosmin and Zhuang, Xiaoying and Rabczuk, Timon}, title = {Domain adaptation based transfer learning approach for solving PDEs on complex geometries}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, doi = {10.1007/s00366-022-01661-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220811-46776}, pages = {1 -- 20}, abstract = {In machine learning, if the training data is independently and identically distributed as the test data, then a trained model can make accurate predictions for new samples of data. Conventional machine learning has a strong dependence on massive amounts of training data which are domain specific to understand their latent patterns. In contrast, Domain adaptation and Transfer learning methods are sub-fields within machine learning that are concerned with solving the inescapable problem of insufficient training data by relaxing the domain dependence hypothesis. In this contribution, this issue is addressed and, by making a novel combination of both methods, we develop a computationally efficient and practical algorithm to solve boundary value problems based on nonlinear partial differential equations.
We adopt a meshfree analysis framework to integrate the prevailing geometric modelling techniques based on NURBS and present an enhanced deep collocation approach that also plays an important role in the accuracy of solutions. We start with a brief introduction on how these methods expand upon this framework. We observe an excellent agreement between these methods and show how fine-tuning a pre-trained network to a specialized domain may lead to outstanding performance compared to existing approaches. As proof of concept, we illustrate the performance of our proposed model on several benchmark problems.}, subject = {Maschinelles Lernen}, language = {en} } @article{Hanna, author = {Hanna, John}, title = {Computational Modelling for the Effects of Capsular Clustering on Fracture of Encapsulation-Based Self-Healing Concrete Using XFEM and Cohesive Surface Technique}, series = {Applied Sciences}, volume = {2022}, journal = {Applied Sciences}, number = {Volume 12, issue 10, article 5112}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app12105112}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220721-46717}, pages = {1 -- 17}, abstract = {The fracture of microcapsules is an important issue in releasing the healing agent for healing the cracks in encapsulation-based self-healing concrete. The capsular clustering generated from the concrete mixing process is considered one of the critical factors in the fracture mechanism. Since there is a lack of studies in the literature regarding this issue, the design of self-healing concrete cannot be made without an appropriate modelling strategy. In this paper, the effects of microcapsule size and clustering on the fractured microcapsules are studied computationally. A simple 2D computational modelling approach is developed based on the eXtended Finite Element Method (XFEM) and the cohesive surface technique. The proposed model shows that the microcapsule size and clustering have significant roles in governing the load-carrying capacity and the crack propagation pattern, and determine whether the microcapsule will be fractured or debonded from the concrete matrix. The higher the microcapsule circumferential contact length, the higher the load-carrying capacity. When it is lower than 25\% of the microcapsule circumference, there is a greater possibility of debonding of the microcapsule from the concrete. The greater the core/shell ratio (smaller shell thickness), the greater the likelihood of microcapsules being fractured.}, subject = {Beton}, language = {en} } @phdthesis{Nouri, author = {Nouri, Hamidreza}, title = {Mechanical Behavior of two dimensional sheets and polymer compounds based on molecular dynamics and continuum mechanics approach}, doi = {10.25643/bauhaus-universitaet.4670}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220713-46700}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {152}, abstract = {In brief, this thesis encompasses two major parts examining the mechanical responses of polymer compounds and two-dimensional materials: 1- A molecular dynamics approach is employed to study the transverse impact behavior of polymers, polymer compounds, and two-dimensional materials. 2- The large deflection of circular and rectangular membranes is examined by employing a continuum mechanics approach.
Two-dimensional (2D) materials, including graphene and molybdenum disulfide (MoS2), exhibit new and promising physical and chemical properties, opening new opportunities to be utilized alone or to enhance the performance of conventional materials. These 2D materials have attracted tremendous attention owing to their outstanding physical properties, especially concerning transverse impact loading. Polymers, whether with a carbon backbone (organic polymers) or without carbon atoms in the backbone (inorganic polymers), like polydimethylsiloxane (PDMS), have extraordinary characteristics; in particular, their flexibility allows various easy ways of forming and casting. This simple shape processing makes polymers an excellent material often used as a matrix in composites (polymer compounds). In this PhD work, classical Molecular Dynamics (MD) is implemented to study the transverse impact loading of 2D materials as well as polymer compounds reinforced with graphene sheets. In particular, MD was adopted to investigate perforation of the target and the impact resistance force. By employing the MD approach, the minimum velocity of the projectile that creates perforation and passes through the target is obtained. The largest part of the investigation focused on how graphene could enhance the impact properties of the compound. A further purpose of this work was to discover the effect of the atomic arrangement of 2D materials on the impact problem. To this aim, the impact properties of two different 2D materials, graphene and MoS2, are studied. The simulation of chemical functionalization was carried out systematically, either with covalently bonded molecules or with non-bonded ones, focusing the following efforts on the covalently bonded species, revealed as the most efficient linkers. To study the transverse impact behavior using the classical MD approach, the Large-scale Atomic/Molecular Massively Parallel Simulator (LAMMPS) software, which is well known among researchers, is employed. The simulation is done through predefined commands in LAMMPS. Generally, these commands (atom style, pair style, angle style, dihedral style, improper style, kspace style, read data, fix, run, compute, and so on) are used to simulate and run the model for the desired outputs. Depending on the particle and model types, suitable inter-atomic potentials (force fields) are considered. The ensembles, constraints, and boundary conditions are applied depending upon the problem definition. To do so, atom creation is needed. Python codes are developed to generate the particles that define the atomic arrangement of each model. Each atomic arrangement is introduced separately to LAMMPS for simulation. After applying constraints and boundary conditions, LAMMPS uses integrators such as the velocity-Verlet integrator, Brownian dynamics, or other types of integrators to run the simulation, and finally the outputs emerge. The outputs are inspected carefully to appreciate the natural behavior of the problem. Appreciating the natural properties of the materials assists us in designing new applicable materials. In the investigation of the large deflection of circular and rectangular membranes, which constitutes the second part of this thesis, a continuum mechanics approach is implemented. Nonlinear F{\"o}ppl membrane theory, which carefully retains the nonlinear terms of the governing equations of motion, is considered to establish the non-linear partial differential equilibrium equations of the membranes under distributed and centric point loads.
The Galerkin and energy methods are utilized to solve non-linear partial differential equilibrium equations of circular and rectangular plates respectively. Maximum deflection as well as stress through the film region, which are kinds of issue in many industrial applications, are obtained.}, subject = {Molekulardynamik}, language = {en} } @article{KumariHarirchianLahmeretal., author = {Kumari, Vandana and Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Evaluation of Machine Learning and Web-Based Process for Damage Score Estimation of Existing Buildings}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 5, article 578}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12050578}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220509-46387}, pages = {1 -- 23}, abstract = {The seismic vulnerability assessment of existing reinforced concrete (RC) buildings is a significant source of disaster mitigation plans and rescue services. Different countries evolved various Rapid Visual Screening (RVS) techniques and methodologies to deal with the devastating consequences of earthquakes on the structural characteristics of buildings and human casualties. Artificial intelligence (AI) methods, such as machine learning (ML) algorithm-based methods, are increasingly used in various scientific and technical applications. The investigation toward using these techniques in civil engineering applications has shown encouraging results and reduced human intervention, including uncertainties and biased judgment. In this study, several known non-parametric algorithms are investigated toward RVS using a dataset employing different earthquakes. Moreover, the methodology encourages the possibility of examining the buildings' vulnerability based on the factors related to the buildings' importance and exposure. In addition, a web-based application built on Django is introduced. The interface is designed with the idea to ease the seismic vulnerability investigation in real-time. The concept was validated using two case studies, and the achieved results showed the proposed approach's potential efficiency}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Liu, author = {Liu, Bokai}, title = {Stochastic multiscale modeling of polymeric nanocomposites using Data-driven techniques}, doi = {10.25643/bauhaus-universitaet.4637}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220503-46379}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, abstract = {In recent years, lightweight materials, such as polymer composite materials (PNCs) have been studied and developed due to their excellent physical and chemical properties. Structures composed of these composite materials are widely used in aerospace engineering structures, automotive components, and electrical devices. The excellent and outstanding mechanical, thermal, and electrical properties of Carbon nanotube (CNT) make it an ideal filler to strengthen polymer materials' comparable properties. The heat transfer of composite materials has very promising engineering applications in many fields, especially in electronic devices and energy storage equipment. It is essential in high-energy density systems since electronic components need heat dissipation functionality. Or in other words, in electronic devices the generated heat should ideally be dissipated by light and small heat sinks. 
Polymeric composites consist of fillers embedded in a polymer matrix; the former significantly affect the overall (macroscopic) performance of the material. There are many common carbon-based fillers such as single-walled carbon nanotubes (SWCNT), multi-walled carbon nanotubes (MWCNT), carbon nanobuds (CNB), fullerene, and graphene. Additives inside the matrix have become a popular subject for researchers. Some extraordinary characteristics, such as high load performance, lightweight design, excellent chemical resistance, easy processing, and heat transfer, make the design of polymeric nanotube composites (PNCs) flexible. Due to the reinforcing effects of different fillers on composite materials, the design has a higher degree of freedom and the structure can be tailored according to the needs of specific applications. As already stated, our research focus will be on SWCNT-enhanced PNCs. Since experiments are time-consuming, sometimes expensive, and cannot shed light on phenomena taking place, for instance, at the interfaces/interphases of composites, they are often complemented by theoretical and computational analysis. While most studies are based on deterministic approaches, there is a comparatively lower number of stochastic methods accounting for uncertainties in the input parameters. In deterministic models, the output of the model is fully determined by the parameter values and the initial conditions. However, uncertainties in the input parameters, such as aspect ratio, volume fraction, and thermal properties of fiber and matrix, need to be taken into account for reliable predictions. In this research, a stochastic multiscale method is provided to study the influence of numerous uncertain input parameters on the thermal conductivity of the composite. Therefore, a hierarchical multi-scale method based on computational homogenization is presented to predict the macroscopic thermal conductivity based on the fine-scale structure. In order to study the inner mechanism, we use the finite element method and employ surrogate models to conduct a Global Sensitivity Analysis (GSA). The GSA is performed in order to quantify the influence of the conductivity of the fiber, matrix, Kapitza resistance, volume fraction, and aspect ratio on the macroscopic conductivity. Therefore, we compute first-order and total-effect sensitivity indices with different surrogate models. As stochastic multiscale models are computationally expensive, surrogate approaches are commonly exploited. With the emergence of high performance computing and artificial intelligence, machine learning has become a popular modeling tool for numerous applications. Machine learning (ML) is commonly used in regression and maps data through specific rules and algorithms to build input-output models. ML methods are particularly useful for nonlinear input-output relationships when sufficient data is available. ML has also been used in the design of new materials and multiscale analysis. For instance, artificial neural networks and integrated learning seem to be ideally suited for such a task. They can theoretically simulate any non-linear relationship through the connection of neurons. Mapping relationships are employed to carry out data-driven simulations of inputs and outputs in stochastic modeling. This research aims to develop stochastic multi-scale computational models of PNCs in heat transfer. Multi-scale stochastic modeling with uncertainty analysis and machine learning methods consists of the following components: -Uncertainty Analysis.
A surrogate-based global sensitivity analysis is coupled with a hierarchical multi-scale method employing computational homogenization. The effect of the conductivity of the fibers and the matrix, the Kapitza resistance, volume fraction, and aspect ratio on the 'macroscopic' conductivity of the composite is systematically studied. All selected surrogate models consistently yield the conclusion that the most influential input parameter is the aspect ratio, followed by the volume fraction. The Kapitza resistance has no significant effect on the thermal conductivity of the PNCs. The most accurate surrogate model in terms of the R2 value is the moving least squares (MLS). -Hybrid Machine Learning Algorithms. A combination of artificial neural network (ANN) and particle swarm optimization (PSO) is applied to estimate the relationship between variable input and output parameters. The ANN is used for modeling the composite while PSO improves the prediction performance through an optimized global minimum search. The thermal conductivity of the fibers and the matrix, the Kapitza resistance, volume fraction, and aspect ratio are selected as input parameters. The output is the macroscopic (homogenized) thermal conductivity of the composite. The results show that the PSO significantly improves the predictive ability of this hybrid intelligent algorithm, which outperforms traditional neural networks. -Stochastic Integrated Machine Learning. A stochastic integrated machine learning based multiscale approach for the prediction of the macroscopic thermal conductivity in PNCs is developed. Seven types of machine learning models are exploited in this research, namely Multivariate Adaptive Regression Splines (MARS), Support Vector Machine (SVM), Regression Tree (RT), Bagging Tree (Bag), Random Forest (RF), Gradient Boosting Machine (GBM), and Cubist. They are used as components of stochastic modeling to construct the relationship between the uncertain input variables and the macroscopic thermal conductivity of PNCs. Particle Swarm Optimization (PSO) is used for hyper-parameter tuning to find the global optimal values, leading to a significant reduction in the computational cost. The advantages and disadvantages of the various methods are also analyzed in terms of computing time and model complexity to finally give a recommendation for the applicability of the different models.}, subject = {Polymere}, language = {en} } @article{ZhangRen, author = {Zhang, Yongzheng and Ren, Huilong}, title = {Implicit implementation of the nonlocal operator method: an open source code}, series = {Engineering with computers}, volume = {2022}, journal = {Engineering with computers}, publisher = {Springer}, address = {London}, doi = {10.1007/s00366-021-01537-x}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220216-45930}, pages = {1 -- 35}, abstract = {In this paper, we present an open-source code for the first-order and higher-order nonlocal operator method (NOM), including a detailed description of the implementation. The NOM is based on so-called support, dual-support, nonlocal operators, and an operator energy functional ensuring stability. The nonlocal operator is a generalization of the conventional differential operators. Combined with the method of weighted residuals and variational principles, NOM establishes the residual and tangent stiffness matrix of the operator energy functional through simple matrix operations, without the need for shape functions as in other classical computational methods such as FEM.
NOM only requires the definition of the energy, drastically simplifying its implementation. The implementation in this paper is focused on linear elastic solids for the sake of conciseness, though the NOM can handle more complex nonlinear problems. The NOM is very flexible and efficient for solving partial differential equations (PDEs), and it is also quite easy for readers to use the NOM and extend it to solve other complicated physical phenomena described by one or a set of PDEs. Finally, we present some classical benchmark problems including the cantilever beam and the plate-with-a-hole problem, and we also extend the method to solve complicated problems including phase-field fracture modeling and gradient elasticity materials.}, subject = {Strukturmechanik}, language = {en} } @article{Zhang, author = {Zhang, Yongzheng}, title = {Nonlocal dynamic Kirchhoff plate formulation based on nonlocal operator method}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, publisher = {Springer}, address = {London}, doi = {10.1007/s00366-021-01587-1}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220209-45849}, pages = {1 -- 35}, abstract = {In this study, we propose a nonlocal operator method (NOM) for the dynamic analysis of (thin) Kirchhoff plates. The nonlocal Hessian operator is derived based on a second-order Taylor series expansion. The NOM does not require any shape functions and associated derivatives as in 'classical' approaches such as FEM, drastically facilitating the implementation. Furthermore, NOM is higher order continuous, which is exploited for thin plate analysis that requires C1 continuity. The nonlocal dynamic governing formulation and operator energy functional for Kirchhoff plates are derived from a variational principle. The Verlet-velocity algorithm is used for the time discretization. After confirming the accuracy of the nonlocal Hessian operator, several numerical examples are simulated by the nonlocal dynamic Kirchhoff plate formulation.}, subject = {Angewandte Mathematik}, language = {en} } @article{RabczukGuoZhuangetal., author = {Rabczuk, Timon and Guo, Hongwei and Zhuang, Xiaoying and Chen, Pengwan and Alajlan, Naif}, title = {Stochastic deep collocation method based on neural architecture search and transfer learning for heterogeneous porous media}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, publisher = {Springer}, address = {London}, doi = {10.1007/s00366-021-01586-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220209-45835}, pages = {1 -- 26}, abstract = {We present a stochastic deep collocation method (DCM) based on neural architecture search (NAS) and transfer learning for heterogeneous porous media. We first carry out a sensitivity analysis to determine the key hyper-parameters of the network to reduce the search space and subsequently employ hyper-parameter optimization to finally obtain the parameter values. The presented NAS based DCM also saves the weights and biases of the most favorable architectures, which are then used in the fine-tuning process. We also employ transfer learning techniques to drastically reduce the computational cost. The presented DCM is then applied to the stochastic analysis of heterogeneous porous material. Therefore, a three-dimensional stochastic flow model is built, providing a benchmark for the simulation of groundwater flow in highly heterogeneous aquifers.
The performance of the presented NAS based DCM is verified in different dimensions using the method of manufactured solutions. We show that it significantly outperforms finite difference methods in both accuracy and computational cost.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Valizadeh, author = {Valizadeh, Navid}, title = {Developments in Isogeometric Analysis and Application to High-Order Phase-Field Models of Biomembranes}, doi = {10.25643/bauhaus-universitaet.4565}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220114-45658}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Isogeometric analysis (IGA) is a numerical method for solving partial differential equations (PDEs), which was introduced with the aim of integrating finite element analysis with computer-aided design systems. The main idea of the method is to use the same spline basis functions which describe the geometry in CAD systems for the approximation of solution fields in the finite element method (FEM). Originally, NURBS which is a standard technology employed in CAD systems was adopted as basis functions in IGA but there were several variants of IGA using other technologies such as T-splines, PHT splines, and subdivision surfaces as basis functions. In general, IGA offers two key advantages over classical FEM: (i) by describing the CAD geometry exactly using smooth, high-order spline functions, the mesh generation process is simplified and the interoperability between CAD and FEM is improved, (ii) IGA can be viewed as a high-order finite element method which offers basis functions with high inter-element continuity and therefore can provide a primal variational formulation of high-order PDEs in a straightforward fashion. The main goal of this thesis is to further advance isogeometric analysis by exploiting these major advantages, namely precise geometric modeling and the use of smooth high-order splines as basis functions, and develop robust computational methods for problems with complex geometry and/or complex multi-physics. As the first contribution of this thesis, we leverage the precise geometric modeling of isogeometric analysis and propose a new method for its coupling with meshfree discretizations. We exploit the strengths of both methods by using IGA to provide a smooth, geometrically-exact surface discretization of the problem domain boundary, while the Reproducing Kernel Particle Method (RKPM) discretization is used to provide the volumetric discretization of the domain interior. The coupling strategy is based upon the higher-order consistency or reproducing conditions that are directly imposed in the physical domain. The resulting coupled method enjoys several favorable features: (i) it preserves the geometric exactness of IGA, (ii) it circumvents the need for global volumetric parameterization of the problem domain, (iii) it achieves arbitrary-order approximation accuracy while preserving higher-order smoothness of the discretization. Several numerical examples are solved to show the optimal convergence properties of the coupled IGA-RKPM formulation, and to demonstrate its effectiveness in constructing volumetric discretizations for complex-geometry objects. As for the next contribution, we exploit the use of smooth, high-order spline basis functions in IGA to solve high-order surface PDEs governing the morphological evolution of vesicles. 
These governing equations often consist of geometric PDEs, high-order PDEs on stationary or evolving surfaces, or a combination of them. We propose an isogeometric formulation for solving these PDEs. In the context of geometric PDEs, we consider phase-field approximations of mean curvature flow and Willmore flow problems and numerically study the convergence behavior of isogeometric analysis for these problems. As a model problem for high-order PDEs on stationary surfaces, we consider the Cahn-Hilliard equation on a sphere, where the surface is modeled using a phase-field approach. As for the high-order PDEs on evolving surfaces, a phase-field model of a deforming multi-component vesicle, which consists of two fourth-order nonlinear PDEs, is solved using isogeometric analysis in a primal variational framework. Through several numerical examples in 2D, 3D and axisymmetric 3D settings, we show the robustness of IGA for solving the considered phase-field models. Finally, we present a monolithic, implicit formulation based on isogeometric analysis and generalized-alpha time integration for simulating hydrodynamics of vesicles according to a phase-field model. Compared to earlier works, the number of equations of the phase-field model which need to be solved is reduced by leveraging the high continuity of NURBS functions, and the algorithm is extended to 3D settings. We use the residual-based variational multi-scale method (RBVMS) for solving the Navier-Stokes equations, while the rest of the PDEs in the phase-field model are treated using a standard Galerkin-based IGA. We introduce the resistive immersed surface (RIS) method into the formulation, which can be employed for an implicit description of complex geometries using a diffuse-interface approach. The implementation highlights the robustness of the RBVMS method for Navier-Stokes equations of incompressible flows with non-trivial localized forcing terms including bending and tension forces of the vesicle. The potential of the phase-field model and isogeometric analysis for accurate simulation of a variety of fluid-vesicle interaction problems in 2D and 3D is demonstrated.}, subject = {Phasenfeldmodell}, language = {en} } @article{SchmidtLahmer, author = {Schmidt, Albrecht and Lahmer, Tom}, title = {Efficient domain decomposition based reliability analysis for polymorphic uncertain material parameters}, series = {Proceedings in Applied Mathematics \& Mechanics}, volume = {2021}, journal = {Proceedings in Applied Mathematics \& Mechanics}, number = {Volume 21, issue 1}, publisher = {Wiley-VHC}, address = {Weinheim}, doi = {10.1002/pamm.202100014}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220112-45563}, pages = {1 -- 4}, abstract = {A realistic uncertainty description incorporating aleatoric and epistemic uncertainties can be formulated within the framework of polymorphic uncertainty, which is computationally demanding. Utilizing a domain decomposition approach for random field based uncertainty models, the proposed level-based sampling method can reduce these computational costs significantly and shows good agreement with a standard sampling technique.
While 2-level configurations tend to get unstable with decreasing sampling density 3-level setups show encouraging results for the investigated reliability analysis of a structural unit square.}, subject = {Polymorphie}, language = {en} } @phdthesis{Wang, author = {Wang, Jiasheng}, title = {Lebensdauerabsch{\"a}tzung von Bauteilen aus globularem Grauguss auf der Grundlage der lokalen gießprozessabh{\"a}ngigen Werkstoffzust{\"a}nde}, doi = {10.25643/bauhaus-universitaet.4554}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220111-45542}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {165}, abstract = {Das Ziel der Arbeit ist, eine m{\"o}gliche Verbesserung der G{\"u}te der Lebensdauervorhersage f{\"u}r Gusseisenwerkstoffe mit Kugelgraphit zu erreichen, wobei die Gießprozesse verschiedener Hersteller ber{\"u}cksichtigt werden. Im ersten Schritt wurden Probenk{\"o}rper aus GJS500 und GJS600 von mehreren Gusslieferanten gegossen und daraus Schwingproben erstellt. Insgesamt wurden Schwingfestigkeitswerte der einzelnen gegossenen Proben sowie der Proben des Bauteils von verschiedenen Gussherstellern weltweit entweder durch direkte Schwingversuche oder durch eine Sammlung von Betriebsfestigkeitsversuchen bestimmt. Dank der metallografischen Arbeit und Korrelationsanalyse konnten drei wesentliche Parameter zur Bestimmung der lokalen Dauerfestigkeit festgestellt werden: 1. statische Festigkeit, 2. Ferrit- und Perlitanteil der Mikrostrukturen und 3. Kugelgraphitanzahl pro Fl{\"a}cheneinheit. Basierend auf diesen Erkenntnissen wurde ein neues Festigkeitsverh{\"a}ltnisdiagramm (sogenanntes Sd/Rm-SG-Diagramm) entwickelt. Diese neue Methodik sollte vor allem erm{\"o}glichen, die Bauteildauerfestigkeit auf der Grundlage der gemessenen oder durch eine Gießsimulation vorhersagten lokalen Zugfestigkeitswerte sowie Mikrogef{\"u}genstrukturen besser zu prognostizieren. Mithilfe der Versuche sowie der Gießsimulation ist es gelungen, unterschiedliche Methoden der Lebensdauervorhersage unter Ber{\"u}cksichtigung der Herstellungsprozesse weiterzuentwickeln.}, subject = {Grauguss}, language = {de} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. For improvement of the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of the FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and idle printing samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. 
The artificial intelligence methods of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate toughness, part thickness, and production cost as dependent variables. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method. On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to comparatively undergo deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @article{RenZhuangOterkusetal., author = {Ren, Huilong and Zhuang, Xiaoying and Oterkus, Erkan and Zhu, Hehua and Rabczuk, Timon}, title = {Nonlocal strong forms of thin plate, gradient elasticity, magneto-electro-elasticity and phase-field fracture by nonlocal operator method}, series = {Engineering with Computers}, volume = {2021}, journal = {Engineering with Computers}, doi = {10.1007/s00366-021-01502-8}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211207-45388}, pages = {1 -- 22}, abstract = {The derivation of nonlocal strong forms for many physical problems remains cumbersome in traditional methods. In this paper, we apply the variational principle/weighted residual method based on the nonlocal operator method for the derivation of nonlocal forms for elasticity, thin plate, gradient elasticity, electro-magneto-elasticity and the phase-field fracture method. The nonlocal governing equations are expressed as an integral form on support and dual-support. The first example shows that the nonlocal elasticity has the same form as dual-horizon non-ordinary state-based peridynamics. The derivation is simple and general, and it can efficiently convert many local physical models into their corresponding nonlocal forms. In addition, a criterion based on the instability of the nonlocal gradient is proposed for fracture modelling in linear elasticity. Several numerical examples are presented to validate nonlocal elasticity and the nonlocal thin plate.}, subject = {Bruchmechanik}, language = {en} } @article{AlkamLahmer, author = {Alkam, Feras and Lahmer, Tom}, title = {A robust method of the status monitoring of catenary poles installed along high-speed electrified train tracks}, series = {Results in Engineering}, volume = {2021}, journal = {Results in Engineering}, number = {volume 12, article 100289}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2021.100289}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211011-45212}, pages = {1 -- 8}, abstract = {Electric trains are considered one of the most eco-friendly and safest means of transportation. Catenary poles are used worldwide to support overhead power lines for electric trains. The performance of the catenary poles has an extensive influence on the integrity of the train systems and, consequently, the connected human services. It has nowadays become a must to develop SHM systems that provide the instantaneous status of catenary poles in service, making the decision-making processes to keep or repair the damaged poles more feasible.
This study develops a data-driven, model-free approach for status monitoring of cantilever structures, focusing on pre-stressed, spun-cast ultrahigh-strength concrete catenary poles installed along high-speed train tracks. The proposed approach evaluates multiple damage features in a unified damage index, which leads to straightforward interpretation and comparison of the output. Besides, it distinguishes between multiple damage scenarios of the poles, either the ones caused by material degradation of the concrete or by the cracks that can propagate during the life span of the given structure. Moreover, using a logistic function to classify the integrity of the structure avoids the expensive learning step in the existing damage detection approaches, namely the modern machine learning and deep learning methods. The findings of this study look very promising when applied to other types of cantilever structures, such as the poles that support power transmission lines, antenna masts, chimneys, and wind turbines.}, subject = {Fahrleitung}, language = {en} } @phdthesis{Mauludin, author = {Mauludin, Luthfi Muhammad}, title = {Computational Modeling of Fracture in Encapsulation-Based Self-Healing Concrete Using Cohesive Elements}, doi = {10.25643/bauhaus-universitaet.4520}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211008-45204}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {170}, abstract = {Encapsulation-based self-healing concrete has received a lot of attention nowadays in the civil engineering field. These capsules are embedded in the cementitious matrix during concrete mixing. When cracks appear, the embedded capsules placed along the path of the incoming crack are fractured and then release healing agents in the vicinity of the damage. The materials of the capsules need to be designed in a way that they are able to break with small deformation, so the internal fluid can be released to seal the crack. This study focuses on computational modeling of fracture in encapsulation-based self-healing concrete. Numerical 2D and 3D models with randomly packed aggregates and capsules have been developed to analyze the fracture mechanism that plays a significant role in the fracture probability of capsules and consequently the self-healing process. The capsules are assumed to be made of Poly Methyl Methacrylate (PMMA) and the potential cracks are represented by pre-inserted cohesive elements with tension and shear softening laws along the element boundaries of the mortar matrix, aggregates, capsules, and at the interfaces between these phases. The effects of volume fraction, core-wall thickness ratio, and mismatched fracture properties of the capsules on the load carrying capacity of self-healing concrete and the fracture probability of the capsules are investigated.
The output of this study will become a valuable tool to assist not only the experimentalists but also the manufacturers in designing an appropriate capsule material for self-healing concrete.}, subject = {Beton}, language = {en} } @unpublished{KhosraviSheikhKhozaniCooper, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Cooper, James R.}, title = {Predicting stable gravel-bed river hydraulic geometry: A test of novel, advanced, hybrid data mining algorithms}, volume = {2021}, doi = {10.25643/bauhaus-universitaet.4499}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211004-44998}, abstract = {Accurate prediction of stable alluvial hydraulic geometry, in which erosion and sedimentation are in equilibrium, is one of the most difficult but critical topics in the field of river engineering. Data mining algorithms have been gaining more attention in this field due to their high performance and flexibility. However, an understanding of the potential for these algorithms to provide fast, cheap, and accurate predictions of hydraulic geometry is lacking. This study provides the first quantification of this potential. Using at-a-station field data, predictions of flow depth, water-surface width and longitudinal water surface slope are made using three standalone data mining techniques - Instance-based Learning (IBK), KStar, Locally Weighted Learning (LWL) - along with four types of novel hybrid algorithms in which the standalone models are trained with Vote, Attribute Selected Classifier (ASC), Regression by Discretization (RBD), and Cross-validation Parameter Selection (CVPS) algorithms (Vote-IBK, Vote-Kstar, Vote-LWL, ASC-IBK, ASC-Kstar, ASC-LWL, RBD-IBK, RBD-Kstar, RBD-LWL, CVPS-IBK, CVPS-Kstar, CVPS-LWL). Through a comparison of their predictive performance and a sensitivity analysis of the driving variables, the results reveal: (1) Shield stress was the most effective parameter in the prediction of all geometry dimensions; (2) hybrid models had a higher prediction power than standalone data mining models, empirical equations and traditional machine learning algorithms; (3) the Vote-Kstar model had the highest performance in predicting depth and width, and ASC-Kstar in estimating slope, each providing very good prediction performance. Through these algorithms, the hydraulic geometry of any river can potentially be predicted accurately and with ease using just a few, readily available flow and channel parameters.
Thus, the results reveal that these models have great potential for use in stable channel design in data-poor catchments, especially in developing nations where technical modelling skills and understanding of the hydraulic and sediment processes occurring in the river system may be lacking.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Rasulzade, Shahla and Lahmer, Tom and Raj Das, Rohan}, title = {A Synthesized Study Based on Machine Learning Approaches for Rapid Classifying Earthquake Damage Grades to RC Buildings}, series = {Applied Sciences}, volume = {2021}, journal = {Applied Sciences}, number = {Volume 11, issue 16, article 7540}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app11167540}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210818-44853}, pages = {1 -- 33}, abstract = {A vast number of existing buildings were constructed before the development and enforcement of seismic design codes and therefore run the risk of being severely damaged under the action of seismic excitations. This poses not only a threat to the life of people but also affects the socio-economic stability of the affected area. Therefore, it is necessary to assess such buildings' present vulnerability to make an educated decision regarding risk mitigation by seismic strengthening techniques such as retrofitting. However, it is neither economically feasible nor timely to inspect, repair, and augment every old building on an urban scale. As a result, reliable rapid screening methods, namely Rapid Visual Screening (RVS), have garnered increasing interest among researchers and decision-makers alike. In this study, the effectiveness of five different Machine Learning (ML) techniques in vulnerability prediction applications has been investigated. The damage data of four different earthquakes from Ecuador, Haiti, Nepal, and South Korea have been utilized to train and test the developed models. Eight performance modifiers have been implemented as variables with supervised ML. The investigations in this paper illustrate that the vulnerability classes assessed by the ML techniques were very close to the actual damage levels observed in the buildings.}, subject = {Maschinelles Lernen}, language = {en} } @article{NooriMortazaviKeshtkarietal., author = {Noori, Hamidreza and Mortazavi, Bohayra and Keshtkari, Leila and Zhuang, Xiaoying and Rabczuk, Timon}, title = {Nanopore creation in MoS2 and graphene monolayers by nanoparticles impact: a reactive molecular dynamics study}, series = {Applied Physics A}, volume = {2021}, journal = {Applied Physics A}, number = {volume 127, article 541}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s00339-021-04693-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44756}, pages = {1 -- 13}, abstract = {In this work, extensive reactive molecular dynamics simulations are conducted to analyze nanopore creation by nanoparticle impact on single-layer molybdenum disulfide (MoS2) with 1T and 2H phases. We also compare the results with a graphene monolayer. In our simulations, nanosheets are exposed to a spherical rigid carbon projectile with high initial velocities ranging from 2 to 23 km/s. Results for three different structures are compared to examine the most critical factors in the perforation and resistance force during the impact.
To analyze the perforation and impact resistance, the kinetic energy and displacement time history of the projectile, as well as the perforation resistance force of the projectile, are investigated. Interestingly, although the elastic modulus and tensile strength of graphene are almost five times higher than those of MoS2, the results demonstrate that the 1T and 2H-MoS2 phases are more resistant to impact loading and perforation than graphene. For the MoS2 nanosheets, we find that the 2H phase is more resistant to impact loading than the 1T counterpart. Our reactive molecular dynamics results highlight that, in addition to strength and toughness, atomic structure is another crucial factor that can contribute substantially to the impact resistance of 2D materials. The obtained results can be useful to guide experimental setups for nanopore creation in MoS2 or other 2D lattices.}, subject = {Nanomechanik}, language = {en} } @phdthesis{KhademiZahedi, author = {Khademi Zahedi, Reza}, title = {Stress Distribution in Buried Defective PE Pipes and Crack Propagation in Nanosheets}, doi = {10.25643/bauhaus-universitaet.4481}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210803-44814}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {235}, abstract = {Buried PE pipelines are the main choice for transporting hazardous hydrocarbon fluids and are used in urban gas distribution networks. Molecular dynamics (MD) simulations are used to investigate material behavior at the nanoscale.}, subject = {Gasleitung}, language = {en} } @misc{Habtemariam, type = {Master Thesis}, author = {Habtemariam, Abinet Kifle}, title = {Numerical Demolition Analysis of a Slender Guyed Antenna Mast}, doi = {10.25643/bauhaus-universitaet.4460}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210723-44609}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {75}, abstract = {The main purpose of the thesis is to ensure the safe demolition of old guyed antenna masts that are located in different parts of Germany. The major problem in the demolition of these masts is that they may fall in an unexpected direction because of buckling. The objective of this thesis is the development of numerical models using the finite element method (FEM) and the assurance of a controlled collapse by determining different time setups for the detonation of the explosives that cut down the cables. The results of this thesis will help avoid unexpected outcomes during the demolition process and prevent the risk of the mast collapsing onto nearby structures.}, subject = {Abbruch}, language = {en} } @phdthesis{Alkam, author = {Alkam, Feras}, title = {Vibration-based Monitoring of Concrete Catenary Poles using Bayesian Inference}, volume = {2021}, publisher = {Bauhaus-Universit{\"a}tsverlag}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4433}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210526-44338}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {This work presents a robust status monitoring approach for detecting damage in cantilever structures based on logistic functions. Also, a stochastic damage identification approach based on changes of eigenfrequencies is proposed. The proposed algorithms are verified using catenary poles of electrified railway tracks. The proposed damage features overcome the limitation of frequency-based damage identification methods available in the literature, which are valid to detect damage in structures to Level 1 only.
Changes in eigenfrequencies of cantilever structures are enough to identify possible local damage at Level 3, i.e., to cover damage detection, localization, and quantification. The proposed algorithms identified the damage with relatively small errors, even at a high noise level.}, subject = {Parameteridentifikation}, language = {en} } @article{AlkamLahmer, author = {Alkam, Feras and Lahmer, Tom}, title = {Eigenfrequency-Based Bayesian Approach for Damage Identification in Catenary Poles}, series = {Infrastructures}, volume = {2021}, journal = {Infrastructures}, number = {Volume 6, issue 4, article 57}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures6040057}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210510-44256}, pages = {1 -- 19}, abstract = {This study proposes an efficient Bayesian, frequency-based damage identification approach to identify damages in cantilever structures with an acceptable error rate, even at high noise levels. The catenary poles of electric high-speed train systems were selected as a realistic case study to cover the objectives of this study. Compared to other frequency-based damage detection approaches described in the literature, the proposed approach is efficiently able to detect damages in cantilever structures to higher levels of damage detection, namely identifying both the damage location and severity using a low-cost structural health monitoring (SHM) system with a limited number of sensors; for example, accelerometers. The integration of Bayesian inference, as a stochastic framework, in the proposed approach, makes it possible to utilize the benefit of data fusion in merging the informative data from multiple damage features, which increases the quality and accuracy of the results. The findings provide the decision-maker with the information required to manage the maintenance, repair, or replacement procedures.}, subject = {Fahrleitung}, language = {en} } @article{LashkarAraKalantariSheikhKhozanietal., author = {Lashkar-Ara, Babak and Kalantari, Niloofar and Sheikh Khozani, Zohreh and Mosavi, Amir}, title = {Assessing Machine Learning versus a Mathematical Model to Estimate the Transverse Shear Stress Distribution in a Rectangular Channel}, series = {Mathematics}, volume = {2021}, journal = {Mathematics}, number = {Volume 9, Issue 6, Article 596}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math9060596}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210504-44197}, pages = {15}, abstract = {One of the most important subjects of hydraulic engineering is the reliable estimation of the transverse distribution in the rectangular channel of bed and wall shear stresses. This study makes use of the Tsallis entropy, genetic programming (GP) and adaptive neuro-fuzzy inference system (ANFIS) methods to assess the shear stress distribution (SSD) in the rectangular channel. To evaluate the results of the Tsallis entropy, GP and ANFIS models, laboratory observations were used in which shear stress was measured using an optimized Preston tube. This is then used to measure the SSD in various aspect ratios in the rectangular channel. To investigate the shear stress percentage, 10 data series with a total of 112 different data for were used. The results of the sensitivity analysis show that the most influential parameter for the SSD in smooth rectangular channel is the dimensionless parameter B/H, Where the transverse coordinate is B, and the flow depth is H. 
With the parameters (b/B), (B/H) for the bed and (z/H), (B/H) for the wall as inputs, the modeling of the GP was better than the other one. Based on the analysis, it can be concluded that the use of GP and ANFIS algorithms is more effective in estimating shear stress in smooth rectangular channels than the Tsallis entropy-based equations.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Ren, author = {Ren, Huilong}, title = {Dual-horizon peridynamics and Nonlocal operator method}, doi = {10.25643/bauhaus-universitaet.4403}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210412-44039}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {223}, abstract = {In the last two decades, Peridynamics (PD) attracts much attention in the field of fracture mechanics. One key feature of PD is the nonlocality, which is quite different from the ideas in conventional methods such as FEM and meshless method. However, conventional PD suffers from problems such as constant horizon, explicit algorithm, hourglass mode. In this thesis, by examining the nonlocality with scrutiny, we proposed several new concepts such as dual-horizon (DH) in PD, dual-support (DS) in smoothed particle hydrodynamics (SPH), nonlocal operators and operator energy functional. The conventional PD (SPH) is incorporated in the DH-PD (DS-SPH), which can adopt an inhomogeneous discretization and inhomogeneous support domains. The DH-PD (DS-SPH) can be viewed as some fundamental improvement on the conventional PD (SPH). Dual formulation of PD and SPH allows h-adaptivity while satisfying the conservations of linear momentum, angular momentum and energy. By developing the concept of nonlocality further, we introduced the nonlocal operator method as a generalization of DH-PD. Combined with energy functional of various physical models, the nonlocal forms based on dual-support concept are derived. In addition, the variation of the energy functional allows implicit formulation of the nonlocal theory. At last, we developed the higher order nonlocal operator method which is capable of solving higher order partial differential equations on arbitrary domain in higher dimensional space. Since the concepts are developed gradually, we described our findings chronologically. In chapter 2, we developed a DH-PD formulation that includes varying horizon sizes and solves the "ghost force" issue. The concept of dual-horizon considers the unbalanced interactions between the particles with different horizon sizes. The present formulation fulfills both the balances of linear momentum and angular momentum exactly with arbitrary particle discretization. All three peridynamic formulations, namely bond based, ordinary state based and non-ordinary state based peridynamics can be implemented within the DH-PD framework. A simple adaptive refinement procedure (h-adaptivity) is proposed reducing the computational cost. Both two- and three- dimensional examples including the Kalthoff-Winkler experiment and plate with branching cracks are tested to demonstrate the capability of the method. In chapter 3, a nonlocal operator method (NOM) based on the variational principle is proposed for the solution of waveguide problem in computational electromagnetic field. Common differential operators as well as the variational forms are defined within the context of nonlocal operators. The present nonlocal formulation allows the assembling of the tangent stiffness matrix with ease, which is necessary for the eigenvalue analysis of the waveguide problem. 
The present formulation is applied to solve 1D Schrodinger equation, 2D electrostatic problem and the differential electromagnetic vector wave equations based on electric fields. In chapter 4, a general nonlocal operator method is proposed which is applicable for solving partial differential equations (PDEs) of mechanical problems. The nonlocal operator can be regarded as the integral form, ``equivalent'' to the differential form in the sense of a nonlocal interaction model. The variation of a nonlocal operator plays an equivalent role as the derivatives of the shape functions in the meshless methods or those of the finite element method. Based on the variational principle, the residual and the tangent stiffness matrix can be obtained with ease. The nonlocal operator method is enhanced here also with an operator energy functional to satisfy the linear consistency of the field. A highlight of the present method is the functional derived based on the nonlocal operator can convert the construction of residual and stiffness matrix into a series of matrix multiplications using the predefined nonlocal operators. The nonlocal strong forms of different functionals can be obtained easily via the concept of support and dual-support. Several numerical examples of different types of PDEs are presented. In chapter 5, we extended the NOM to higher order scheme by using a higher order Taylor series expansion of the unknown field. Such a higher order scheme improves the original NOM in chapter 3 and chapter 4, which can only achieve one-order convergence. The higher order NOM obtains all partial derivatives with specified maximal order simultaneously without resorting to shape functions. The functional based on the nonlocal operators converts the construction of residual and stiffness matrix into a series of matrix multiplication on the nonlocal operator matrix. Several numerical examples solved by strong form or weak form are presented to show the capabilities of this method. In chapter 6, the NOM proposed as a particle-based method in chapter 3,4,5, has difficulty in imposing accurately the boundary conditions of various orders. In this paper, we converted the particle-based NOM into a scheme with interpolation property. The new scheme describes partial derivatives of various orders at a point by the nodes in the support and takes advantage of the background mesh for numerical integration. The boundary conditions are enforced via the modified variational principle. The particle-based NOM can be viewed a special case of NOM with interpolation property when nodal integration is used. The scheme based on numerical integration greatly improves the stability of the method, as a consequence, the operator energy functional in particle-based NOM is not required. We demonstrated the capabilities of current method by solving the gradient solid problems and comparing the numerical results with the available exact solutions. In chapter 7, we derived the DS-SPH in solid within the framework of variational principle. The tangent stiffness matrix of SPH can be obtained with ease, and can be served as the basis for the present implicit SPH. We proposed an hourglass energy functional, which allows the direct derivation of hourglass force and hourglass tangent stiffness matrix. The dual-support is {involved} in all derivations based on variational principles and is automatically satisfied in the assembling of stiffness matrix. 
The implementation of the stiffness matrix comprises two steps: the nodal assembly based on the deformation gradient and the global assembly over all nodes. Several numerical examples are presented to validate the method.}, subject = {Peridynamik}, language = {en} } @phdthesis{Harirchian, author = {Harirchian, Ehsan}, title = {Improved Rapid Assessment of Earthquake Hazard Safety of Existing Buildings Using a Hierarchical Type-2 Fuzzy Logic Model}, doi = {10.25643/bauhaus-universitaet.4396}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210326-43963}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {143}, abstract = {Although it is impractical to avert subsequent natural disasters, advances in simulation science and seismological studies make it possible to lessen the catastrophic damage. There currently exists in many urban areas a large number of structures which are prone to damage by earthquakes. These were constructed without the guidance of a national seismic code, either before it existed or before it was enforced. For instance, in Istanbul, Turkey, a high seismic area, around 90\% of buildings are substandard, which can be generalized to other earthquake-prone regions in Turkey. The reliability of this building stock with respect to earthquake-induced collapse is currently uncertain. Nonetheless, it is also not feasible to perform a detailed seismic vulnerability analysis on each building as a solution to the scenario, as it would be too complicated and expensive. This indicates the necessity of a reliable, rapid, and computationally easy method for seismic vulnerability assessment, commonly known as Rapid Visual Screening (RVS). In RVS methodology, an observational survey of buildings is performed, and according to the data collected during the visual inspection, a structural score is calculated without performing any structural calculations to determine the expected damage of a building and whether the building needs detailed assessment. Although this method might save time and resources, due to the subjective/qualitative judgments of the experts who perform the inspection, the evaluation process is dominated by vagueness and uncertainties; the vagueness can be handled adequately through fuzzy set theory, but this does not cover all sorts of uncertainties due to its crisp membership functions. In this study, a novel method of rapid visual hazard safety assessment of buildings against earthquakes is introduced in which an interval type-2 fuzzy logic system (IT2FLS) is used to cover uncertainties. In addition, the proposed method provides the possibility to evaluate the earthquake risk of the building by considering factors related to building importance and exposure. A smartphone app prototype of the method has been introduced. For validation of the proposed method, two case studies have been selected, and the results of the analysis demonstrate the robust efficiency of the proposed method.}, subject = {Fuzzy-Logik}, language = {en} } @unpublished{KhosraviSheikhKhozaniMao, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Mao, Luka}, title = {A comparison between advanced hybrid machine learning algorithms and empirical equations applied to abutment scour depth prediction}, doi = {10.25643/bauhaus-universitaet.4388}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210311-43889}, pages = {43}, abstract = {Complex vortex flow patterns around bridge piers, especially during floods, cause a scour process that can result in the failure of foundations.
Abutment scour is a complex three-dimensional phenomenon that is difficult to predict, especially with traditional formulas obtained using empirical approaches such as regressions. This paper presents a test of a standalone Kstar model with five novel hybrid algorithms of bagging (BA-Kstar), dagging (DA-Kstar), random committee (RC-Kstar), random subspace (RS-Kstar), and weighted instance handler wrapper (WIHW-Kstar) to predict scour depth (ds) for clear-water conditions. The dataset consists of 99 scour depth data points from flume experiments (Dey and Barbhuiya, 2005) using abutment shapes such as vertical, semicircular and 45° wing. Four dimensionless parameters, namely relative flow depth (h/l), excess abutment Froude number (Fe), relative sediment size (d50/l) and relative submergence (d50/h), were considered for the prediction of relative scour depth (ds/l). A portion of the dataset was used for the calibration (70\%), and the remainder was used for model validation. Pearson correlation coefficients helped decide the relevance of the input parameter combinations, and finally four different combinations of input parameters were used. The performance of the models was assessed visually and with quantitative metrics. Overall, the best input combination for the vertical abutment shape is the combination of Fe, d50/l and h/l, while for the semicircular and 45° wing shapes the combination of Fe and d50/l is the most effective input parameter combination. Our results show that incorporating Fe, d50/l and h/l leads to higher performance, while involving d50/h reduced the models' prediction power for the vertical abutment shape; for the semicircular and 45° wing shapes, involving h/l and d50/h leads to more error. The WIHW-Kstar model provided the highest performance in scour depth prediction around the vertical abutment shape, while the RC-Kstar model outperformed the other models for scour depth prediction around the semicircular and 45° wing shapes.}, subject = {maschinelles Lernen}, language = {en} } @phdthesis{Goswami, author = {Goswami, Somdatta}, title = {Phase field modeling of fracture with isogeometric analysis and machine learning methods}, doi = {10.25643/bauhaus-universitaet.4384}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210304-43841}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {168}, abstract = {This thesis presents the advances and applications of phase field modeling in fracture analysis. In this approach, the sharp crack surface topology in a solid is approximated by a diffusive crack zone governed by a scalar auxiliary variable. The uniqueness of phase field modeling is that the crack paths are automatically determined as part of the solution and no interface tracking is required. The damage parameter varies continuously over the domain. But this flexibility comes with associated difficulties: (1) a very fine spatial discretization is required to represent sharp local gradients correctly; (2) fine discretization results in high computational cost; (3) computation of higher-order derivatives is required for improved convergence rates; and (4) the curse of dimensionality affects conventional numerical integration techniques. As a consequence, the practical applicability of phase field models is severely limited. The research presented in this thesis addresses the difficulties of the conventional numerical integration techniques for phase field modeling in quasi-static brittle fracture analysis. The first method relies on polynomial splines over hierarchical T-meshes (PHT-splines) in the framework of isogeometric analysis (IGA).
An adaptive h-refinement scheme is developed based on the variational energy formulation of phase field modeling. The fourth-order phase field model provides increased regularity in the exact solution of the phase field equation and improved convergence rates for numerical solutions on a coarser discretization, compared to the second-order model. However, second-order derivatives of the phase field are required in the fourth-order model. Hence, at least a minimum of C1 continuous basis functions are essential, which is achieved using hierarchical cubic B-splines in IGA. PHT-splines enable the refinement to remain local at singularities and high gradients, consequently reducing the computational cost greatly. Unfortunately, when modeling complex geometries, multiple parameter spaces (patches) are joined together to describe the physical domain and there is typically a loss of continuity at the patch boundaries. This decrease of smoothness is dictated by the geometry description, where C0 parameterizations are normally used to deal with kinks and corners in the domain. Hence, the application of the fourth-order model is severely restricted. To overcome the high computational cost for the second-order model, we develop a dual-mesh adaptive h-refinement approach. This approach uses a coarser discretization for the elastic field and a finer discretization for the phase field. Independent refinement strategies have been used for each field. The next contribution is based on physics informed deep neural networks. The network is trained based on the minimization of the variational energy of the system described by general non-linear partial differential equations while respecting any given law of physics, hence the name physics informed neural network (PINN). The developed approach needs only a set of points to define the geometry, contrary to the conventional mesh-based discretization techniques. The concept of `transfer learning' is integrated with the developed PINN approach to improve the computational efficiency of the network at each displacement step. This approach allows a numerically stable crack growth even with larger displacement steps. An adaptive h-refinement scheme based on the generation of more quadrature points in the damage zone is developed in this framework. For all the developed methods, displacement-controlled loading is considered. The accuracy and the efficiency of both methods are studied numerically showing that the developed methods are powerful and computationally efficient tools for accurately predicting fractures.}, subject = {Phasenfeldmodell}, language = {en} } @unpublished{SheikhKhozaniKumbhakar, author = {Sheikh Khozani, Zohreh and Kumbhakar, Manotosh}, title = {Discussion of "Estimation of one-dimensional velocity distribution by measuring velocity at two points" by Yeganeh and Heidari (2020)}, doi = {10.25643/bauhaus-universitaet.4366}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210216-43663}, abstract = {The concept of information entropy together with the principle of maximum entropy to open channel flow is essentially based on some physical consideration of the problem under consideration. This paper is a discussion on Yeganeh and Heidari (2020)'s paper, who proposed a new approach for measuring vertical distribution of streamwise velocity in open channels. The discussers argue that their approach is conceptually incorrect and thus leads to a physically unrealistic situation. 
In addition, the discussers found some wrong mathematical expressions (which are assumed to be typos) written in the paper, and also point out that the authors did not cite some of the original papers on the topic.}, subject = {Geschwindigkeit}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. and Janizadeh, Saeid and Chandra Pal, Subodh and Chowdhuri, Indrajit and Siabi, Zhaleh and Norouzi, Akbar and Melesse, Assefa M. and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Comparative Analysis of Artificial Intelligence Models for Accurate Estimation of Groundwater Nitrate Concentration}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 20, article 5763}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20205763}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43364}, pages = {1 -- 23}, abstract = {Prediction of the groundwater nitrate concentration is of utmost importance for pollution control and water resource management. This research aims to model the spatial groundwater nitrate concentration in the Marvdasht watershed, Iran, based on several artificial intelligence methods of support vector machine (SVM), Cubist, random forest (RF), and Bayesian artificial neural network (Baysia-ANN) machine learning models. For this purpose, 11 independent variables affecting groundwater nitrate changes include elevation, slope, plan curvature, profile curvature, rainfall, piezometric depth, distance from the river, distance from residential, Sodium (Na), Potassium (K), and topographic wetness index (TWI) in the study area were prepared. Nitrate levels were also measured in 67 wells and used as a dependent variable for modeling. Data were divided into two categories of training (70\%) and testing (30\%) for modeling. The evaluation criteria coefficient of determination (R2), mean absolute error (MAE), root mean square error (RMSE), and Nash-Sutcliffe efficiency (NSE) were used to evaluate the performance of the models used. The results of modeling the susceptibility of groundwater nitrate concentration showed that the RF (R2 = 0.89, RMSE = 4.24, NSE = 0.87) model is better than the other Cubist (R2 = 0.87, RMSE = 5.18, NSE = 0.81), SVM (R2 = 0.74, RMSE = 6.07, NSE = 0.74), Bayesian-ANN (R2 = 0.79, RMSE = 5.91, NSE = 0.75) models. The results of groundwater nitrate concentration zoning in the study area showed that the northern parts of the case study have the highest amount of nitrate, which is higher in these agricultural areas than in other areas. 
The most important causes of nitrate pollution in these areas are agricultural activities and the use of groundwater from wells close to agricultural areas to irrigate the crops, which has led to the indiscriminate use of chemical fertilizers; these fertilizers are washed out by irrigation or rainwater, penetrate the groundwater and pollute the aquifer.}, subject = {Grundwasser}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, socio-economic factors, land use, and physical context. These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. On the other hand, layers corresponding to sub-criteria were made in ArcGIS 10.3 simultaneously, then via a weighted overlay (map algebra), a locating plan was created. In the next step, seven hypothetical tall buildings (20 stories), placed in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the streets and open spaces throughout the city. These processes have been modeled by MATLAB software, and the final fuzzy visibility plan was created by ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S.
and Janizadeh, Saeid and Chandra Pal, Subodh and Saha, Asish and Chakrabortty, Rabbin and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Novel Ensemble Approach of Deep Learning Neural Network (DLNN) Model and Particle Swarm Optimization (PSO) Algorithm for Prediction of Gully Erosion Susceptibility}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 19, article 5609}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20195609}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43341}, pages = {1 -- 27}, abstract = {This study aims to evaluate a new approach in modeling gully erosion susceptibility (GES) based on a deep learning neural network (DLNN) model and an ensemble particle swarm optimization (PSO) algorithm with DLNN (PSO-DLNN), comparing these approaches with common artificial neural network (ANN) and support vector machine (SVM) models in Shirahan watershed, Iran. For this purpose, 13 independent variables affecting GES in the study area, namely, altitude, slope, aspect, plan curvature, profile curvature, drainage density, distance from a river, land use, soil, lithology, rainfall, stream power index (SPI), and topographic wetness index (TWI), were prepared. A total of 132 gully erosion locations were identified during field visits. To implement the proposed model, the dataset was divided into the two categories of training (70\%) and testing (30\%). The results indicate that the area under the curve (AUC) value from receiver operating characteristic (ROC) considering the testing datasets of PSO-DLNN is 0.89, which indicates superb accuracy. The rest of the models are associated with optimal accuracy and have similar results to the PSO-DLNN model; the AUC values from ROC of DLNN, SVM, and ANN for the testing datasets are 0.87, 0.85, and 0.84, respectively. The efficiency of the proposed model in terms of prediction of GES was increased. Therefore, it can be concluded that the DLNN model and its ensemble with the PSO algorithm can be used as a novel and practical method to predict gully erosion susceptibility, which can help planners and managers to manage and reduce the risk of this phenomenon.}, subject = {Geoinformatik}, language = {en} } @article{MosaviQasemShokrietal., author = {Mosavi, Amir Hosein and Qasem, Sultan Noman and Shokri, Manouchehr and Band, Shahab S. and Mohammadzadeh, Ardashir}, title = {Fractional-Order Fuzzy Control Approach for Photovoltaic/Battery Systems under Unknown Dynamics, Variable Irradiation and Temperature}, series = {Electronics}, volume = {2020}, journal = {Electronics}, number = {Volume 9, issue 9, article 1455}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/electronics9091455}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43381}, pages = {1 -- 19}, abstract = {For this paper, the problem of energy/voltage management in photovoltaic (PV)/battery systems was studied, and a new fractional-order control system on basis of type-3 (T3) fuzzy logic systems (FLSs) was developed. New fractional-order learning rules are derived for tuning of T3-FLSs such that the stability is ensured. In addition, using fractional-order calculus, the robustness was studied versus dynamic uncertainties, perturbation of irradiation, and temperature and abruptly faults in output loads, and, subsequently, new compensators were proposed. 
In several examinations under difficult operating conditions, such as random temperature, variable irradiation, and abrupt changes in output load, the capability of the proposed controller was verified. In addition, in comparison with other methods, such as the proportional-integral-derivative (PID) controller, the sliding mode controller (SMC), passivity-based control systems (PBC), and the linear quadratic regulator (LQR), the superiority of the suggested method was demonstrated.}, subject = {Fuzzy-Logik}, language = {en} } @article{MosaviShokriMansoretal., author = {Mosavi, Amir Hosein and Shokri, Manouchehr and Mansor, Zulkefli and Qasem, Sultan Noman and Band, Shahab S. and Mohammadzadeh, Ardashir}, title = {Machine Learning for Modeling the Singular Multi-Pantograph Equations}, series = {Entropy}, volume = {2020}, journal = {Entropy}, number = {volume 22, issue 9, article 1041}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/e22091041}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43436}, pages = {1 -- 18}, abstract = {In this study, a new approach on the basis of intelligent systems and machine learning algorithms is introduced for solving singular multi-pantograph differential equations (SMDEs). For the first time, a type-2 fuzzy logic based approach is formulated to find an approximate solution. The rules of the suggested type-2 fuzzy logic system (T2-FLS) are optimized by the square root cubature Kalman filter (SCKF) such that the proposed fineness function is minimized. Furthermore, the stability and boundedness of the estimation error are proved by a novel approach on the basis of the Lyapunov theorem. The accuracy and robustness of the suggested algorithm are verified by several statistical examinations. It is shown that the suggested method results in an accurate solution with rapid convergence and a lower computational cost.}, subject = {Fuzzy-Regelung}, language = {en} } @article{BandJanizadehSahaetal., author = {Band, Shahab S. and Janizadeh, Saeid and Saha, Sunil and Mukherjee, Kaustuv and Khosrobeigi Bozchaloei, Saeid and Cerd{\`a}, Artemi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Evaluating the Efficiency of Different Regression, Decision Tree, and Bayesian Machine Learning Algorithms in Spatial Piping Erosion Susceptibility Using ALOS/PALSAR Data}, series = {Land}, volume = {2020}, journal = {Land}, number = {volume 9, issue 10, article 346}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/land9100346}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43424}, pages = {1 -- 22}, abstract = {Piping erosion is one form of water erosion that leads to significant changes in the landscape and environmental degradation. In the present study, we evaluated piping erosion modeling in the Zarandieh watershed of Markazi province in Iran based on random forest (RF), support vector machine (SVM), and Bayesian generalized linear model (Bayesian GLM) machine learning algorithms. For this goal, owing to the importance of various geo-environmental and soil properties in the evolution and creation of piping erosion, 18 variables were considered for modeling the piping erosion susceptibility in the Zarandieh watershed. A total of 152 piping erosion points were recognized in the study area, which were divided into training (70\%) and validation (30\%) sets for modeling. The area under the curve (AUC) was used to assess the efficiency of the RF, SVM, and Bayesian GLM models.
The piping erosion susceptibility results indicated that all three models (RF, SVM, and Bayesian GLM) had high efficiency in the testing step, with AUC values of 0.90 for RF, 0.88 for SVM, and 0.87 for Bayesian GLM. Altitude, pH, and bulk density were the variables that had the greatest influence on piping erosion susceptibility in the Zarandieh watershed. This result indicates that geo-environmental and soil chemical variables are responsible for the expansion of piping erosion in the Zarandieh watershed.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Winkel, author = {Winkel, Benjamin}, title = {A three-dimensional model of skeletal muscle for physiological, pathological and experimental mechanical simulations}, doi = {10.25643/bauhaus-universitaet.4300}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201211-43002}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In recent decades, a multitude of concepts and models have been developed to understand, assess and predict muscular mechanics in the context of physiological and pathological events. Most of these models are highly specialized and designed to selectively address fields in, e.g., medicine, sports science, forensics, product design or CGI; their data are often not transferable to other ranges of application. A single universal model, which covers the details of biochemical and neural processes as well as the development of internal and external force and motion patterns and appearance, would not be practical with regard to the diversity of the questions to be investigated and the task of finding answers efficiently. With reasonable limitations, though, a generalized approach is feasible. The objective of the work at hand was to develop a model for muscle simulation which covers the phenomenological aspects and is thus universally applicable in domains where, up until now, specialized models were utilized. This includes investigations of active and passive motion and of the structural interaction of muscles within the body and with external elements, for example in crash scenarios, but also research topics such as the verification of in vivo experiments and parameter identification. For this purpose, elements for the simulation of incompressible deformations were studied, adapted and implemented into the finite element code SLang. Various anisotropic, visco-elastic muscle models were developed or enhanced. The applicability was demonstrated on the basis of several examples, and a general basis for the implementation of further material models was developed and elaborated.}, subject = {Biomechanik}, language = {en} } @phdthesis{Rabizadeh, author = {Rabizadeh, Ehsan}, title = {Goal-oriented A Posteriori Error Estimation and Adaptive Mesh Refinement in 2D/3D Thermoelasticity Problems}, doi = {10.25643/bauhaus-universitaet.4286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201113-42864}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In recent years, substantial attention has been devoted to thermoelastic multifield problems and their numerical analysis. Thermoelasticity is one of the important categories of multifield problems, dealing with the effect of mechanical and thermal disturbances on an elastic body. In other words, thermoelasticity encompasses the phenomena that describe the elastic and thermal behavior of solids and their interactions under thermo-mechanical loadings.
Since providing an analytical solution for general coupled thermoelasticity problems is mathematically complicated, the development of alternative numerical solution techniques is essential. Due to the nature of numerical analysis methods, the presence of error in the results is inevitable; therefore, in any numerical simulation, the main concern is the accuracy of the approximation. Different error estimation (EE) methods exist to assess the overall quality of a numerical approximation. In many real-life numerical simulations, however, not only the overall error but also the local error, or the error in a particular quantity of interest, is of main concern. The error estimation techniques developed to evaluate the error in the quantity of interest are known as "goal-oriented" error estimation (GOEE) methods. This project, for the first time, investigates classical a posteriori error estimation and goal-oriented a posteriori error estimation in 2D/3D thermoelasticity problems. Generally, a posteriori error estimation techniques can be categorized into the two major branches of recovery-based and residual-based error estimators. In this research, the application of both recovery- and residual-based error estimators in thermoelasticity is studied. Moreover, in order to reduce the error in the quantity of interest efficiently and optimally in 2D and 3D thermoelastic problems, goal-oriented adaptive mesh refinement is performed. As the first application category, error estimation in classical thermoelasticity (CTE) is investigated. In the first step, an rh-adaptive thermo-mechanical formulation based on goal-oriented error estimation is proposed. The developed goal-oriented error estimation relies on different stress recovery techniques, i.e., the superconvergent patch recovery (SPR), L2-projection patch recovery (L2-PR), and weighted superconvergent patch recovery (WSPR). Moreover, a new adaptive refinement strategy (ARS) is presented that minimizes the error in a quantity of interest and refines the discretization such that the error is equally distributed in the refined mesh. The method is validated by numerous numerical examples where an analytical or reference solution is available. After investigating error estimation in classical thermoelasticity and evaluating the quality of the presented error estimators, we extended the application of the developed goal-oriented error estimation and the associated adaptive refinement technique to classical fully coupled dynamic thermoelasticity. In this part, we present an adaptive method for coupled dynamic thermoelasticity problems based on goal-oriented error estimation. We use dimensionless variables in the finite element formulation, and for the time integration we employ the acceleration-based Newmark-$\beta$ method. In this part, the SPR, L2-PR, and WSPR recovery methods are exploited to estimate the error in the quantity of interest (QoI). By using adaptive refinement in space, the error in the quantity of interest is minimized; the discretization is refined such that the error is equally distributed in the refined mesh. We demonstrate the efficiency of this method by numerous numerical examples. After studying the recovery-based error estimators, we investigated residual-based error estimation in thermoelasticity. In the last part of this research, we present a 3D adaptive method for thermoelastic problems based on goal-oriented error estimation, where the error is measured with respect to a pointwise quantity of interest.
We developed a method for a posteriori error estimation and mesh adaptation based on the dual weighted residual (DWR) method, which relies on duality principles and involves the solution of an adjoint problem. Here, we consider the application of the derived estimator and mesh refinement to two-/three-dimensional (2D/3D) thermo-mechanical multifield problems. In this study, the goal is considered to be given by singular pointwise functions, such as the point value or the point-value derivative at a specific point of interest (PoI). An adaptive algorithm has been adopted to refine the mesh so as to minimize the error in the goal quantity of interest. The mesh adaptivity procedure based on the DWR method is performed by adaptive local h-refinement/coarsening with allowed hanging nodes. According to the proposed DWR method, the error contribution of each element is evaluated. In the refinement process, the contribution of each element to the goal error is used as the mesh refinement criterion. In this study, we substantiate the accuracy and performance of this method by several numerical examples with available analytical solutions. Here, 2D and 3D problems under thermo-mechanical loadings are considered as benchmark problems. To show how accurately the derived estimator captures the exact error in the evaluation of the pointwise quantity of interest, the goal error effectivity index, a standard measure of the quality of an estimator, is calculated for all examples using the analytical solutions. Moreover, in order to demonstrate the efficiency of the proposed method and show the optimal behavior of the employed refinement method, the results of different conventional error estimators and refinement techniques (e.g., global uniform refinement, Kelly, and weighted Kelly techniques) are used for comparison.}, subject = {Mesh Refinement}, language = {en} }