@inproceedings{OPUS4-2451, title = {International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2451}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20150828-24515}, pages = {230}, abstract = {The 20th International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering will be held at the Bauhaus University Weimar from 20 to 22 July 2015. Architects, computer scientists, mathematicians, and engineers from all over the world will meet in Weimar for an interdisciplinary exchange of experiences, to report on their results in research, development and practice, and to discuss them. The conference covers a broad range of research areas: numerical analysis, function theoretic methods, partial differential equations, continuum mechanics, engineering applications, coupled problems, computer sciences, and related topics. Several plenary lectures in the aforementioned areas will take place during the conference. We invite architects, engineers, designers, computer scientists, mathematicians, planners, project managers, and software developers from business, science and research to participate in the conference!}, subject = {Angewandte Informatik}, language = {en} } @phdthesis{Abbas, author = {Abbas, Tajammal}, title = {Assessment of Numerical Prediction Models for Aeroelastic Instabilities of Bridges}, publisher = {Jonas Verlag}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.2716}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180515-27161}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {275}, abstract = {The phenomenon of aerodynamic instability caused by the wind is usually a major design criterion for long-span cable-supported bridges. If the wind speed exceeds the critical flutter speed of the bridge, this constitutes an Ultimate Limit State. The prediction of the flutter boundary, therefore, requires accurate and robust models. The complexity and uncertainty of models for such engineering problems demand strategies for model assessment. This study is an attempt to use the concepts of sensitivity and uncertainty analyses to assess the aeroelastic instability prediction models for long-span bridges. The state-of-the-art theory concerning the determination of the flutter stability limit is presented. Since flutter is a coupling of aerodynamic forcing with a structural dynamics problem, different types and classes of structural and aerodynamic models can be combined to study the interaction. 
Here, both numerical approaches and analytical models are utilised and coupled in different ways to assess the prediction quality of the coupled model.}, subject = {Br{\"u}cke}, language = {en} } @unpublished{AbbasKavrakovMorgenthaletal., author = {Abbas, Tajammal and Kavrakov, Igor and Morgenthal, Guido and Lahmer, Tom}, title = {Prediction of aeroelastic response of bridge decks using artificial neural networks}, doi = {10.25643/bauhaus-universitaet.4097}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40974}, abstract = {The assessment of wind-induced vibrations is considered vital for the design of long-span bridges. The aim of this research is to develop a methodological framework for robust and efficient prediction strategies for complex aerodynamic phenomena using hybrid models that employ numerical analyses as well as meta-models. Here, an approach to predict motion-induced aerodynamic forces is developed using an artificial neural network (ANN). The ANN is implemented in the classical formulation and trained with a comprehensive dataset which is obtained from computational fluid dynamics forced vibration simulations. The inputs to the ANN are the response time histories of a bridge section, whereas the outputs are the motion-induced forces. The developed ANN has been tested on training and test data for different cross-section geometries, providing promising predictions. The prediction is also performed for an ambient response input with multiple frequencies. Moreover, the trained ANN for aerodynamic forcing is coupled with the structural model to perform fully-coupled fluid--structure interaction analysis to determine the aeroelastic instability limit. The sensitivity of the model prediction quality and efficiency to the ANN parameters has also been highlighted. The proposed methodology has wide application in the analysis and design of long-span bridges.}, subject = {Aerodynamik}, language = {en} } @article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in the three forms of paddy, brown, and white are analyzed through pre-processing and segmentation using MATLAB. Ninety-two features, including 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of the data was evaluated, and the possibility of observing a significant difference between all features of the cultivars was studied using variance analysis. 
In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce the data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separation was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayer perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for the identification and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} } @article{AbdelnourZabel, author = {Abdelnour, Mena and Zabel, Volkmar}, title = {Modal identification of structures with a dynamic behaviour characterised by global and local modes at close frequencies}, series = {Acta Mechanica}, volume = {2023}, journal = {Acta Mechanica}, publisher = {Springer}, address = {Wien}, doi = {10.1007/s00707-023-03598-z}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230525-63822}, pages = {1 -- 21}, abstract = {Identification of the modal parameters of a space frame structure is a complex task due to the large number of degrees of freedom, close natural frequencies, and different vibrating mechanisms. Research has been carried out on the modal identification of rather simple truss structures. So far, less attention has been given to complex three-dimensional truss structures. This work develops a vibration-based methodology for determining modal information of three-dimensional space truss structures. The method uses a relatively complex space truss structure for its verification. Numerical modelling of the system gives modal information about the expected vibration behaviour. The identification process involves closely spaced modes that are characterised by local and global vibration mechanisms. To distinguish between local and global vibrations of the system, modal strain energies are used as an indicator. The experimental validation, which incorporated a modal analysis employing the stochastic subspace identification method, has confirmed that relatively high model orders need to be considered to identify specific mode shapes. Especially in the case of the determination of local deformation modes of space truss members, higher model orders have to be taken into account than in the modal identification of most other types of structures.}, subject = {Fachwerkbau}, language = {en} } @phdthesis{Abeltshauser, author = {Abeltshauser, Rainer}, title = {Identification and separation of physical effects of coupled systems by using defined model abstractions}, doi = {10.25643/bauhaus-universitaet.2860}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28600}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The thesis investigates the computer-aided simulation process for the operational vibration analysis of complex coupled systems. As part of the internal methods project "Absolute Values" of the BMW Group, the thesis deals with the analysis of the structural dynamic interactions and excitation interactions. 
The overarching aim of the methods project is to predict the operational vibrations of engines. Simulations are usually used to analyze technical aspects (e.g. operational vibrations, strength, ...) of single components in industrial development. The boundary conditions of submodels are mostly based on experience, so the interactions with neighboring components and systems are neglected. To obtain physically more realistic results while keeping the simulations efficient, this work aims to support the engineer during the preprocessing phase with useful criteria. First, suitable abstraction levels based on the existing literature are defined to identify structural dynamic interactions and excitation interactions of coupled systems. This makes it possible to separate the different effects of the coupled subsystems. On this basis, criteria are derived to assess the influence of interactions between the considered systems. These criteria can be used during the preprocessing phase and help the engineer to build up efficient models with respect to the interactions with neighboring systems. The method was developed by using several models with different complexity levels. Furthermore, the method is validated for application in an industrial environment using the example of a current combustion engine.}, subject = {Strukturdynamik}, language = {en} } @phdthesis{AbuBakar, author = {Abu Bakar, Ilyani Akmar}, title = {Computational Analysis of Woven Fabric Composites: Single- and Multi-Objective Optimizations and Sensitivity Analysis in Meso-scale Structures}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.4176}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200605-41762}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {151}, abstract = {This study presents a reliability analysis to address the mechanical behaviour issues existing in the current structural design of fabric structures. Purely predictive material models are highly desirable to facilitate an optimized design scheme and to significantly reduce time and cost at the design stage, such as experimental characterization. The present study examined the role of three major tasks: a) single-objective optimization, b) sensitivity analyses, and c) multi-objective optimization on proposed weave structures for woven fabric composites. For the single-objective optimization task, the first goal is to optimize the elastic properties of the proposed complex weave structure on a unit cell basis with periodic boundary conditions. We predict the geometric characteristics with respect to the skewness of woven fabric composites via an Evolutionary Algorithm (EA) and a parametric study. We also demonstrate the effect of complex weave structures on the fray tendency in woven fabric composites via a tightness evaluation. We utilize a procedure which does not require a numerical averaging process for evaluating the elastic properties of woven fabric composites. The fray tendency and skewness of woven fabrics depend upon the behaviour of the floats, which is related to the weave factor. Results of this study may suggest a broader view for further research into the effects of complex weave structures or may provide an alternative to the fray and skewness problems of current weave structures in woven fabric composites. A comprehensive study is developed on the complex weave structure model which adopts the dry woven fabric of the most promising pattern from the single-objective optimization, incorporating the uncertainty parameters of woven fabric composites. 
The comprehensive study covers regression-based and variance-based sensitivity analyses. The goal of the second task is to introduce the fabric uncertainty parameters and elaborate how they can be incorporated into finite element models of macroscopic material parameters, such as the elastic modulus and shear modulus of dry woven fabric subjected to uni-axial and biaxial deformations. Significant correlations in the study would indicate the need for a thorough investigation of woven fabric composites under uncertainty parameters. The study described here could serve as an alternative way to identify effective material properties without prolonged time consumption and expensive experimental tests. The last part focuses on a hierarchical stochastic multi-scale optimization approach (fine-scale and coarse-scale optimizations) under geometrical uncertainty parameters for hybrid composites considering complex weave structures. The fine-scale optimization determines the best lamina pattern that maximizes its macroscopic elastic properties and is conducted by the EA under the following uncertain mesoscopic parameters: yarn spacing, yarn height, yarn width and misalignment of the yarn angle. The coarse-scale optimization has been carried out to optimize the stacking sequences of a symmetric hybrid laminated composite plate with uncertain mesoscopic parameters by employing the Ant Colony Optimization (ACO) algorithm. The objective functions of the coarse-scale optimization are to minimize the cost (C) and weight (W) of the hybrid laminated composite plate considering the fundamental frequency and the buckling load factor as the design constraints. Based on the uncertainty criteria of the design parameters, the appropriate variation required for the structural design standards can be evaluated using the reliability tool, and an optimized design decision in consideration of cost can subsequently be determined.}, subject = {Verbundwerkstoff}, language = {en} } @phdthesis{Ahmad, author = {Ahmad, Sofyan}, title = {Reference Surface-Based System Identification}, doi = {10.25643/bauhaus-universitaet.2113}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20140205-21132}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {153}, abstract = {Environmental and operational variables and their impact on structural responses have been acknowledged as one of the most important challenges for the application of ambient vibration-based damage identification in structures. The damage detection procedures may yield poor results if the impacts of loading and environmental conditions of the structures are not considered. The reference-surface-based method proposed in this thesis is intended to overcome this problem. In the proposed method, meta-models are used to take into account significant effects of the environmental and operational variables. The usage of the approximation models allows the proposed method to handle multiple non-damaged variable effects simultaneously, which for other methods seems to be very complex. The inputs of the meta-model are the multiple non-damaged variables, while the output is a damage indicator. The reference-surface-based method diminishes the effect of the non-damaged variables on the vibration-based damage detection results. Hence, the structural condition that is assessed using ambient vibration data at any time would be more reliable. 
Immediate, reliable information regarding the structural condition is required to respond quickly to an event, that is, to take the necessary actions concerning the future use or further investigation of the structures, for instance shortly after extreme events such as earthquakes. The critical part of the proposed damage detection method is the learning phase, where the meta-models are trained using the input-output relation of observation data. Significant problems that may be encountered during the learning phase are outlined, and some remedies to overcome them are suggested. The proposed damage identification method is applied to numerical and experimental models. In addition to the natural frequencies, wavelet energy and stochastic subspace damage indicators are used.}, subject = {System Identification}, language = {en} } @inproceedings{AhmadZabelKoenke, author = {Ahmad, Sofyan and Zabel, Volkmar and K{\"o}nke, Carsten}, title = {WAVELET-BASED INDICATORS FOR RESPONSE SURFACE MODELS IN DAMAGE IDENTIFICATION OF STRUCTURES}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 04 - 06 2012, Bauhaus-University Weimar}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2758}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170306-27588}, pages = {14}, abstract = {In this paper, a wavelet energy damage indicator is used within response surface methodology to identify damage in a simulated filler beam railway bridge. The approximate model is designed to include the operational and environmental conditions in the assessment. The procedure is split into two stages, a training phase and a detection phase. During the training phase, a so-called response surface is built from training data using polynomial regression and radial basis function approximation approaches. The response surface is used to detect damage in the structure during the detection phase. The results show that the response surface model is able to detect moderate damage in one of the bridge supports while the temperatures and train velocities are varied.}, subject = {Angewandte Mathematik}, language = {en} } @article{AhmadiBaghbanSadeghzadehetal., author = {Ahmadi, Mohammad Hossein and Baghban, Alireza and Sadeghzadeh, Milad and Zamen, Mohammad and Mosavi, Amir and Shamshirband, Shahaboddin and Kumar, Ravinder and Mohammadi-Khanaposhtani, Mohammad}, title = {Evaluation of electrical efficiency of photovoltaic thermal solar collector}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1734094}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200304-41049}, pages = {545 -- 565}, abstract = {In this study, the machine learning methods of artificial neural networks (ANNs), least squares support vector machines (LSSVM), and neuro-fuzzy systems are used to advance prediction models for the thermal performance of a photovoltaic-thermal solar collector (PV/T). In the proposed models, the inlet temperature, flow rate, heat, solar radiation, and the sun heat have been considered as the input variables. 
The data set has been extracted through experimental measurements from a novel solar collector system. Different analyses are performed to examine the credibility of the introduced models and evaluate their performances. The proposed LSSVM model outperformed the ANFIS and ANN models. The LSSVM model is reported to be suitable when laboratory measurements are costly and time-consuming, or when obtaining such values requires sophisticated interpretation.}, subject = {Fotovoltaik}, language = {en} } @article{AlYasiriMutasharGuerlebecketal., author = {Al-Yasiri, Zainab Riyadh Shaker and Mutashar, Hayder Majid and G{\"u}rlebeck, Klaus and Lahmer, Tom}, title = {Damage Sensitive Signals for the Assessment of the Conditions of Wind Turbine Rotor Blades Using Electromagnetic Waves}, series = {Infrastructures}, volume = {2022}, journal = {Infrastructures}, number = {Volume 7, Issue 8 (August 2022), article 104}, editor = {Shafiullah, GM}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures7080104}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220831-47093}, pages = {18}, abstract = {One of the most important renewable energy technologies used nowadays is the wind power turbine. In this paper, we are interested in identifying the operating status of wind turbines, especially rotor blades, by means of multiphysical models. Testing mechanical structures with ultrasonic-based methods is state-of-the-art technology. However, due to the density and the required high resolution, the testing is performed with high-frequency waves, which cannot penetrate the structure in depth. Therefore, there is a need to adopt techniques in the fields of multiphysical model-based inversion schemes or data-driven structural health monitoring. Before investing effort in the development of such approaches, further insights are necessary to make the techniques applicable to structures such as wind power plants (blades). Among the expected developments, further accelerations of the so-called "forward codes" for a more efficient implementation of the wave equation could be envisaged. Here, we employ electromagnetic waves for the early detection of cracks. Because in many practical situations it is not possible to apply techniques from tomography (characterized by multiple sources and sensor pairs), we focus here on the question of whether the existence of cracks can be determined by using only one source for the emitted waves.}, subject = {Windkraftwerk}, language = {en} } @phdthesis{Alalade, author = {Alalade, Muyiwa}, title = {An Enhanced Full Waveform Inversion Method for the Structural Analysis of Dams}, doi = {10.25643/bauhaus-universitaet.3956}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190813-39566}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Since the Industrial Revolution in the 1700s, the high emission of gaseous wastes into the atmosphere from the usage of fossil fuels has caused a general increase in temperatures globally. To combat the environmental imbalance, there is an increase in the demand for renewable energy sources. Dams play a major role in the generation of "green" energy. However, these structures require frequent and strict monitoring to ensure safe and efficient operation. To tackle the challenges faced in the application of conventional dam monitoring techniques, this work proposes the inverse analysis of numerical models to identify damaged regions in the dam. 
Using a dynamic coupled hydro-mechanical Extended Finite Element Method (XFEM) model and a global optimization strategy, damage (crack) in the dam is identified. By employing seismic waves to probe the dam structure, more detailed information on the distribution of heterogeneous materials and damaged regions is obtained by the application of the Full Waveform Inversion (FWI) method. The FWI is based on a local optimization strategy and is thus highly dependent on the starting model. A variety of data acquisition setups are investigated, and an optimal setup is proposed. The effect of different starting models and of noise in the measured data on the damage identification is considered. Combining the starting-model independence of the global-optimization-based dynamic coupled hydro-mechanical XFEM method and the detailed output of the local-optimization-based FWI method, an enhanced Full Waveform Inversion is proposed for the structural analysis of dams.}, subject = {Talsperre}, language = {en} } @article{AlaladeReichertKoehnetal., author = {Alalade, Muyiwa and Reichert, Ina and K{\"o}hn, Daniel and Wuttke, Frank and Lahmer, Tom}, title = {A Cyclic Multi-Stage Implementation of the Full-Waveform Inversion for the Identification of Anomalies in Dams}, series = {Infrastructures}, volume = {2022}, journal = {Infrastructures}, number = {Volume 7, issue 12, article 161}, editor = {Qu, Chunxu and Gao, Chunxu and Zhang, Rui and Jia, Ziguang and Li, Jiaxiang}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures7120161}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221201-48396}, pages = {19}, abstract = {For the safe and efficient operation of dams, frequent monitoring and maintenance are required. These are usually expensive, time-consuming, and cumbersome. To alleviate these issues, we propose applying a wave-based scheme for the location and quantification of damages in dams. To obtain high-resolution "interpretable" images of the damaged regions, we drew inspiration from non-linear full-multigrid methods for inverse problems and applied a new cyclic multi-stage full-waveform inversion (FWI) scheme. Our approach is less susceptible to the stability issues faced by the standard FWI scheme when dealing with ill-posed problems. In this paper, we first selected an optimal acquisition setup and then applied synthetic data to demonstrate the capability of our approach in identifying a series of anomalies in dams by a mixture of reflection and transmission tomography. The results were sufficiently robust, showing the prospects of application in the field of non-destructive testing of dams.}, subject = {Damm}, language = {en} } @article{AlemuHabteLahmeretal., author = {Alemu, Yohannes L. and Habte, Bedilu and Lahmer, Tom and Urgessa, Girum}, title = {Topologically preoptimized ground structure (TPOGS) for the optimization of 3D RC buildings}, series = {Asian Journal of Civil Engineering}, volume = {2023}, journal = {Asian Journal of Civil Engineering}, publisher = {Springer International Publishing}, address = {Cham}, doi = {10.1007/s42107-023-00640-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230517-63677}, pages = {1 -- 11}, abstract = {As an optimization that starts from a randomly selected structure generally does not guarantee reasonable optimality, the use of a systematic approach, named the ground structure, is widely accepted in steel truss and frame structural design. 
However, in the case of reinforced concrete (RC) structural optimization, because of the orthogonal orientation of structural members, randomly chosen or architect-sketched framing is used. Such a one-time fixed layout trend, in addition to its lack of a systematic approach, does not necessarily guarantee optimality. In this study, an approach for generating a candidate ground structure to be used for the cost or weight minimization of 3D RC building structures with included slabs is developed. A multi-objective function at the floor optimization stage and a single-objective function at the frame optimization stage are considered. A particle swarm optimization (PSO) method is employed for selecting the optimal ground structure. This method enables the generation of a simple, yet viable, real-world representation of a topologically preoptimized ground structure while both structural and main architectural requirements are considered. This is supported by a case study for different floor domain sizes.}, subject = {Bodenmechanik}, language = {en} } @phdthesis{Alkam, author = {Alkam, Feras}, title = {Vibration-based Monitoring of Concrete Catenary Poles using Bayesian Inference}, volume = {2021}, publisher = {Bauhaus-Universit{\"a}tsverlag}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4433}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210526-44338}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {This work presents a robust status monitoring approach for detecting damage in cantilever structures based on logistic functions. Also, a stochastic damage identification approach based on changes in eigenfrequencies is proposed. The proposed algorithms are verified using catenary poles of electrified railway tracks. The proposed damage features overcome the limitation of frequency-based damage identification methods available in the literature, which are valid for detecting damage in structures to Level 1 only. Changes in the eigenfrequencies of cantilever structures are enough to identify possible local damage at Level 3, i.e., to cover damage detection, localization, and quantification. The proposed algorithms identified the damage with relatively small errors, even at a high noise level.}, subject = {Parameteridentifikation}, language = {en} } @article{AlkamLahmer, author = {Alkam, Feras and Lahmer, Tom}, title = {A robust method of the status monitoring of catenary poles installed along high-speed electrified train tracks}, series = {Results in Engineering}, volume = {2021}, journal = {Results in Engineering}, number = {volume 12, article 100289}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2021.100289}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211011-45212}, pages = {1 -- 8}, abstract = {Electric trains are considered one of the most eco-friendly and safest means of transportation. Catenary poles are used worldwide to support overhead power lines for electric trains. The performance of the catenary poles has an extensive influence on the integrity of the train systems and, consequently, the connected human services. It has become essential nowadays to develop SHM systems that provide the instantaneous status of catenary poles in service, making the decision-making processes of keeping or repairing the damaged poles more feasible. 
This study develops a data-driven, model-free approach for the status monitoring of cantilever structures, focusing on pre-stressed, spun-cast ultrahigh-strength concrete catenary poles installed along high-speed train tracks. The proposed approach evaluates multiple damage features in a unified damage index, which leads to straightforward interpretation and comparison of the output. Besides, it distinguishes between multiple damage scenarios of the poles, whether caused by material degradation of the concrete or by cracks that can propagate during the life span of the given structure. Moreover, using a logistic function to classify the integrity of the structure avoids the expensive learning step of existing damage detection approaches, namely modern machine and deep learning methods. The findings of this study look very promising when applied to other types of cantilever structures, such as the poles that support power transmission lines, antenna masts, chimneys, and wind turbines.}, subject = {Fahrleitung}, language = {en} } @article{AlkamLahmer, author = {Alkam, Feras and Lahmer, Tom}, title = {Eigenfrequency-Based Bayesian Approach for Damage Identification in Catenary Poles}, series = {Infrastructures}, volume = {2021}, journal = {Infrastructures}, number = {Volume 6, issue 4, article 57}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures6040057}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210510-44256}, pages = {1 -- 19}, abstract = {This study proposes an efficient Bayesian, frequency-based damage identification approach to identify damages in cantilever structures with an acceptable error rate, even at high noise levels. The catenary poles of electric high-speed train systems were selected as a realistic case study to cover the objectives of this study. Compared to other frequency-based damage detection approaches described in the literature, the proposed approach is efficiently able to detect damages in cantilever structures at higher levels of damage detection, namely identifying both the damage location and severity, using a low-cost structural health monitoring (SHM) system with a limited number of sensors, for example, accelerometers. The integration of Bayesian inference, as a stochastic framework, in the proposed approach makes it possible to utilize the benefit of data fusion in merging the informative data from multiple damage features, which increases the quality and accuracy of the results. The findings provide the decision-maker with the information required to manage the maintenance, repair, or replacement procedures.}, subject = {Fahrleitung}, language = {en} } @misc{Almasi, type = {Master Thesis}, author = {Almasi, Ashkan}, title = {Stochastic Analysis of Interfacial Effects on the Polymeric Nanocomposites}, doi = {10.25643/bauhaus-universitaet.2433}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20150709-24339}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Polymeric clay nanocomposites are a new class of materials which have recently become the centre of attention due to their superior mechanical and physical properties. Several studies have been performed on the mechanical characterisation of these nanocomposites; however, most of those studies have neglected the effect of the interfacial region between the clays and the matrix, despite its significant influence on the mechanical performance of the nanocomposites. 
There are different analytical methods to calculate the overall elastic material properties of composites. In this study, we use the Mori-Tanaka method to determine the overall stiffness of the composites for the simple inclusion geometries of a cylinder and a sphere. Furthermore, the effect of the interphase layer on the overall properties of the composites is calculated. Here, we intend to obtain bounds for the effective mechanical properties to compare with the analytical results. Hence, we use linear displacement boundary conditions (LD) and uniform traction boundary conditions (UT) accordingly. Finally, the analytical results are compared with the numerical results, and they are in good agreement. The next focus of this dissertation is a computational approach with a hierarchical multiscale method on the mesoscopic level. In other words, in this study we use stochastic analysis and a computational homogenization method to analyse the effect of the thickness and stiffness of the interfacial region on the overall elastic properties of the clay/epoxy nanocomposites. The results show that an increase in interphase thickness reduces the stiffness of the clay/epoxy nanocomposites, and this decrease becomes significant at higher clay contents. The results of the sensitivity analysis prove that the stiffness of the interphase layer has a more significant effect on the final stiffness of the nanocomposites. We also validate the results with the available experimental results from the literature, which show good agreement.}, language = {en} } @article{AmaniSaboorBagherzadehRabczuk, author = {Amani, Jafar and Saboor Bagherzadeh, Amir and Rabczuk, Timon}, title = {Error estimate and adaptive refinement in mixed discrete least squares meshless method}, series = {Mathematical Problems in Engineering}, journal = {Mathematical Problems in Engineering}, doi = {10.1155/2014/721240}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170413-31181}, abstract = {The node moving and multistage node enrichment adaptive refinement procedures are extended in the mixed discrete least squares meshless (MDLSM) method for the efficient analysis of elasticity problems. In the formulation of the MDLSM method, a mixed formulation is adopted to avoid second-order differentiation of the shape functions and to obtain displacements and stresses simultaneously. In the refinement procedures, a robust error estimator based on the value of the least squares residual functional of the governing differential equations and its boundaries at nodal points is used, which is inherently available from the MDLSM formulation and can efficiently identify the zones with higher numerical errors. The results are compared with the refinement procedures in the irreducible formulation of the discrete least squares meshless (DLSM) method and show the accuracy and efficiency of the proposed procedures. 
Also, the comparison of the error norms and the convergence rate shows the fidelity of the proposed adaptive refinement procedures in the MDLSM method.}, subject = {Elastizit{\"a}t}, language = {en} } @phdthesis{Amiri, author = {Amiri, Fatemeh}, title = {Computational modelling of fracture with local maximum entropy approximations}, doi = {10.25643/bauhaus-universitaet.2631}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160719-26310}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {130}, abstract = {The key objective of this research is to study fracture with a meshfree method, local maximum entropy approximations, and to model fracture in thin shell structures with complex geometry and topology. This topic is of high relevance for real-world applications, for example in the automotive industry and in aerospace engineering. The shell structure can be described efficiently by meshless methods which are capable of describing complex shapes as a collection of points instead of a structured mesh. In order to find the appropriate numerical method to achieve this goal, the first part of the work was the development of a method based on local maximum entropy (LME) shape functions together with enrichment functions used in partition of unity methods to discretize problems in linear elastic fracture mechanics. We obtain improved accuracy relative to the standard extended finite element method (XFEM) at a comparable computational cost. In addition, we keep the advantages of the LME shape functions, such as smoothness and non-negativity. We show numerically that optimal convergence (the same as in FEM) for the energy norm and stress intensity factors can be obtained through the use of geometric (fixed area) enrichment with no special treatment of the nodes near the crack, such as blending or shifting. As the extension of this method to three-dimensional problems and complex thin shell structures with arbitrary crack growth is cumbersome, we developed a phase field model for fracture using LME. Phase field models provide a powerful tool to tackle moving interface problems, and have been extensively used in physics and materials science. Phase field methods are gaining popularity in a wide set of applications in applied science and engineering; recently, a second-order phase field approximation for brittle fracture has gathered significant interest in computational fracture, whereby sharp crack discontinuities are modeled by a diffusive crack. By minimizing the system energy with respect to the mechanical displacements and the phase-field, subject to an irreversibility condition to avoid crack healing, this model can describe crack nucleation, propagation, branching and merging. One of the main advantages of the phase field modeling of fractures is the unified treatment of the interfacial tracking and mechanics, which potentially leads to simple, robust, scalable computer codes applicable to complex systems. In other words, this approximation reduces the implementation complexity considerably because the numerical tracking of the fracture is not needed, at the expense of a high computational cost. We present a fourth-order phase field model for fracture based on local maximum entropy (LME) approximations. The higher-order continuity of the meshfree LME approximation allows the fourth-order phase field equations to be solved directly, without splitting the fourth-order differential equation into two second-order differential equations. 
Notably, in contrast to previous discretizations that use at least a quadratic basis, only linear completeness is needed in the LME approximation. We show that the crack surface can be captured more accurately in the fourth-order model than in the second-order model. Furthermore, fewer nodes are needed for the fourth-order model to resolve the crack path. Finally, we demonstrate the performance of the proposed meshfree fourth-order phase-field formulation for five representative numerical examples. Computational results will be compared to analytical solutions within linear elastic fracture mechanics and experimental data for three-dimensional crack propagation. In the last part of this research, we present a phase-field model for fracture in Kirchhoff-Love thin shells using the local maximum-entropy (LME) meshfree method. Since the crack is a natural outcome of the analysis, it does not require an explicit representation and tracking, which is advantageous over techniques such as the extended finite element method that require tracking of the crack paths. The geometric description of the shell is based on statistical learning techniques that allow dealing with general point set surfaces while avoiding a global parametrization, and which can be applied to tackle surfaces of complex geometry and topology. We show the flexibility and robustness of the present methodology for two examples: a plate in tension and a set of open connected pipes.}, language = {en} } @article{AmirinasabShamshirbandChronopoulosetal., author = {Amirinasab, Mehdi and Shamshirband, Shahaboddin and Chronopoulos, Anthony Theodore and Mosavi, Amir and Nabipour, Narjes}, title = {Energy-Efficient Method for Wireless Sensor Networks Low-Power Radio Operation in Internet of Things}, series = {electronics}, volume = {2020}, journal = {electronics}, number = {volume 9, issue 2, 320}, publisher = {MDPI}, doi = {10.3390/electronics9020320}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40954}, pages = {20}, abstract = {The radio operation in wireless sensor networks (WSN) in Internet of Things (IoT) applications is the most common source of power consumption. Consequently, recognizing and controlling the factors affecting radio operation can be valuable for managing the node power consumption. Among the essential factors affecting radio operation, the time spent checking the radio is of utmost importance for monitoring power consumption. It can lead to false WakeUps or idle listening in radio duty cycles and ContikiMAC. ContikiMAC is a low-power radio duty-cycle protocol in Contiki OS used in WakeUp mode, as a clear channel assessment (CCA) for checking the radio status periodically. This paper presents a detailed analysis of the radio WakeUp time factors of ContikiMAC. Furthermore, we propose a lightweight CCA (LW-CCA) as an extension to ContikiMAC to reduce the radio duty cycles in false WakeUps and idle listening through using a dynamic received signal strength indicator (RSSI) status check time. The simulation results in the Cooja simulator show that LW-CCA reduces energy consumption in nodes by about 8\% while maintaining up to 99\% of the packet delivery rate (PDR).}, subject = {Internet der Dinge}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. and Janizadeh, Saeid and Chandra Pal, Subodh and Chowdhuri, Indrajit and Siabi, Zhaleh and Norouzi, Akbar and Melesse, Assefa M. 
and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Comparative Analysis of Artificial Intelligence Models for Accurate Estimation of Groundwater Nitrate Concentration}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 20, article 5763}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20205763}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43364}, pages = {1 -- 23}, abstract = {Prediction of the groundwater nitrate concentration is of utmost importance for pollution control and water resource management. This research aims to model the spatial groundwater nitrate concentration in the Marvdasht watershed, Iran, based on the artificial intelligence methods of the support vector machine (SVM), Cubist, random forest (RF), and Bayesian artificial neural network (Bayesian-ANN) machine learning models. For this purpose, 11 independent variables affecting groundwater nitrate changes in the study area were prepared: elevation, slope, plan curvature, profile curvature, rainfall, piezometric depth, distance from the river, distance from residential areas, sodium (Na), potassium (K), and the topographic wetness index (TWI). Nitrate levels were also measured in 67 wells and used as the dependent variable for modeling. The data were divided into the two categories of training (70\%) and testing (30\%) for modeling. The evaluation criteria of the coefficient of determination (R2), mean absolute error (MAE), root mean square error (RMSE), and Nash-Sutcliffe efficiency (NSE) were used to evaluate the performance of the models. The results of modeling the susceptibility of the groundwater nitrate concentration showed that the RF model (R2 = 0.89, RMSE = 4.24, NSE = 0.87) is better than the Cubist (R2 = 0.87, RMSE = 5.18, NSE = 0.81), SVM (R2 = 0.74, RMSE = 6.07, NSE = 0.74), and Bayesian-ANN (R2 = 0.79, RMSE = 5.91, NSE = 0.75) models. The results of the groundwater nitrate concentration zoning showed that the northern parts of the study area, which are agricultural areas, have the highest nitrate levels. The most important causes of nitrate pollution there are agricultural activities and the use of groundwater, from wells close to agricultural areas, to irrigate these crops; chemical fertilizers applied indiscriminately are washed out by irrigation or rainwater, penetrate the groundwater, and pollute the aquifer.}, subject = {Grundwasser}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. and Janizadeh, Saeid and Chandra Pal, Subodh and Saha, Asish and Chakrabortty, Rabbin and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Novel Ensemble Approach of Deep Learning Neural Network (DLNN) Model and Particle Swarm Optimization (PSO) Algorithm for Prediction of Gully Erosion Susceptibility}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 19, article 5609}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20195609}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43341}, pages = {1 -- 27}, abstract = {This study aims to evaluate a new approach in modeling gully erosion susceptibility (GES) based on a deep learning neural network (DLNN) model and an ensemble particle swarm optimization (PSO) algorithm with DLNN (PSO-DLNN), comparing these approaches with common artificial neural network (ANN) and support vector machine (SVM) models in the Shirahan watershed, Iran. 
For this purpose, 13 independent variables affecting GES in the study area, namely, altitude, slope, aspect, plan curvature, profile curvature, drainage density, distance from a river, land use, soil, lithology, rainfall, stream power index (SPI), and topographic wetness index (TWI), were prepared. A total of 132 gully erosion locations were identified during field visits. To implement the proposed model, the dataset was divided into the two categories of training (70\%) and testing (30\%). The results indicate that the area under the curve (AUC) value from the receiver operating characteristic (ROC) considering the testing datasets of PSO-DLNN is 0.89, which indicates superb accuracy. The rest of the models are associated with optimal accuracy and have results similar to those of the PSO-DLNN model; the AUC values from the ROC of DLNN, SVM, and ANN for the testing datasets are 0.87, 0.85, and 0.84, respectively. The efficiency of the proposed model in terms of the prediction of GES was thus increased. Therefore, it can be concluded that the DLNN model and its ensemble with the PSO algorithm can be used as a novel and practical method to predict gully erosion susceptibility, which can help planners and managers to manage and reduce the risk of this phenomenon.}, subject = {Geoinformatik}, language = {en} } @article{BandJanizadehSahaetal., author = {Band, Shahab S. and Janizadeh, Saeid and Saha, Sunil and Mukherjee, Kaustuv and Khosrobeigi Bozchaloei, Saeid and Cerd{\`a}, Artemi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Evaluating the Efficiency of Different Regression, Decision Tree, and Bayesian Machine Learning Algorithms in Spatial Piping Erosion Susceptibility Using ALOS/PALSAR Data}, series = {Land}, volume = {2020}, journal = {Land}, number = {volume 9, issue 10, article 346}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/land9100346}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43424}, pages = {1 -- 22}, abstract = {Piping erosion is one form of water erosion that leads to significant changes in the landscape and environmental degradation. In the present study, we evaluated piping erosion modeling in the Zarandieh watershed of Markazi province in Iran based on the random forest (RF), support vector machine (SVM), and Bayesian generalized linear models (Bayesian GLM) machine learning algorithms. To this end, due to the importance of various geo-environmental and soil properties in the evolution and creation of piping erosion, 18 variables were considered for modeling the piping erosion susceptibility in the Zarandieh watershed. A total of 152 piping erosion points were identified in the study area and divided into training (70\%) and validation (30\%) sets for modeling. The area under the curve (AUC) was used to assess the efficiency of the RF, SVM, and Bayesian GLM models. The piping erosion susceptibility results indicated that all three RF, SVM, and Bayesian GLM models had high efficiency in the testing step, as shown by AUC values of 0.90 for RF, 0.88 for SVM, and 0.87 for Bayesian GLM. Altitude, pH, and bulk density were the variables that had the greatest influence on the piping erosion susceptibility in the Zarandieh watershed. 
This result indicates that geo-environmental and soil chemical variables are responsible for the expansion of piping erosion in the Zarandieh watershed.}, subject = {Maschinelles Lernen}, language = {en} } @article{BanihaniRabczukAlmomani, author = {Banihani, Suleiman and Rabczuk, Timon and Almomani, Thakir}, title = {POD for real-time simulation of hyperelastic soft biological tissue using the point collocation method of finite spheres}, series = {Mathematical Problems in Engineering}, journal = {Mathematical Problems in Engineering}, doi = {10.1155/2013/386501}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170413-31203}, abstract = {The point collocation method of finite spheres (PCMFS) is used to model the hyperelastic response of soft biological tissue in real time within the framework of virtual surgery simulation. The proper orthogonal decomposition (POD) model order reduction (MOR) technique was used to achieve a reduced-order model of the problem, minimizing the computational cost. The PCMFS is a physics-based meshfree numerical technique for the real-time simulation of surgical procedures where the approximation functions are applied directly to the strong form of the boundary value problem without the need for integration, increasing computational efficiency. Since computational speed plays a significant role in the simulation of surgical procedures, the proposed technique was able to model the realistic nonlinear behavior of organs in real time. Numerical results are shown to demonstrate the effectiveness of the new methodology through a comparison between full and reduced analyses for several nonlinear problems. It is shown that the proposed technique was able to achieve good agreement with the full model; moreover, the computational and data storage costs were significantly reduced.}, subject = {Chirurgie}, language = {en} } @phdthesis{Brehm2011, author = {Brehm, Maik}, title = {Vibration-based model updating: Reduction and quantification of uncertainties}, doi = {10.25643/bauhaus-universitaet.1465}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20110926-15553}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2011}, abstract = {Numerical models and their combination with advanced solution strategies are standard tools for many engineering disciplines to design or redesign structures and to optimize designs with the purpose to improve specific requirements. As the successful application of numerical models depends on their suitability to represent the behavior related to the intended use, they should be validated by experimentally obtained results. If the discrepancy between numerically derived and experimentally obtained results is not acceptable, a model revision or a revision of the experiment needs to be considered. Model revision is divided into two classes: model updating and the basic revision of the numerical model. The presented thesis is related to a special branch of model updating, the vibration-based model updating. Vibration-based model updating is a tool to improve the correlation of the numerical model by adjusting uncertain model input parameters by means of results extracted from vibration tests. Evidently, uncertainties related to the experiment, the numerical model, or the applied numerical solving strategies can influence the correctness of the identified model input parameters. 
The reduction of uncertainties for two critical problems and the quantification of uncertainties related to the investigation of several nominally identical structures are the main emphases of this thesis. First, the reduction of uncertainties by optimizing reference sensor positions is considered. The presented approach relies on predicted power spectral amplitudes and an initial finite element model as a basis to define the assessment criterion for predefined sensor positions. In combination with geometry-based design variables, which represent the sensor positions, genetic and particle swarm optimization algorithms are applied. The applicability of the proposed approach is demonstrated on a numerical benchmark study of a simply supported beam and a case study of a real test specimen. Furthermore, the theory of determining the predicted power spectral amplitudes is validated with results from vibration tests. Second, the possibility of reducing uncertainties related to an inappropriate assignment of numerically derived and experimentally obtained modes is investigated. In the context of vibration-based model updating, the correct pairing is essential. The most common criterion for indicating corresponding mode shapes is the modal assurance criterion. Unfortunately, this criterion fails in certain cases and is not reliable for automatic approaches. Hence, an alternative criterion, the energy-based modal assurance criterion, is proposed. This criterion combines the mathematical characteristic of orthogonality with the physical properties of the structure by means of modal strain energies. A numerical example and a case study with experimental data are presented to show the advantages of the proposed energy-based modal assurance criterion in comparison to the traditional modal assurance criterion. Third, the application of optimization strategies combined with information-theory-based objective functions is analyzed for the purpose of stochastic model updating. This approach serves as an alternative to the common sensitivity-based stochastic model updating strategies. Their success depends strongly on the defined initial model input parameters. In contrast, approaches based on optimization strategies can be more flexible. It can be demonstrated that the investigated nature-inspired optimization strategies in combination with the Bhattacharyya distance and the Kullback-Leibler divergence are appropriate. The obtained accuracies and the respective computational effort are comparable with sensitivity-based stochastic model updating strategies. The application of model updating procedures to improve the quality and suitability of a numerical model is always related to additional costs. The presented innovative approaches will contribute to reducing and quantifying uncertainties within a vibration-based model updating process. 
Therefore, the increased benefit can compensate for the additional effort which is necessary to apply model updating procedures.}, subject = {Dynamik}, language = {en} } @inproceedings{BrehmZabelBucheretal., author = {Brehm, Maik and Zabel, Volkmar and Bucher, Christian and Ribeiro, D.}, title = {AN AUTOMATIC MODE SELECTION STRATEGY FOR MODEL UPDATING USING THE MODAL ASSURANCE CRITERION AND MODAL STRAIN ENERGIES}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2833}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28330}, pages = {18}, abstract = {In the context of finite element model updating using vibration test data, natural frequencies and mode shapes are used as validation criteria. Consequently, the order of natural frequencies and mode shapes is important. As only limited spatial information is available and noise is present in the measurements, the automatic selection of the most likely numerical mode shape corresponding to a measured mode shape is a difficult task. The most common criterion to indicate corresponding mode shapes is the modal assurance criterion. Unfortunately, this criterion fails in certain cases. In this paper, the purely mathematical modal assurance criterion will be enhanced by additional physical information of the numerical model in terms of modal strain energies. A numerical example and a benchmark study with real measured data are presented to show the advantages of the enhanced energy-based criterion in comparison to the traditional modal assurance criterion.}, subject = {Angewandte Informatik}, language = {en} } @phdthesis{Budarapu, author = {Budarapu, Pattabhi Ramaiah}, title = {Adaptive multiscale methods for fracture}, doi = {10.25643/bauhaus-universitaet.2391}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20150507-23918}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {One major research focus in the Material Science and Engineering community in the past decade has been to obtain a more fundamental understanding of the phenomenon 'material failure'. Such an understanding is critical for engineers and scientists developing new materials with higher strength and toughness, developing robust designs against failure, or for those concerned with an accurate estimate of a component's design life. Defects like cracks and dislocations evolve at nano scales and influence the macroscopic properties such as strength, toughness and ductility of a material. In engineering applications, the global response of the system is often governed by the behaviour at the smaller length scales. Hence, the sub-scale behaviour must be computed accurately for good predictions of the full scale behaviour. Molecular Dynamics (MD) simulations promise to reveal the fundamental mechanics of material failure by modeling the atom-to-atom interactions. Since the atomistic dimensions are of the order of Angstroms ({\AA}), approximately 85 billion atoms are required to model a 1 µm³ volume of copper. Therefore, pure atomistic models are prohibitively expensive for everyday engineering computations involving macroscopic cracks and shear bands, which are much larger than the atomistic length and time scales. To reduce the computational effort, multiscale methods are required, which are able to couple a continuum description of the structure with an atomistic description.
In such paradigms, cracks and dislocations are explicitly modeled at the atomistic scale, whilst a self-consistent continuum model is employed elsewhere. Many multiscale methods for fracture are developed for "fictitious" materials based on "simple" potentials such as the Lennard-Jones potential. Moreover, multiscale methods for evolving cracks are rare, and efficient methods to coarse grain the fine scale defects are missing. Furthermore, the existing multiscale methods for fracture do not adaptively adjust the fine scale domain as the crack propagates; most methods only "enlarge" the fine scale domain and therefore drastically increase the computational cost. Adaptive adjustment requires the fine scale domain to be refined and coarsened. One of the major difficulties in multiscale methods for fracture is to up-scale fracture related material information from the fine scale to the coarse scale, in particular for complex crack problems. Most of the existing approaches were therefore applied to examples with comparatively few macroscopic cracks. Key contributions: The bridging scale method is enhanced using the phantom node method so that cracks can be modeled at the coarse scale. To ensure self-consistency in the bulk, a virtual atom cluster is devised providing the response of the intact material at the coarse scale. A molecular statics model is employed in the fine scale where crack propagation is modeled by naturally breaking the bonds. The fine scale and coarse scale models are coupled by enforcing the displacement boundary conditions on the ghost atoms. An energy criterion is used to detect the crack tip location. Adaptive refinement and coarsening schemes are developed and implemented during the crack propagation. The results were observed to be in excellent agreement with the pure atomistic simulations. The developed multiscale method is one of the first adaptive multiscale methods for fracture. A robust and simple three-dimensional coarse graining technique to convert a given atomistic region into an equivalent coarse region in the context of multiscale fracture has been developed. The developed method is the first of its kind. The developed coarse graining technique can be applied to identify and upscale defects such as cracks, dislocations and shear bands. The current method has been applied to estimate the equivalent coarse scale models of several complex fracture patterns obtained from the pure atomistic simulations. The upscaled fracture patterns agree well with the actual fracture patterns. The error in the potential energy of the pure atomistic and the coarse grained model was observed to be acceptable. A novel meshless adaptive multiscale method for fracture has also been developed. The phantom node method is replaced by a meshless differential reproducing kernel particle method. The differential reproducing kernel particle method is comparatively more expensive but allows for a more "natural" coupling between the two scales due to the meshless interpolation functions. The higher order continuity is also beneficial. The centro symmetry parameter is used to detect the crack tip location. The developed multiscale method is employed to study complex crack propagation. Results based on the meshless adaptive multiscale method were observed to be in excellent agreement with the pure atomistic simulations. The developed multiscale methods are applied to study the fracture in practical materials like Graphene and Graphene on a Silicon surface.
Bond stretching and bond reorientation were observed to be the main mechanisms of crack growth in Graphene. The influence of the time step on the crack propagation was studied using two different time steps. Pure atomistic simulations of fracture in Graphene on a Silicon surface are presented. Details of the three-dimensional multiscale method to study the fracture in Graphene on a Silicon surface are discussed.}, subject = {Material}, language = {en} } @article{ChakrabortyAnitescuZhuangetal., author = {Chakraborty, Ayan and Anitescu, Cosmin and Zhuang, Xiaoying and Rabczuk, Timon}, title = {Domain adaptation based transfer learning approach for solving PDEs on complex geometries}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, doi = {10.1007/s00366-022-01661-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220811-46776}, pages = {1 -- 20}, abstract = {In machine learning, if the training data is independently and identically distributed as the test data, then a trained model can make accurate predictions for new samples of data. Conventional machine learning has a strong dependence on massive amounts of domain-specific training data in order to understand their latent patterns. In contrast, domain adaptation and transfer learning are sub-fields within machine learning that are concerned with solving the inescapable problem of insufficient training data by relaxing the domain dependence hypothesis. In this contribution, this issue is addressed, and by combining both methods in a novel way, we develop a computationally efficient and practical algorithm to solve boundary value problems based on nonlinear partial differential equations. We adopt a meshfree analysis framework to integrate the prevailing geometric modelling techniques based on NURBS and present an enhanced deep collocation approach that also plays an important role in the accuracy of solutions. We start with a brief introduction on how these methods expand upon this framework. We observe excellent agreement between these methods and show how fine-tuning a pre-trained network to a specialized domain may lead to outstanding performance compared to existing approaches. As proof of concept, we illustrate the performance of our proposed model on several benchmark problems.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Chan, author = {Chan, Chiu Ling}, title = {Smooth representation of thin shells and volume structures for isogeometric analysis}, doi = {10.25643/bauhaus-universitaet.4208}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200812-42083}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {162}, abstract = {The purpose of this study is to develop self-contained methods for obtaining smooth meshes which are compatible with isogeometric analysis (IGA). The study contains three main parts. We start by developing a better understanding of shapes and splines through the study of an image-related problem. Then we proceed towards obtaining smooth volumetric meshes of the given voxel-based images. Finally, we treat the smoothness issue on multi-patch domains with C1 coupling. The following are the highlights of each part. First, we present a B-spline convolution method for boundary representation of voxel-based images. We adopt the filtering technique to compute the B-spline coefficients and gradients of the images effectively.
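For orientation (a standard identity, not specific to this thesis): the B-spline basis functions underlying the convolution and boundary representation described here satisfy the Cox-de Boor recursion on a knot vector \(\{u_i\}\),
\[ N_{i,0}(u)=\begin{cases}1, & u_i \le u < u_{i+1},\\ 0, & \text{otherwise},\end{cases} \qquad N_{i,p}(u)=\frac{u-u_i}{u_{i+p}-u_i}\,N_{i,p-1}(u)+\frac{u_{i+p+1}-u}{u_{i+p+1}-u_{i+1}}\,N_{i+1,p-1}(u), \]
with the convention that terms with zero denominator are dropped; a 2D image is then represented as \( f(u,v)=\sum_{i,j} c_{ij}\,N_{i,p}(u)\,N_{j,p}(v) \), with the coefficients \(c_{ij}\) obtained here by filtering.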
We then implement the B-spline convolution for developing a non-rigid image registration method. The proposed method is, in some sense, "isoparametric", in that all the computation is done within the B-splines framework. In particular, updating the images by using B-spline composition promotes a smooth transformation map between the images. We show the possible medical applications of our method by applying it for the registration of brain images. Secondly, we develop a self-contained volumetric parametrization method based on the B-splines boundary representation. We aim to convert given voxel-based data to a matching C1 representation with hierarchical cubic splines. The concept of the osculating circle is employed to enhance the geometric approximation; this is done by a single template and linear transformations (scaling, translations, and rotations) without the need for solving an optimization problem. Moreover, we use the Laplacian smoothing and refinement techniques to avoid irregular meshes and to improve mesh quality. We show with several examples that the method is capable of handling complex 2D and 3D configurations. In particular, we parametrize the 3D Stanford bunny, which contains irregular shapes and voids. Finally, we propose the B{\'e}zier ordinates approach and the splines approach for C1 coupling. In the first approach, the new basis functions are defined in terms of the B{\'e}zier Bernstein polynomials. For the second approach, the new basis is defined as a linear combination of C0 basis functions. The methods are not limited to planar or bilinear mappings. They allow the modeling of solutions to fourth order partial differential equations (PDEs) on complex geometric domains, provided that the given patches are G1 continuous. Both methods have their advantages. In particular, the B{\'e}zier approach offers more degrees of freedom, while the spline approach is more computationally efficient. In addition, we propose partial degree elevation to overcome the C1-locking issue caused by over-constraining of the solution space. We demonstrate the potential of the resulting C1 basis functions for applications in IGA involving fourth order PDEs, such as those appearing in Kirchhoff-Love shell models, the Cahn-Hilliard phase field application, and biharmonic problems.}, subject = {Modellierung}, language = {en} } @article{ChowdhuryZabel, author = {Chowdhury, Sharmistha and Zabel, Volkmar}, title = {Influence of loading sequence on wind induced fatigue assessment of bolts in TV-tower connection block}, series = {Results in Engineering}, volume = {2022}, journal = {Results in Engineering}, number = {Volume 16, article 100603}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2022.100603}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221028-47303}, pages = {1 -- 18}, abstract = {Bolted connections are widely employed in structures like transmission poles, wind turbines, and television (TV) towers. The behaviour of bolted connections is often complex and plays a significant role in the overall dynamic characteristics of the structure. The goal of this work is to conduct a fatigue lifecycle assessment of such a bolted connection block of a 193 m tall TV tower, for which 205 days of real measurement data have been obtained from the installed monitoring devices. Based on the recorded data, the best-fit stochastic wind distribution for 50 years, the decisive wind action, and the locations to carry out the fatigue analysis have been determined.
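For reference alongside this entry (a textbook statement, not a result of the paper): the Palmgren-Miner law used for the damage estimation below reads
\[ D=\sum_i \frac{n_i}{N_i}, \qquad \text{with failure assumed for } D \ge 1, \]
where \(n_i\) is the number of cycles counted in stress-range bin i (here extracted by rainflow counting) and \(N_i\) is the number of cycles to failure at that stress range, typically taken from an S-N curve of the form \( N_i = C\,\Delta\sigma_i^{-m} \). Being linear, this rule ignores the order in which cycles occur, which is precisely the loading sequence effect that the modified counting algorithm of this entry aims to capture.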
A 3D beam model of the entire tower is developed to extract the nodal forces corresponding to the connection block location under various mean wind speeds, which is later coupled with a detailed complex finite element model of the connection block, with over three million degrees of freedom, for acquiring stress histories at some pre-selected bolts. The random stress histories are analysed using the rainflow counting algorithm (RCA), and the damage is estimated using Palmgren-Miner's damage accumulation law. A modification is proposed to integrate the loading sequence effect into the RCA, which is otherwise ignored, and the differences between the two RCAs are investigated in terms of the accumulated damage.}, subject = {Schadensakkumulation}, language = {en} } @article{DehghaniSalehiMosavietal., author = {Dehghani, Majid and Salehi, Somayeh and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Ghamisi, Pedram}, title = {Spatial Analysis of Seasonal Precipitation over Iran: Co-Variation with Climate Indices}, series = {ISPRS, International Journal of Geo-Information}, volume = {2020}, journal = {ISPRS, International Journal of Geo-Information}, number = {Volume 9, Issue 2, 73}, publisher = {MDPI}, doi = {10.3390/ijgi9020073}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40740}, pages = {23}, abstract = {Temporary changes in precipitation may lead to sustained and severe drought or massive floods in different parts of the world. Knowing the variation in precipitation can effectively help decision-makers in water resources management. Large-scale circulation drivers have a considerable impact on precipitation in different parts of the world. In this research, the impact of El Ni{\~n}o-Southern Oscillation (ENSO), Pacific Decadal Oscillation (PDO), and North Atlantic Oscillation (NAO) on seasonal precipitation over Iran was investigated. For this purpose, 103 synoptic stations with at least 30 years of data were utilized. The Spearman correlation coefficient between the indices in the previous 12 months and seasonal precipitation was calculated, and the meaningful correlations were extracted. Then, the month in which each of these indices has the highest correlation with seasonal precipitation was determined. Finally, the overall amount of increase or decrease in seasonal precipitation due to each of these indices was calculated. Results indicate that the Southern Oscillation Index (SOI), NAO, and PDO have the greatest impact on seasonal precipitation, in that order. Additionally, these indices have the highest impact on the precipitation in winter, autumn, spring, and summer, respectively. SOI has a different impact on winter precipitation compared to the PDO and NAO, while in the other seasons, each index has its own specific impact on seasonal precipitation. Generally, all indices in different phases may decrease the seasonal precipitation by up to 100\%. However, the seasonal precipitation may increase by more than 100\% in different seasons due to the impact of these indices.
The results of this study can be used effectively in water resources management and especially in dam operation.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Eckardt2009, author = {Eckardt, Stefan}, title = {Adaptive heterogeneous multiscale models for the nonlinear simulation of concrete}, doi = {10.25643/bauhaus-universitaet.1416}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20100317-15023}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2009}, abstract = {The nonlinear behavior of concrete can be attributed to the propagation of microcracks within the heterogeneous internal material structure. In this thesis, a mesoscale model is developed which allows for the explicit simulation of these microcracks. Consequently, the actual physical phenomena causing the complex nonlinear macroscopic behavior of concrete can be represented using rather simple material formulations. On the mesoscale, the numerical model explicitly resolves the components of the internal material structure. For concrete, a three-phase model consisting of aggregates, mortar matrix and interfacial transition zone is proposed. Based on prescribed grading curves, an efficient algorithm for the generation of three-dimensional aggregate distributions using ellipsoids is presented. In the numerical model, tensile failure of the mortar matrix is described using a continuum damage approach. In order to reduce spurious mesh sensitivities, introduced by the softening behavior of the matrix material, nonlocal integral-type material formulations are applied. The propagation of cracks at the interface between aggregates and mortar matrix is represented in a discrete way using a cohesive crack approach. The iterative solution procedure is stabilized using a new path following constraint within the framework of load-displacement-constraint methods which allows for an efficient representation of snap-back phenomena. In several examples, the influence of the randomly generated heterogeneous material structure on the stochastic scatter of the results is analyzed. Furthermore, the ability of mesoscale models to represent size effects is investigated. Mesoscale simulations require the discretization of the internal material structure. Compared to simulations on the macroscale, the numerical effort and the memory demand increase dramatically. Due to the complexity of the numerical model, mesoscale simulations are, in general, limited to small specimens. In this thesis, an adaptive heterogeneous multiscale approach is presented which allows for the incorporation of mesoscale models within nonlinear simulations of concrete structures. In heterogeneous multiscale models, only critical regions, i.e. regions in which damage develops, are resolved on the mesoscale, whereas undamaged or sparsely damaged regions are modeled on the macroscale. A crucial point in simulations with heterogeneous multiscale models is the coupling of sub-domains discretized on different length scales. The sub-domains differ not only in the size of the finite elements but also in the constitutive description. In this thesis, different methods for the coupling of non-matching discretizations - constraint equations, the mortar method and the Arlequin method - are investigated and their application to heterogeneous multiscale models is presented. Another important point is the detection of critical regions. An adaptive solution procedure allowing the transfer of macroscale sub-domains to the mesoscale is proposed.
In this context, several indicators which trigger the model adaptation are introduced. Finally, the application of the proposed adaptive heterogeneous multiscale approach in nonlinear simulations of concrete structures is presented.}, subject = {Beton}, language = {en} } @inproceedings{EckardtKoenke, author = {Eckardt, Stefan and K{\"o}nke, Carsten}, title = {ENERGY RELEASE CONTROL FOR NONLINEAR MESOSCALE SIMULATIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2841}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28414}, pages = {5}, abstract = {In nonlinear simulations the loading is, in general, applied in an incremental way. Path-following algorithms are used to trace the equilibrium path during the failure process. Standard displacement controlled solution strategies fail if snap-back phenomena occur. In this contribution, a path-following algorithm based on the dissipation of the inelastic energy is presented which allows for the simulation of snap-backs. Since the constraint is defined in terms of the internal energy, the algorithm is not restricted to continuum damage models. Furthermore, no a priori knowledge about the final damage distribution is required. The performance of the proposed algorithm is illustrated using nonlinear mesoscale simulations.}, subject = {Angewandte Informatik}, language = {en} } @inproceedings{EckardtKoenke, author = {Eckardt, Stefan and K{\"o}nke, Carsten}, title = {ADAPTIVE SIMULATION OF THE DAMAGE BEHAVIOR OF CONCRETE USING HETEROGENEOUS MULTISCALE MODELS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2947}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29478}, pages = {15}, abstract = {In this paper an adaptive heterogeneous multiscale model, which couples two substructures with different length scales into one numerical model, is introduced for the simulation of damage in concrete. In the presented approach the initiation, propagation and coalescence of microcracks is simulated using a mesoscale model, which explicitly represents the heterogeneous material structure of concrete. The mesoscale model is restricted to the damaged parts of the structure, whereas the undamaged regions are simulated on the macroscale. As a result, an adaptive enlargement of the mesoscale model during the simulation is necessary. In the first part of the paper the generation of the heterogeneous mesoscopic structure of concrete, the finite element discretization of the mesoscale model, the applied isotropic damage model and the cohesive zone model are briefly introduced. Furthermore, the mesoscale simulation of a uniaxial tension test of a concrete prism is presented, and the obtained numerical results are compared to experimental results. The second part is focused on the adaptive heterogeneous multiscale approach. Indicators for the model adaptation and for the coupling between the different numerical models will be introduced. The transfer from the macroscale to the mesoscale and the adaptive enlargement of the mesoscale substructure will be presented in detail.
A nonlinear simulation of a realistic structure using an adaptive heterogeneous multiscale model is presented at the end of the paper to show the applicability of the proposed approach to large-scale structures.}, subject = {Architektur}, language = {en} } @article{FaizollahzadehArdabiliNajafiAlizamiretal., author = {Faizollahzadeh Ardabili, Sina and Najafi, Bahman and Alizamir, Meysam and Mosavi, Amir and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {Using SVM-RSM and ELM-RSM Approaches for Optimizing the Production Process of Methyl and Ethyl Esters}, series = {Energies}, journal = {Energies}, number = {11, 2889}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11112889}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181025-38170}, pages = {1 -- 20}, abstract = {The production of a desired product needs an effective use of the experimental model. The present study proposes an extreme learning machine (ELM) and a support vector machine (SVM) integrated with the response surface methodology (RSM) to solve the complexity in the optimization and prediction of the ethyl ester and methyl ester production process. The novel hybrid models of ELM-RSM and SVM-RSM are further used as a case study to estimate the yield of methyl and ethyl esters through a trans-esterification process from waste cooking oil (WCO) based on American Society for Testing and Materials (ASTM) standards. The results of the prediction phase were also compared with artificial neural networks (ANNs) and the adaptive neuro-fuzzy inference system (ANFIS), which were recently developed by the second author of this study. Based on the results, an ELM with correlation coefficients of 0.9815 and 0.9863 for methyl and ethyl esters, respectively, had a high estimation capability compared with that of SVM, ANNs, and ANFIS. Accordingly, using ELM-RSM, the maximum production yield was 96.86\% for ethyl ester at a temperature of 68.48 °C, a catalyst value of 1.15 wt. \%, a mixing intensity of 650.07 rpm, and an alcohol to oil molar ratio (A/O) of 5.77; for methyl ester, the production yield was 98.46\% at a temperature of 67.62 °C, a catalyst value of 1.1 wt. \%, a mixing intensity of 709.42 rpm, and an A/O of 6.09. Therefore, ELM-RSM increased the production yield by 3.6\% for ethyl ester and 3.1\% for methyl ester, compared with the experimental data.}, subject = {Biodiesel}, language = {en} } @article{FaridmehrTahirLahmer, author = {Faridmehr, Iman and Tahir, Mamood Md. and Lahmer, Tom}, title = {Classification System for Semi-Rigid Beam-to-Column Connections}, series = {LATIN AMERICAN JOURNAL OF SOLIDS AND STRUCTURES 11}, journal = {LATIN AMERICAN JOURNAL OF SOLIDS AND STRUCTURES 11}, doi = {10.1590/1679-78252595}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170401-30988}, pages = {2152 -- 2175}, abstract = {The current study attempts to recognise an adequate classification for semi-rigid beam-to-column connections by investigating strength, stiffness and ductility. For this purpose, an experimental test was carried out to investigate the moment-rotation (M-θ) features of flush end-plate (FEP) connections, including variable parameters like the size and number of bolts, the thickness of the end-plate, and, finally, the size of beams and columns. The initial elastic stiffness and ultimate moment capacity of the connections were determined by an extensive analytical procedure following the method prescribed by the ANSI/AISC 360-10 and Eurocode 3 Part 1-8 specifications.
The behaviour of beams with partially restrained or semi-rigid connections was also studied by incorporating classical analysis methods. The results confirmed that the thickness of the column flange and end-plate substantially governs the initial rotational stiffness of flush end-plate connections. The results also clearly showed that EC3 provided a more reliable classification index for flush end-plate (FEP) connections. The findings from this study make significant contributions to the current literature, as the actual response characteristics of such connections are non-linear; therefore, such semi-rigid behaviour should be accounted for in analysis and design methods.}, subject = {Tragf{\"a}higkeit}, language = {en} } @article{FaroughiKarimimoshaverArametal., author = {Faroughi, Maryam and Karimimoshaver, Mehrdad and Aram, Farshid and Solgi, Ebrahim and Mosavi, Amir and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Computational modeling of land surface temperature using remote sensing data to investigate the spatial arrangement of buildings and energy consumption relationship}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {https://doi.org/10.1080/19942060.2019.1707711}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40585}, pages = {254 -- 270}, abstract = {The effect of urban form on energy consumption has been the subject of various studies around the world. Having examined the effect of buildings on energy consumption, these studies indicate that the physical form of a city has a notable impact on the amount of energy consumed in its spaces. The present study identified the variables that affect energy consumption in residential buildings and analyzed their effects on energy consumption in four neighborhoods in Tehran: Apadana, Bimeh, Ekbatan-phase I, and Ekbatan-phase II. After extracting the variables, their effects were estimated with statistical methods, and the results were compared with the land surface temperature (LST) remote sensing data derived from Landsat 8 satellite images taken in the winter of 2019. The results showed that physical variables, such as the size of buildings, population density, vegetation cover, texture concentration, and surface color, have the greatest impacts on energy usage. For the Apadana neighborhood, the factors with the most potent effect on energy consumption were found to be the size of buildings and the population density. However, for the other neighborhoods, in addition to these two factors, a third factor was also recognized to have a significant effect on energy consumption.
This third factor for the Bimeh, Ekbatan-I, and Ekbatan-II neighborhoods was the type of buildings, texture concentration, and orientation of buildings, respectively.}, subject = {Fernerkundung}, language = {en} } @article{FathiSajadzadehMohammadiSheshkaletal., author = {Fathi, Sadegh and Sajadzadeh, Hassan and Mohammadi Sheshkal, Faezeh and Aram, Farshid and Pinter, Gergo and Felde, Imre and Mosavi, Amir}, title = {The Role of Urban Morphology Design on Enhancing Physical Activity and Public Health}, series = {International Journal of Environmental Research and Public Health}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health}, number = {Volume 17, Issue 7, 2359}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/ijerph17072359}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200402-41225}, pages = {29}, abstract = {Along with environmental pollution, urban planning has been connected to public health. The research indicates that the quality of built environments plays an important role in reducing mental disorders and improving overall health. The structure and shape of the city are considered among the factors influencing happiness and health in urban communities as well as the type of daily activities of citizens. The aim of this study was to promote physical activity in the main structure of the city via urban design, such that the main form and morphology of the city encourage citizens to move around and be physically active within the city. Based on the literature review, functional, physical, cultural-social, and perceptual-visual features are regarded as the most important and effective criteria for increasing physical activity in urban spaces. The environmental quality of urban spaces and their role in the physical activities of citizens in urban spaces were assessed by using the questionnaire tool and the analytical network process (ANP) of structural equation modeling. Further, the space syntax method was utilized to evaluate the role of the spatial integration of urban spaces in improving physical activity. Based on the results, consideration of functional diversity, spatial flexibility and integration, security, and the aesthetic and visual quality of urban spaces plays an important role in improving the physical health of citizens in urban spaces. Further, more physical activity, including motivation for walking and a sense of public health and happiness, was observed in the streets having higher linkage and space syntax indexes with their surrounding texture.}, subject = {Morphologie}, language = {en} } @phdthesis{Ghasemi, author = {Ghasemi, Hamid}, title = {Stochastic optimization of fiber reinforced composites considering uncertainties}, doi = {10.25643/bauhaus-universitaet.2704}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20161117-27042}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {140}, abstract = {Briefly, the two basic questions that this research is supposed to answer are: 1. How much fiber is needed, and how should fibers be distributed through a fiber reinforced composite (FRC) structure in order to obtain the optimal and reliable structural response? 2. How do uncertainties influence the optimization results and the reliability of the structure? To answer the above questions, a double-stage sequential optimization algorithm for finding the optimal content of short fiber reinforcements and their distribution in the composite structure, considering uncertain design parameters, is presented.
In the first stage, the optimal amount of short fibers in an FRC structure with uniformly distributed fibers is determined in the framework of a Reliability Based Design Optimization (RBDO) problem. The presented model considers material, structural and modeling uncertainties. In the second stage, the fiber distribution optimization (with the aim of further increasing structural reliability) is performed by defining a fiber distribution function through a Non-Uniform Rational B-Spline (NURBS) surface. The advantages of using the NURBS surface as a fiber distribution function include: using the same data set for the optimization and analysis; a high convergence rate due to the smoothness of the NURBS; mesh independency of the optimal layout; no need for any post-processing technique; and its non-heuristic nature. The output of stage 1 (the optimal fiber content for homogeneously distributed fibers) is considered as the input of stage 2. The output of stage 2 is the reliability index (β) of the structure with the optimal fiber content and distribution. The first-order reliability method (to approximate the limit state function) as well as different material models, including the rule of mixtures, Mori-Tanaka, an energy-based approach and stochastic multi-scale models, are implemented in different examples. The proposed combined model is able to capture the role of the available uncertainties in FRC structures through a computationally efficient algorithm using sequential, NURBS-based and sensitivity-based techniques. The methodology is successfully implemented for interfacial shear stress optimization in sandwich beams and also for the optimization of the internal cooling channels in a ceramic matrix composite. Finally, after some changes and modifications combining isogeometric analysis, level set and point-wise density mapping techniques, the computational framework is extended for the topology optimization of piezoelectric/flexoelectric materials.}, subject = {Finite-Elemente-Methode}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management and cane supply management is outlined, where the literature indicates that the application of the extreme learning machine (ELM) has never been explored in this realm.
Consequently, the leading objective of the current research was to fill this gap by applying ELM to develop a swift and accurate data-driven model for crop production. The key learning has been the need for innovation in the technical aspects of system function, underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrated model using ELM to predict the final growth amount of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. The accuracy of the ELM model is calculated using the statistical indicators Root Mean Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2), with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to a faster learning curve. Thus, the proficiency of the ELM for further work on advancing prediction models for sugarcane growth was confirmed with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @phdthesis{Goswami, author = {Goswami, Somdatta}, title = {Phase field modeling of fracture with isogeometric analysis and machine learning methods}, doi = {10.25643/bauhaus-universitaet.4384}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210304-43841}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {168}, abstract = {This thesis presents the advances and applications of phase field modeling in fracture analysis. In this approach, the sharp crack surface topology in a solid is approximated by a diffusive crack zone governed by a scalar auxiliary variable. The uniqueness of phase field modeling is that the crack paths are automatically determined as part of the solution and no interface tracking is required. The damage parameter varies continuously over the domain. But this flexibility comes with associated difficulties: (1) a very fine spatial discretization is required to represent sharp local gradients correctly; (2) fine discretization results in high computational cost; (3) computation of higher-order derivatives is needed for improved convergence rates; and (4) conventional numerical integration techniques suffer from the curse of dimensionality. As a consequence, the practical applicability of phase field models is severely limited. The research presented in this thesis addresses the difficulties of the conventional numerical integration techniques for phase field modeling in quasi-static brittle fracture analysis. The first method relies on polynomial splines over hierarchical T-meshes (PHT-splines) in the framework of isogeometric analysis (IGA). An adaptive h-refinement scheme is developed based on the variational energy formulation of phase field modeling. The fourth-order phase field model provides increased regularity in the exact solution of the phase field equation and improved convergence rates for numerical solutions on a coarser discretization, compared to the second-order model. However, second-order derivatives of the phase field are required in the fourth-order model. Hence, at least C1 continuous basis functions are essential, which is achieved using hierarchical cubic B-splines in IGA. PHT-splines enable the refinement to remain local at singularities and high gradients, consequently reducing the computational cost greatly.
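For orientation, a widely used second-order formulation (the thesis may use a different but related functional): phase field models of brittle fracture regularize the Griffith energy as
\[ E(u,\phi)=\int_\Omega (1-\phi)^2\,\psi_e\bigl(\varepsilon(u)\bigr)\,d\Omega + G_c\int_\Omega\left(\frac{\phi^2}{2\ell}+\frac{\ell}{2}\,\lvert\nabla\phi\rvert^2\right)d\Omega, \]
where \(\phi\in[0,1]\) is the scalar phase field (\(\phi=1\) on the crack), \(\psi_e\) the elastic energy density, \(G_c\) the critical energy release rate, and \(\ell\) the regularization length; fourth-order models add second derivatives of \(\phi\) to the crack surface density, which is what demands the C1-continuous bases discussed above.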
Unfortunately, when modeling complex geometries, multiple parameter spaces (patches) are joined together to describe the physical domain, and there is typically a loss of continuity at the patch boundaries. This decrease of smoothness is dictated by the geometry description, where C0 parameterizations are normally used to deal with kinks and corners in the domain. Hence, the application of the fourth-order model is severely restricted. To overcome the high computational cost of the second-order model, we develop a dual-mesh adaptive h-refinement approach. This approach uses a coarser discretization for the elastic field and a finer discretization for the phase field. Independent refinement strategies have been used for each field. The next contribution is based on physics informed deep neural networks. The network is trained based on the minimization of the variational energy of the system described by general non-linear partial differential equations while respecting any given law of physics, hence the name physics informed neural network (PINN). The developed approach needs only a set of points to define the geometry, contrary to conventional mesh-based discretization techniques. The concept of `transfer learning' is integrated with the developed PINN approach to improve the computational efficiency of the network at each displacement step. This approach allows numerically stable crack growth even with larger displacement steps. An adaptive h-refinement scheme based on the generation of more quadrature points in the damage zone is developed in this framework. For all the developed methods, displacement-controlled loading is considered. The accuracy and efficiency of the developed methods are studied numerically, showing that they are powerful and computationally efficient tools for accurately predicting fracture.}, subject = {Phasenfeldmodell}, language = {en} } @article{GuoZhuangChenetal., author = {Guo, Hongwei and Zhuang, Xiaoying and Chen, Pengwan and Alajlan, Naif and Rabczuk, Timon}, title = {Analysis of three-dimensional potential problems in non-homogeneous media with physics-informed deep collocation method using material transfer learning and sensitivity analysis}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, doi = {10.1007/s00366-022-01633-6}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220811-46764}, pages = {1 -- 22}, abstract = {In this work, we present a deep collocation method (DCM) for three-dimensional potential problems in non-homogeneous media. This approach utilizes a physics-informed neural network with material transfer learning, reducing the solution of the non-homogeneous partial differential equations to an optimization problem. We tested different configurations of the physics-informed neural network, including smooth activation functions, sampling methods for collocation point generation, and combined optimizers. A material transfer learning technique is utilized for non-homogeneous media with different material gradations and parameters, which enhances the generality and robustness of the proposed method. In order to identify the most influential parameters of the network configuration, we carried out a global sensitivity analysis. Finally, we provide a convergence proof of our DCM.
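For reference, a generic form (the cited works define their own specific variants): deep collocation approaches of this kind train the network parameters \(\theta\) by minimizing a residual-based loss over scattered collocation points,
\[ L(\theta)=\frac{1}{N_f}\sum_{i=1}^{N_f}\bigl\lVert \mathcal{N}[u_\theta](x_i)\bigr\rVert^2 + \frac{\lambda}{N_b}\sum_{j=1}^{N_b}\bigl\lVert \mathcal{B}[u_\theta](y_j)\bigr\rVert^2, \]
where \(u_\theta\) is the network approximation of the field, \(\mathcal{N}\) the governing PDE operator at interior points \(x_i\), \(\mathcal{B}\) the boundary operator at boundary points \(y_j\), and \(\lambda\) a penalty weight; energy-based variants, as in the Goswami entry above, replace the PDE residual by the variational energy of the system.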
The approach is validated through several benchmark problems, also testing different material variations.}, subject = {Deep learning}, language = {en} } @misc{Habtemariam, type = {Master Thesis}, author = {Habtemariam, Abinet Kifle}, title = {Numerical Demolition Analysis of a Slender Guyed Antenna Mast}, doi = {10.25643/bauhaus-universitaet.4460}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210723-44609}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {75}, abstract = {The main purpose of this thesis is to ensure the safe demolition of old guyed antenna masts that are located in different parts of Germany. The major problem in the demolition of these masts is that the mast may fall in an unexpected direction because of buckling. The objective of this thesis is the development of numerical models using the finite element method (FEM) and the assurance of a controlled collapse by devising different timing setups for the detonation of the explosives which cut the cables. The results of this thesis will help avoid unexpected outcomes during the demolition process and reduce the risk of the mast collapsing onto nearby structures.}, subject = {Abbruch}, language = {en} } @phdthesis{Hamdia, author = {Hamdia, Khader}, title = {On the fracture toughness of polymeric nanocomposites: Comprehensive stochastic and numerical studies}, doi = {10.25643/bauhaus-universitaet.3765}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180712-37652}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Polymeric nanocomposites (PNCs) are considered for numerous nanotechnology applications, such as nano-biotechnology, nano-systems, nanoelectronics, and nano-structured materials. Commonly, they are formed by a polymer (epoxy) matrix reinforced with a nanosized filler. The addition of rigid nanofillers to the epoxy matrix has offered great improvements in the fracture toughness without sacrificing other important thermo-mechanical properties. The physics of fracture in PNCs is rather complicated and is influenced by different parameters. The presence of uncertainty in the predicted output is expected as a result of stochastic variance in the factors affecting the fracture mechanism. Consequently, evaluating the improved fracture toughness in PNCs is a challenging problem. Artificial neural networks (ANN) and the adaptive neuro-fuzzy inference system (ANFIS) have been employed to predict the fracture energy of polymer/particle nanocomposites. The ANN and ANFIS models were constructed, trained, and tested based on a collection of 115 experimental datasets gathered from the literature. The performance evaluation indices of the developed ANN and ANFIS showed relatively small errors, with high coefficients of determination (R2) and low root mean square error and mean absolute percentage error. In the framework of uncertainty quantification of PNCs, a sensitivity analysis (SA) has been conducted to examine the influence of uncertain input parameters on the fracture toughness of polymer/clay nanocomposites. The phase-field approach is employed to predict the macroscopic properties of the composite considering six uncertain input parameters. The efficiency, robustness, and repeatability are compared and evaluated comprehensively for five different SA methods. The Bayesian method is applied to develop a methodology in order to evaluate the performance of different analytical models used in predicting the fracture toughness of polymeric particle nanocomposites.
The developed method considers the model and parameter uncertainties based on different reference data (experimental measurements) obtained from the literature. Three analytical models differing in theory and assumptions were examined. The coefficients of variation of the model predictions with respect to the measurements are calculated using the approximated optimal parameter sets. Then, the model selection probability is obtained with respect to the different reference data. Stochastic finite element modeling is implemented to predict the fracture toughness of polymer/particle nanocomposites. For this purpose, a 2D finite element model containing an epoxy matrix and rigid nanoparticles surrounded by an interphase zone is generated. The crack propagation is simulated by the cohesive segments method and phantom nodes. Considering the uncertainties in the input parameters, a polynomial chaos expansion (PCE) surrogate model is constructed, followed by a sensitivity analysis.}, subject = {Bruch}, language = {en} } @phdthesis{Hanna, author = {Hanna, John}, title = {Computational Fracture Modeling and Design of Encapsulation-Based Self-Healing Concrete Using XFEM and Cohesive Surface Technique}, doi = {10.25643/bauhaus-universitaet.4746}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221124-47467}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {125}, abstract = {Encapsulation-based self-healing concrete (SHC) is the most promising technique for providing a self-healing mechanism to concrete. This is due to its capacity to heal fractures effectively without human intervention, extending the operational life and lowering maintenance costs. The healing mechanism is created by embedding capsules containing the healing agent inside the concrete. The healing agent will be released once the capsules are fractured, and the healing occurs in the vicinity of the damaged part. The healing efficiency of SHC is still not clear and depends on several factors; in the case of microcapsule-based SHC, the fracture of the microcapsules is the most important prerequisite for releasing the healing agents and hence healing the cracks. This study contributes to verifying the healing efficiency of SHC and the fracture mechanism of the microcapsules. The extended finite element method (XFEM) is a flexible and powerful discrete crack method that allows crack propagation without the requirement for re-meshing and has shown high accuracy for modeling fracture in concrete. In this thesis, a computational fracture modeling approach for encapsulation-based SHC is proposed based on the XFEM and the cohesive surface (CS) technique to study the healing efficiency as well as the potential for fracture and debonding of the microcapsules or the solidified healing agents from the concrete matrix. The concrete matrix and the microcapsule shell are both modeled by the XFEM and coupled by the CS. The effects of the healed-crack length, the interfacial fracture properties, and the microcapsule size on the load carrying capability and fracture pattern of the SHC have been studied. The obtained results are compared to those obtained from the zero-thickness cohesive element approach to demonstrate the accuracy and validity of the proposed simulation. The present fracture simulation is developed to study the influence of capsular clustering on the fracture mechanism by varying the contact surface area of the CS between the microcapsule shell and the concrete matrix.
The proposed fracture simulation is expanded to 3D to validate the 2D computational simulations and to estimate the accuracy difference between the 2D and 3D simulations. In addition, a design method is developed to determine the size of the microcapsules in consideration of a sufficient volume of healing agent to heal the expected crack width. This method is based on the configuration of the unit cell (UC), the Representative Volume Element (RVE), and Periodic Boundary Conditions (PBC), relating them to the volume fraction (Vf) and the crack width as variables. The proposed microcapsule design is verified through computational fracture simulations.}, subject = {Beton}, language = {en} } @article{Hanna, author = {Hanna, John}, title = {Computational Modelling for the Effects of Capsular Clustering on Fracture of Encapsulation-Based Self-Healing Concrete Using XFEM and Cohesive Surface Technique}, series = {Applied Sciences}, volume = {2022}, journal = {Applied Sciences}, number = {Volume 12, issue 10, article 5112}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app12105112}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220721-46717}, pages = {1 -- 17}, abstract = {The fracture of microcapsules is an important issue for releasing the healing agent to heal the cracks in encapsulation-based self-healing concrete. The capsular clustering generated by the concrete mixing process is considered one of the critical factors in the fracture mechanism. Since there is a lack of studies in the literature regarding this issue, the design of self-healing concrete cannot be made without an appropriate modelling strategy. In this paper, the effects of microcapsule size and clustering on the fracture of microcapsules are studied computationally. A simple 2D computational modelling approach is developed based on the eXtended Finite Element Method (XFEM) and the cohesive surface technique. The proposed model shows that the microcapsule size and clustering play significant roles in governing the load-carrying capacity and the crack propagation pattern and determine whether the microcapsule will be fractured or debonded from the concrete matrix. The higher the microcapsule circumferential contact length, the higher the load-carrying capacity. When it is lower than 25\% of the microcapsule circumference, debonding of the microcapsule from the concrete becomes more likely. The greater the core/shell ratio (smaller shell thickness), the greater the likelihood of microcapsules being fractured.}, subject = {Beton}, language = {en} } @phdthesis{Harirchian, author = {Harirchian, Ehsan}, title = {Improved Rapid Assessment of Earthquake Hazard Safety of Existing Buildings Using a Hierarchical Type-2 Fuzzy Logic Model}, doi = {10.25643/bauhaus-universitaet.4396}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210326-43963}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {143}, abstract = {Although it is impractical to avert subsequent natural disasters, advances in simulation science and seismological studies make it possible to lessen the catastrophic damage. There currently exists in many urban areas a large number of structures which are prone to damage by earthquakes. These were constructed without the guidance of a national seismic code, either before it existed or before it was enforced.
For instance, in Istanbul, Turkey, a highly seismic area, around 90\% of buildings are substandard, a situation that can be generalized to other earthquake-prone regions in Turkey. The reliability of this building stock with respect to earthquake-induced collapse is currently uncertain. Nonetheless, it is also not feasible to perform a detailed seismic vulnerability analysis on each building, as it would be too complicated and expensive. This indicates the necessity of a reliable, rapid, and computationally easy method for seismic vulnerability assessment, commonly known as Rapid Visual Screening (RVS). In RVS methodology, an observational survey of buildings is performed, and according to the data collected during the visual inspection, a structural score is calculated without performing any structural calculations to determine the expected damage of a building and whether the building needs detailed assessment. Although this method saves time and resources, the evaluation process is dominated by vagueness and uncertainties owing to the subjective/qualitative judgments of the experts who perform the inspection; the vagueness can be handled adequately through fuzzy set theory, which, however, does not cover all sorts of uncertainties due to its crisp membership functions. In this study, a novel method for the rapid visual hazard safety assessment of buildings against earthquakes is introduced, in which an interval type-2 fuzzy logic system (IT2FLS) is used to cover uncertainties. In addition, the proposed method provides the possibility to evaluate the earthquake risk of the building by considering factors related to the building importance and exposure. A smartphone app prototype of the method has been introduced. For validation of the proposed method, two case studies have been selected, and the results of the analysis demonstrate the robust efficiency of the proposed method.}, subject = {Fuzzy-Logik}, language = {en} } @article{HarirchianIsik, author = {Harirchian, Ehsan and Isik, Ercan}, title = {A Comparative Probabilistic Seismic Hazard Analysis for Eastern Turkey (Bitlis) Based on Updated Hazard Map and Its Effect on Regular RC Structures}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 10, article 1573}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12101573}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221028-47283}, pages = {1 -- 19}, abstract = {Determining the earthquake hazard of any settlement is one of the primary tasks in reducing earthquake damage. Therefore, earthquake hazard maps used for this purpose must be renewed over time. The Turkey Earthquake Hazard Map has been used instead of the Turkey Earthquake Zones Map since 2019. A probabilistic seismic hazard analysis was performed by using these last two maps and different attenuation relationships for Bitlis Province (Eastern Turkey), located in the Lake Van Basin, which has a high seismic risk. The earthquake parameters were determined by considering all districts and neighborhoods in the province. Probabilistic seismic hazard analyses were carried out for these settlements using seismic sources and four different attenuation relationships. The obtained values are compared with the design spectra stated in the last two earthquake maps. Significant differences exist between the design spectra obtained according to the different exceedance probabilities.
In this study, adaptive pushover analyses of sample reinforced concrete buildings were performed using the design ground motion level. Structural analyses were carried out using three different design spectra, as given in the last two seismic design codes, and the mean spectrum obtained from the attenuation relationships. Different design spectra significantly change the target displacements predicted for the performance levels of the buildings.}, subject = {Erdbeben}, language = {en} } @article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for housing and the usage of urban infrastructure have increased, thereby elevating the risk to human lives from natural calamities. The occupancy demand has rapidly increased the construction rate, while inadequately designed structures are more vulnerable. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. Structural collapse causes economic losses and endangers human lives. Applying different theoretical methods to analyze structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performance is necessary for future developments. The process, as mentioned earlier, is known as Rapid Visual Screening (RVS). This technique has been developed to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality does not provide some of the required parameters; in this case, the RVS process turns into a tedious scenario. Hence, to tackle such a situation, multiple-criteria decision-making (MCDM) methods for seismic vulnerability assessment open a new gateway. The different parameters required by RVS can be incorporated into MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper aims to bridge the gap between RVS and MCDM. Furthermore, to define the correlation between these techniques, the methodologies from the Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes have been implemented.
The resulting seismic vulnerability assessments of the structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, using the advancements in computational science and numerical modeling can equip humanity to predict its severity, understand the outcomes, and prepare for post-disaster management. Many buildings in developed metropolitan areas are aged and still in service. These buildings were also designed before national seismic codes were established or before construction regulations were introduced. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the performance of the existing structures. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through structural analysis, as this would be unrealistic because of the rigorous computations, long duration, and substantial expenditure. Therefore, it calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers.
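A minimal sketch of the kind of SVM classifier pipeline described here, assuming a tabular dataset of eight performance modifiers per building and a damage-grade label; the file name and column layout are illustrative placeholders, not the authors' data:

# Hypothetical layout: columns 0..7 are the eight performance modifiers,
# column 8 is the observed damage class.
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

X = np.loadtxt("building_modifiers.csv", delimiter=",", usecols=range(8))
y = np.loadtxt("building_modifiers.csv", delimiter=",", usecols=8)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
model = make_pipeline(StandardScaler(), SVC(kernel="rbf", C=1.0, gamma="scale"))
model.fit(X_tr, y_tr)
print("test accuracy:", model.score(X_te, y_te))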
Based on the study and its results, the ML model using SVM classifies the given input data into the corresponding classes and thus performs the hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Rasulzade, Shahla and Lahmer, Tom and Raj Das, Rohan}, title = {A Synthesized Study Based on Machine Learning Approaches for Rapid Classifying Earthquake Damage Grades to RC Buildings}, series = {Applied Sciences}, volume = {2021}, journal = {Applied Sciences}, number = {Volume 11, issue 16, article 7540}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app11167540}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210818-44853}, pages = {1 -- 33}, abstract = {A vast number of existing buildings were constructed before the development and enforcement of seismic design codes and therefore run the risk of being severely damaged under the action of seismic excitations. This not only poses a threat to people's lives but also affects the socio-economic stability of the affected area. Therefore, it is necessary to assess the present vulnerability of such buildings to make an educated decision regarding risk mitigation by seismic strengthening techniques such as retrofitting. However, it is not feasible, in terms of cost and time, to inspect, repair, and augment every old building on an urban scale. As a result, reliable rapid screening methods, such as Rapid Visual Screening (RVS), have garnered increasing interest among researchers and decision-makers alike. In this study, the effectiveness of five different Machine Learning (ML) techniques in vulnerability prediction applications has been investigated. The damage data of four different earthquakes from Ecuador, Haiti, Nepal, and South Korea have been utilized to train and test the developed models. Eight performance modifiers have been implemented as variables in a supervised ML setting. The investigations in this paper illustrate that the vulnerability classes assessed by the ML techniques were very close to the actual damage levels observed in the buildings.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianLahmer, author = {Harirchian, Ehsan and Lahmer, Tom}, title = {Improved Rapid Visual Earthquake Hazard Safety Evaluation of Existing Buildings Using a Type-2 Fuzzy Logic Model}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 3, 2375}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10072375}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200331-41161}, pages = {14}, abstract = {Rapid Visual Screening (RVS) is a procedure that estimates structural scores for buildings and prioritizes their retrofit and upgrade requirements. Despite the speed and simplicity of RVS, many of the collected parameters are non-commensurable and include subjectivity due to visual observations. This might cause uncertainties in the evaluation, which emphasizes the use of a fuzzy-based method. This study aims to propose a novel RVS methodology based on the interval type-2 fuzzy logic system (IT2FLS) to set the priority of vulnerable buildings to undergo detailed assessment while covering uncertainties and minimizing their effects during evaluation.
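To make the interval type-2 idea concrete, a minimal sketch follows: each input grade is an interval bounded by an upper and a lower membership function, so the vagueness of a visual rating is itself carried through the inference. The membership parameters are illustrative, not those of the cited model:

# Interval type-2 membership evaluation: the grade of an input x is an
# interval [mu_lower(x), mu_upper(x)], the "footprint of uncertainty".
def tri(x, a, b, c):
    if x <= a or x >= c:
        return 0.0
    return (x - a) / (b - a) if x <= b else (c - x) / (c - b)

def it2_grade(x):
    upper = tri(x, 0.0, 5.0, 10.0)        # upper membership function
    lower = 0.6 * tri(x, 1.0, 5.0, 9.0)   # lower membership function (shrunk)
    return lower, upper

lo, up = it2_grade(4.0)
print("membership interval:", lo, up)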
The proposed method estimates the vulnerability of a building, in terms of a Damage Index, considering the number of stories, the age of the building, plan irregularity, vertical irregularity, building quality, and peak ground velocity as inputs, with a single output variable. The applicability of the proposed method has been investigated using a post-earthquake damage database of reinforced concrete buildings from the Bing{\"o}l and D{\"u}zce earthquakes in Turkey.}, subject = {Fuzzy-Logik}, language = {en} } @article{HarirchianLahmerBuddhirajuetal., author = {Harirchian, Ehsan and Lahmer, Tom and Buddhiraju, Sreekanth and Mohammad, Kifaytullah and Mosavi, Amir}, title = {Earthquake Safety Assessment of Buildings through Rapid Visual Screening}, series = {Buildings}, volume = {2020}, journal = {Buildings}, number = {Volume 10, Issue 3}, publisher = {MDPI}, doi = {10.3390/buildings10030051}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200331-41153}, pages = {15}, abstract = {Earthquakes are among the most devastating natural disasters, causing severe economic, environmental, and social destruction. Earthquake safety assessment and building hazard monitoring can contribute greatly to urban sustainability through identification of and insight into optimum materials and structures. While the vulnerability of structures mainly depends on the structural resistance, the safety assessment of buildings can be highly challenging. In this paper, we consider the Rapid Visual Screening (RVS) method, which is a qualitative procedure for estimating structural scores for buildings, suitable for medium- to high-seismicity cases. This paper presents an overview of the common RVS methods, i.e., FEMA P-154, IITK-GGSDMA, and EMPI. To examine their accuracy and validity, a practical comparison is performed between their assessments and the observed damage of reinforced concrete buildings from a street survey in the Bing{\"o}l region, Turkey, after the 1 May 2003 earthquake. The results demonstrate that RVS methods are a vital tool for preliminary damage estimation. Furthermore, the comparative analysis showed that FEMA P-154 creates an assessment that overestimates damage states and is not economically viable, while EMPI and IITK-GGSDMA provide more accurate and more practical estimates, respectively.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to hit the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners with the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis on each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost.
This exhibits the need for a fast and reliable method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building data consist of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} } @article{HarirchianLahmerRasulzade, author = {Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Earthquake Hazard Safety Assessment of Existing Buildings Using Optimized Multi-Layer Perceptron Neural Network}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {Volume 13, Issue 8, 2060}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13082060}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200504-41575}, pages = {16}, abstract = {The latest earthquakes have proven that many existing buildings, particularly in developing countries, are not secure against earthquake damage. A variety of statistical and machine-learning approaches have been proposed to identify vulnerable buildings for the prioritization of retrofitting. The present work aims to investigate earthquake susceptibility through the combination of six building performance variables that can be used to obtain an optimal prediction of the damage state of reinforced concrete buildings using an artificial neural network (ANN). In this regard, a multi-layer perceptron network is trained and optimized using a database of 484 damaged buildings from the D{\"u}zce earthquake in Turkey. The results demonstrate the feasibility and effectiveness of the selected ANN approach to classify concrete structural damage, which can be used as a preliminary assessment technique to identify vulnerable buildings in disaster risk-management programs.}, subject = {Erdbeben}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases in middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is the use of medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects.
Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis by selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning: the methods of random trees (RTs), the C5.0 decision tree, the support vector machine (SVM), and the Chi-squared automatic interaction detection (CHAID) decision tree are used. The proposed method shows promising results, and the study confirms that the RTs model outperforms the other models.}, subject = {Maschinelles Lernen}, language = {en} } @misc{Herrmann, type = {Master Thesis}, author = {Herrmann, Annemarie}, title = {Investigation of buckling behavior of carbon fiber-reinforced composite shell structures with openings}, doi = {10.25643/bauhaus-universitaet.1812}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130107-18129}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {79}, abstract = {Thin-walled cylindrical composite shell structures are often applied in aerospace for lighter and cheaper launcher transport systems. These structures exhibit sensitivity to geometrical imperfections and are prone to buckling under axial compression. Today the design is based on NASA guidelines from the 1960s [1], using a conservative lower bound curve embodying many experimental results of that time. It is well known that the advantages and different characteristics of composites, as well as the evolution of manufacturing standards, are not considered appropriately in this outdated approach. The DESICOS project was initiated to provide new design guidelines regarding all the advantages of composites and to allow further weight reduction of space structures by guaranteeing a more precise and robust design. Therefore it is necessary, among other things, to understand how a cutout of different dimensions affects the buckling load of a thin-walled cylindrical shell structure in combination with initial geometric imperfections. This work is intended to identify a ratio between the cutout characteristic dimension (in this case the cutout diameter) and the structure characteristic dimension (in this case the cylinder radius) that can be used to tell whether the buckling behavior is dominated by the initial imperfections or by the cutout.}, subject = {buckling}, language = {en} } @phdthesis{Higuchi2007, author = {Higuchi, Shoko}, title = {Cost-Benefit Based Maintenance Optimization for Deteriorating Structures}, doi = {10.25643/bauhaus-universitaet.1288}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20080513-13616}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2007}, abstract = {In recent years, increasing consideration has been given to the lifetime extension of existing structures. This is based on the fact that a growing percentage of civil infrastructure as well as buildings is threatened by obsolescence, and that for simple monetary reasons this can no longer be countered by simply re-building everything anew. Hence maintenance interventions are required which allow partial or complete structural rehabilitation. However, maintenance interventions have to be economically reasonable; that is, maintenance expenditures have to be outweighed by expected future benefits. If this is not the case, then the structure is indeed obsolete - at least in its current functional, economic, technical, or social configuration - and innovative alternatives have to be evaluated.
An optimization formulation for planning maintenance interventions based on cost-benefit criteria is proposed herein. The underlying formulation is as follows: (a) between maintenance interventions, structural deterioration is described as a random process; (b) maintenance interventions can take place anytime throughout the lifetime and comprise the rehabilitation of all deterioration states above a certain minimum level; and (c) maintenance interventions are optimized by taking into account all expected life-cycle costs (construction, failure, inspection, and state-dependent repair costs) as well as state- or time-dependent benefit rates. The optimization is performed by an evolutionary algorithm. The proposed approach also allows determining optimal lifetimes and acceptable failure rates. Numerical examples demonstrate the importance of defining benefit rates explicitly. It is shown that the optimal maintenance solution requires taking action before the acceptable failure rate or the zero expected net benefit rate level is reached. Deferring maintenance decisions generally results not only in higher losses but also in overly hazardous structures.}, subject = {Kosten-Nutzen-Analyse}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {Classical Internet of Things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to collisions during simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. Preventive, detection, and control solutions to congestion management in the network are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using a fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent collisions. In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of including the pre-congested node in the pathway. The main goals of this study are to balance energy consumption across network nodes, reduce the rate of lost packets, and increase the quality of service in routing.
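A minimal sketch of the congestion-prevention phase as described: candidate next hops are scored by a small fuzzy-style rule base over queue occupancy and residual energy, and the packet is forwarded to the best-scored node. The membership shapes and rules are illustrative assumptions, not the CCFDM rule base:

# Fuzzy-style next-hop scoring; both inputs are normalized to [0, 1].
def low(x):  return max(0.0, 1.0 - x / 0.5)
def high(x): return max(0.0, (x - 0.5) / 0.5)

def hop_score(queue_occupancy, residual_energy):
    # Rule 1: IF queue low AND energy high THEN hop is good.
    good = min(low(queue_occupancy), high(residual_energy))
    # Rule 2: IF queue high THEN hop is poor.
    poor = high(queue_occupancy)
    # Weighted-average defuzzification of the two rule activations.
    total = good + poor
    return good / total if total > 0 else 0.5

candidates = [("n1", 0.2, 0.9), ("n2", 0.7, 0.8), ("n3", 0.3, 0.4)]
best = max(candidates, key=lambda c: hop_score(c[1], c[2]))
print("forward via", best[0])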
Simulation results showed that the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable of improving routing parameters than recent algorithms.}, subject = {Internet der Dinge}, language = {en} } @phdthesis{Hossain, author = {Hossain, Md Naim}, title = {Isogeometric analysis based on Geometry Independent Field approximaTion (GIFT) and Polynomial Splines over Hierarchical T-meshes}, doi = {10.25643/bauhaus-universitaet.4037}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20191129-40376}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {157}, abstract = {This thesis addresses an adaptive higher-order method based on a Geometry Independent Field approximaTion (GIFT) of polynomial/rational splines over hierarchical T-meshes (PHT/RHT-splines). In isogeometric analysis, the basis functions used for constructing geometric models in computer-aided design (CAD) are also employed to discretize the partial differential equations (PDEs) for numerical analysis. Non-uniform rational B-splines (NURBS) are the most commonly used basis functions in CAD. However, they may not be ideal for numerical analysis where local refinement is required. The alternative method GIFT deploys different splines for geometry and numerical analysis. NURBS are utilized for the geometry representation, while for the field solution, PHT/RHT-splines are used. PHT-splines not only inherit the useful properties of B-splines and NURBS, but also possess the capabilities of local refinement and hierarchical structure. The smooth basis function properties of PHT-splines make them suitable for analysis purposes. While most problems considered in isogeometric analysis can be solved efficiently when the solution is smooth, many non-trivial problems have rough solutions. For example, this can be caused by the presence of re-entrant corners in the domain. For such problems, a tensor-product basis (as in the case of NURBS) is less suitable for resolving the singularities that appear, since refinement propagates throughout the computational domain. Hierarchical bases and local refinement (as in the case of PHT-splines) allow for a more efficient way to resolve these singularities by adding more degrees of freedom where they are necessary. In order to drive the adaptive refinement, an efficient recovery-based error estimator is proposed in this thesis. The estimator produces a recovery solution which is a more accurate approximation than the computed numerical solution. Several two- and three-dimensional numerical investigations with PHT-splines of higher order and continuity prove that the proposed method is capable of obtaining results with higher accuracy, better convergence, fewer degrees of freedom, and less computational cost than NURBS for smooth solution problems. The adaptive GIFT method utilizing PHT-splines with the recovery-based error estimator is used for solutions with discontinuities or singularities, where adaptive local refinement in particular domains of interest achieves higher accuracy with fewer degrees of freedom.
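A minimal sketch of a recovery-based marking step in this spirit: a smoothed (recovered) gradient is compared element-wise with the raw numerical gradient, and the elements with the largest discrepancy are flagged for local refinement. The 1D arrays and the moving-average smoothing below are illustrative stand-ins for the PHT-spline recovery solution:

# Flag the worst elements by the discrepancy between a smoothed (recovered)
# gradient and the raw element gradient.
import numpy as np

def mark_for_refinement(grad_elem, fraction=0.2):
    kernel = np.array([0.25, 0.5, 0.25])                  # stand-in recovery operator
    recovered = np.convolve(grad_elem, kernel, mode="same")
    eta = np.abs(recovered - grad_elem)                   # element error indicator
    n_mark = max(1, int(fraction * len(eta)))
    return np.argsort(eta)[-n_mark:]                      # indices of worst elements

grads = np.array([1.0, 1.1, 1.3, 2.8, 1.2, 1.1])          # a jump hints at roughness
print("refine elements:", mark_for_refinement(grads))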
The method also proves capable of handling complicated multi-patch domains for two- and three-dimensional problems, outperforming uniform refinement in terms of degrees of freedom and computational cost.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{HosseinNezhadShirazi, author = {Hossein Nezhad Shirazi, Ali}, title = {Multi-Scale Modeling of Lithium ion Batteries: a thermal management approach and molecular dynamic studies}, doi = {10.25643/bauhaus-universitaet.4098}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200214-40986}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Rechargeable lithium ion batteries (LIBs) play a very significant role in power supply and storage. In recent decades, LIBs have attracted tremendous attention in mobile communication, portable electronics, and electric vehicles. Furthermore, global warming has become a worldwide issue due to the ongoing production of greenhouse gases, which motivates solutions such as renewable sources of energy. Solar and wind energies are the most important renewable energy sources. As the technology progresses, they will require batteries to store the produced power and balance power generation against consumption. Nowadays, rechargeable batteries such as LIBs are considered one of the best solutions. They provide high specific energy and high rate performance, while their rate of self-discharge is low. The performance of LIBs can be improved through the modification of battery characteristics. The size of the solid particles in the electrodes can impact the specific energy and the cyclability of batteries; it can improve the lithium content of the electrode, which is a vital parameter for the capacity and capability of a battery. There exist different sources of heat generation in LIBs, such as the heat produced during electrochemical reactions and by the internal resistance of the battery. The size of the electrode's electroactive particles can directly affect the heat produced in the battery. It will be shown that smaller solid particles enhance the thermal characteristics of LIBs. Thermal issues such as overheating, temperature maldistribution in the battery, and thermal runaway have confined the applications of LIBs. Such thermal challenges reduce the life cycle of LIBs. Moreover, they may lead to dangerous conditions such as fire or even explosion of batteries. However, recent advances in the fabrication of advanced materials such as graphene and carbon nanotubes, with extraordinary thermal conductivity and electrical properties, offer new opportunities to enhance their performance. Since experimental work is expensive, our objective is to use computational methods to investigate the thermal issues in LIBs. Dissipation of the heat produced in the battery can improve the cyclability and specific capacity of LIBs. In real applications, LIB packs consist of several battery cells that are used as the power source. Therefore, it is worthwhile to investigate the thermal characteristics of battery packs under their cycles of charging/discharging operation at different applied current rates. To remove the heat produced in batteries, they can be surrounded by materials with high thermal conductivity. Paraffin wax absorbs large amounts of energy since it has a high latent heat; the absorption occurs at a nearly constant temperature during the phase change. In addition, the thermal conductivity of paraffin can be magnified with nanomaterials such as graphene, CNT, and fullerene to form a nano-composite medium.
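A common first estimate for the conductivity gain of such a nanocomposite is Maxwell's effective-medium model for dilute spherical inclusions; it is only a rough proxy for plate- or tube-like fillers such as graphene or CNTs, and the property values below are illustrative:

# Maxwell effective-medium estimate for a matrix with dilute spherical fillers.
def maxwell_k_eff(k_matrix, k_particle, phi):
    num = k_particle + 2.0 * k_matrix + 2.0 * phi * (k_particle - k_matrix)
    den = k_particle + 2.0 * k_matrix - phi * (k_particle - k_matrix)
    return k_matrix * num / den

k_paraffin = 0.25      # W/(m K), typical order of magnitude for paraffin wax
k_filler = 3000.0      # W/(m K), illustrative in-plane value for graphene
for phi in (0.01, 0.05):
    print("volume fraction", phi, "->",
          round(maxwell_k_eff(k_paraffin, k_filler, phi), 3), "W/(m K)")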
Improving the thermal conductivity of LIBs increases the heat dissipation from batteries, which is a vital issue in battery thermal management systems. The application of two-dimensional (2D) materials has been on the rise since the exfoliation of graphene from bulk graphite. 2D materials are single layers of nanoscale thickness that show superior thermal, mechanical, and optoelectronic properties. They are potential candidates for energy storage and supply, particularly as electrode material in lithium ion batteries. The high thermal conductivity of graphene and graphene-like materials can play a significant role in the thermal management of batteries. However, defects always exist in nanomaterials, since there is no ideal fabrication process. One of the most important defects in materials are nanocracks, which can dramatically weaken the mechanical properties of the materials. Newly synthesized crystalline carbon nitride with the stoichiometry of C3N has attracted much attention due to its extraordinary mechanical and thermal properties. The other nanomaterial is phagraphene, which shows anisotropic mechanical characteristics that are ideal for the production of nanocomposites. It shows ductile fracture behavior when subjected to uniaxial loadings. It is worthwhile to investigate the thermo-mechanical properties of these materials in their pristine and defective states. We hope that the findings of our work will not only be useful for experimental and theoretical research but also help to design advanced electrodes for LIBs.}, subject = {Akkumulator}, language = {en} } @phdthesis{Haefner2006, author = {H{\"a}fner, Stefan}, title = {Grid-based procedures for the mechanical analysis of heterogeneous solids}, doi = {10.25643/bauhaus-universitaet.858}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20070830-9185}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2006}, abstract = {The importance of modern simulation methods in the mechanical analysis of heterogeneous solids is presented in detail. It is noted that even for small bodies the required high-resolution analysis reaches the limits of today's computational power, in terms of memory demand as well as acceptable computational effort. A further problem is that frequently the accuracy of geometrical modelling of heterogeneous bodies is inadequate. The present work introduces a systematic combination and adaptation of grid-based methods for achieving an essentially higher resolution in the numerical analysis of heterogeneous solids. Grid-based methods are also well suited for developing efficient and numerically stable algorithms for flexible geometrical modeling. A key aspect is the uniform data management for a grid, which can be utilized to reduce the effort and complexity of almost all concerned methods. A new finite element program, called Mulgrido, was developed to realize this concept consistently and to test the proposed methods. Several disadvantages which generally result from grid discretizations are selectively corrected by modified methods. The present work is structured into a geometrical model, a mechanical model and a numerical model. The geometrical model includes digital image-based modeling and in particular several methods for the theory-based generation of inclusion-matrix models. Essential contributions refer to variable shape, size distribution, separation checks and placement procedures of inclusions.
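A minimal sketch of a take-and-place inclusion generator with a separation check, in the spirit of the procedures summarized above; the circular inclusions, sizes, and rejection criterion are illustrative:

# Rejection-sampling placement of non-overlapping circular inclusions
# in a unit box, with a minimum gap enforced between inclusions.
import random

def place_inclusions(n, radius, min_gap, box=1.0, max_tries=10000):
    placed = []
    tries = 0
    while len(placed) < n and tries < max_tries:
        tries += 1
        x = random.uniform(radius, box - radius)
        y = random.uniform(radius, box - radius)
        # separation check: reject candidates overlapping or closer than min_gap
        ok = all((x - px) ** 2 + (y - py) ** 2 >= (2 * radius + min_gap) ** 2
                 for px, py in placed)
        if ok:
            placed.append((x, y))
    return placed

print(len(place_inclusions(20, radius=0.05, min_gap=0.01)), "inclusions placed")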
The mechanical model provides the fundamentals of continuum mechanics, homogenization and damage modeling for the following numerical methods. The first topic of the numerical model introduces a special version of B-spline finite elements. These finite elements are entirely variable in the order k of the B-splines. For homogeneous bodies this means that the approximation quality can be arbitrarily scaled. In addition, the multiphase finite element concept in combination with transition zones along material interfaces yields a valuable solution for heterogeneous bodies. As the formulation is element-based, the storage of a global stiffness matrix is avoided, such that the memory demand can be substantially reduced. This is possible in combination with iterative solver methods, which represent the second topic of the numerical model. Here, the focus lies on multigrid methods, where the number of required operations to solve a linear equation system only increases linearly with problem size. Moreover, for badly conditioned problems a substantial improvement is achieved by preconditioning. The third part of the numerical model discusses certain aspects of damage simulation which are closely related to the proposed grid discretization. The strong efficiency of the linear analysis can be maintained for damage simulation. This is achieved by a damage-controlled sequentially linear iteration scheme. Finally, a study on the effective material behavior of heterogeneous bodies is presented. Especially the influence of inclusion shapes is examined. By means of altogether more than one hundred thousand random geometrical arrangements, the effective material behavior is statistically analyzed and assessed.}, subject = {B-Spline}, language = {en} } @inproceedings{HaefnerKesselKoenke, author = {H{\"a}fner, Stefan and Kessel, Marco and K{\"o}nke, Carsten}, title = {MULTIPHASE B-SPLINE FINITE ELEMENTS OF VARIABLE ORDER IN THE MECHANICAL ANALYSIS OF HETEROGENEOUS SOLIDS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2964}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29643}, pages = {37}, abstract = {Advanced finite elements are proposed for the mechanical analysis of heterogeneous materials. The approximation quality of these finite elements can be controlled by a variable order of B-spline shape functions. An element-based formulation is developed such that the finite element problem can be solved iteratively without storing a global stiffness matrix. This memory saving allows for a substantial increase in problem size. The heterogeneous material is modelled by projection onto a uniform, orthogonal grid of elements. Conventional, strictly grid-based finite element models show severe oscillating defects in the stress solutions at material interfaces. This problem is cured by the extension to multiphase finite elements. This concept enables the definition of a heterogeneous material distribution within the finite element. This is possible by a variable number of integration points, to each of which individual material properties can be assigned. Based on an interpolation of material properties at nodes and further smooth interpolation within the finite elements, a continuous material function is established. With both a continuous B-spline shape function and a continuous material function, the stress solution will also be continuous in the domain.
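To make the variable order k concrete, a minimal sketch of B-spline basis evaluation by the textbook Cox-de Boor recursion on a uniform knot vector follows (order k = degree + 1; the knot vector is illustrative):

# Cox-de Boor recursion: B-spline basis function i of order k at parameter t.
def bspline_basis(i, k, t, knots):
    if k == 1:
        return 1.0 if knots[i] <= t < knots[i + 1] else 0.0
    left = right = 0.0
    d1 = knots[i + k - 1] - knots[i]
    if d1 > 0:
        left = (t - knots[i]) / d1 * bspline_basis(i, k - 1, t, knots)
    d2 = knots[i + k] - knots[i + 1]
    if d2 > 0:
        right = (knots[i + k] - t) / d2 * bspline_basis(i + 1, k - 1, t, knots)
    return left + right

knots = [0, 1, 2, 3, 4, 5]
print(bspline_basis(0, 4, 2.5, knots))  # cubic (order k = 4) basis value at t = 2.5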
The inaccuracy implied by the continuous material field is far less detrimental than the previous oscillating behaviour of the stresses. One- and two-dimensional numerical examples are presented.}, subject = {Architektur }, language = {en} } @inproceedings{HaefnerKoenke, author = {H{\"a}fner, Stefan and K{\"o}nke, Carsten}, title = {DAMAGE SIMULATION OF HETEROGENEOUS SOLIDS BY NONLOCAL FORMULATIONS ON ORTHOGONAL GRIDS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2963}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29638}, pages = {15}, abstract = {The present paper is part of a comprehensive approach to grid-based modelling. This approach includes geometrical modelling by pixel or voxel models, advanced multiphase B-spline finite elements of variable order and fast iterative solver methods based on the multigrid method. So far, we have only presented these grid-based methods in connection with the linear elastic analysis of heterogeneous materials. Damage simulation demands further considerations. The direct stress solution of standard bilinear finite elements is severely defective, especially along material interfaces. Besides achieving objective constitutive modelling, various nonlocal formulations are applied to improve the stress solution. Such corrective data processing can refer either to the input data in terms of Young's modulus or to the attained finite element stress solution, as well as to a combination of both. A damage-controlled sequentially linear analysis is applied in connection with an isotropic damage law. Essentially by a high resolution of the heterogeneous solid, local isotropic damage on the material subscale allows the simulation of complex damage topologies such as cracks. Therefore anisotropic degradation of a material sample can be simulated. Based on an effectively secant global stiffness, the analysis is numerically stable. The iteration step size is controlled for an adequate simulation of the damage path. This requires many steps, but in the iterative solution process each new step starts with the solution of the prior step. Therefore this method is quite effective. The present paper provides an introduction to the proposed concept for a stable simulation of damage in heterogeneous solids.}, subject = {Architektur }, language = {en} } @inproceedings{HaefnerKoenke, author = {H{\"a}fner, Stefan and K{\"o}nke, Carsten}, title = {MULTIGRID PRECONDITIONED CONJUGATE GRADIENT METHOD IN THE MECHANICAL ANALYSIS OF HETEROGENEOUS SOLIDS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2962}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29626}, pages = {29}, abstract = {A fast solver method called the multigrid preconditioned conjugate gradient method is proposed for the mechanical analysis of heterogeneous materials on the mesoscale. Even small samples of a heterogeneous material such as concrete show a complex geometry of different phases. These materials can be modelled by projection onto a uniform, orthogonal grid of elements. As one major problem, the possible resolution of the concrete specimen is generally restricted due to (a) computation times and, even more critically, (b) memory demand. Iterative solvers can be based on a local element-based formulation, while orthogonal grids consist of geometrically identical elements.
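A minimal sketch of the matrix-free idea behind such element-based iterative solving: conjugate gradients only needs the action v -> K v, which can be accumulated element by element instead of storing K. A 1D chain of two-node elements stands in for the assembled grid operator; the multigrid preconditioner itself is omitted here:

# Matrix-free conjugate gradient: K is never assembled, only applied.
import numpy as np

def apply_K(v):
    # element-by-element accumulation of the two-node "stiffness" [1 -1; -1 1]
    out = np.zeros_like(v)
    for e in range(len(v) - 1):
        out[e]     += v[e] - v[e + 1]
        out[e + 1] += v[e + 1] - v[e]
    return out

def cg(b, tol=1e-10, max_it=500):
    x = np.zeros_like(b)
    r = b - apply_K(x)
    p = r.copy()
    rr = r @ r
    for _ in range(max_it):
        Kp = apply_K(p)
        alpha = rr / (p @ Kp)
        x += alpha * p
        r -= alpha * Kp
        rr_new = r @ r
        if rr_new < tol:
            break
        p = r + (rr_new / rr) * p
        rr = rr_new
    return x

b = np.array([0.0, 1.0, 0.0, -1.0, 0.0])  # self-equilibrated load vector
print(cg(b))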
The element-based formulation is short and transparent, and therefore efficient in implementation. A variation of the material properties in elements or integration points is possible. The multigrid method is a fast iterative solver method, where ideally the computational effort only increases linearly with problem size. This is an optimal property which is almost reached in the implementation presented here. In fact, no other method is known which scales better than linearly. Therefore the multigrid method gains in importance the larger the problem becomes. But for heterogeneous models with very large ratios of Young's moduli, the multigrid method slows down considerably by a constant factor. Such large ratios occur in certain heterogeneous solids, as well as in the damage analysis of solids. As a solution to this problem, the multigrid preconditioned conjugate gradient method is proposed. A benchmark highlights the multigrid preconditioned conjugate gradient method as the method of choice for very large ratios of Young's modulus. A proposed modified multigrid cycle shows good results, both in application as a stand-alone solver and as a preconditioner.}, subject = {Architektur }, language = {en} } @inproceedings{HaefnerVogelKoenke, author = {H{\"a}fner, Stefan and Vogel, Frank and K{\"o}nke, Carsten}, title = {FINITE ELEMENT ANALYSIS OF TORSION FOR ARBITRARY CROSS-SECTIONS}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2848}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28483}, pages = {11}, abstract = {The present article proposes an alternative way to compute the torsional stiffness based on three-dimensional continuum mechanics instead of applying a specific theory of torsion. A thin, representative beam slice is discretized by solid finite elements. Adequate boundary conditions and coupling conditions are integrated into the numerical model to obtain a proper answer on the torsion behaviour, thus on shear center, shear stress and torsional stiffness. This finite element approach only includes general assumptions of beam torsion which are independent of cross-section geometry. These assumptions essentially are: no in-plane deformation, constant torsion and free warping. Thus it is possible to achieve numerical solutions of high accuracy for arbitrary cross-sections. Due to the direct link to three-dimensional continuum mechanics, it is possible to extend the range of torsion analysis to sections which are composed of different materials or even to heterogeneous beams on a high scale of resolution. A brief study follows to validate the implementation, and results are compared to analytical solutions.}, subject = {Angewandte Informatik}, language = {en} } @article{IlyaniAkmarKramerRabczuk, author = {Ilyani Akmar, A.B. and Kramer, O. and Rabczuk, Timon}, title = {Multi-objective evolutionary optimization of sandwich structures: An evaluation by elitist non-dominated sorting evolution strategy}, series = {American Journal of Engineering and Applied Sciences}, journal = {American Journal of Engineering and Applied Sciences}, doi = {10.3844/ajeassp.2015.185.201}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170418-31402}, pages = {185 -- 201}, abstract = {In this study, an application of evolutionary multi-objective optimization algorithms to the optimization of sandwich structures is presented.
The solution strategy is known as the Elitist Non-Dominated Sorting Evolution Strategy (ENSES), wherein Evolution Strategies (ES) serve as the Evolutionary Algorithm (EA) within the elitist Non-dominated Sorting Genetic Algorithm (NSGA-II) procedure. Evolutionary algorithms are a suitable approach for resolving multi-objective optimization problems because they are inspired by natural evolution and closely linked to Artificial Intelligence (AI) techniques, and elitism has been shown to be an important factor for improving evolutionary multi-objective search. In order to evaluate the performance of ENSES, the well-known case study of sandwich structures is reconsidered. For Case 1, the goals of the multi-objective optimization are minimization of the deflection and the weight of the sandwich structures. The length and the core and skin thicknesses are the design variables of Case 1. For Case 2, the objective functions are the fabrication cost, the beam weight and the end deflection of the sandwich structures. There are four design variables, i.e., the weld height, the weld length, the beam depth and the beam width, in Case 2. Numerical results are presented in terms of Pareto-optimal solutions for both evaluated cases.}, subject = {Optimierung}, language = {en} } @inproceedings{Itam, author = {Itam, Zarina}, title = {NUMERICAL SIMULATION OF THERMO-HYGRAL ALKALI-SILICA REACTION MODEL IN CONCRETE AT THE MESOSCALE}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2853}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28536}, pages = {7}, abstract = {This research aims to model Alkali-Silica Reaction gel expansion in concrete under the influence of hygral and thermal loading, based on experimental results. ASR provokes a heterogeneous expansion in concrete leading to dimensional changes and eventually the premature failure of the concrete structure. This can result in map cracking on the concrete surface which will decrease the concrete stiffness. Factors that influence ASR are parameters such as the cement alkalinity, the amount of deleterious silica in the aggregate used, concrete porosity, and external factors like temperature, humidity and an external source of alkali from the ingression of deicing salts. The uncertainties of the influential factors make ASR a difficult phenomenon to solve; hence the approach taken here is stochastic modelling, in which a numerical simulation of a concrete cross-section integrates experimental results from the Finger-Institute for Building Materials Science at the Bauhaus-Universit{\"a}t Weimar. The problem is formulated as a multi-field problem, combining heat transfer, fluid transfer and the reaction rate model with the mechanical stress field. Simulation is performed as a mesoscale model considering aggregates and the mortar matrix. The reaction rate model will be calibrated using experimental results on concrete expansion due to ASR gained from concrete prism tests. Expansive strain values for transient environmental conditions will be calculated based on the reaction rate model.
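A minimal sketch of coupling a reaction extent to expansive strain in this spirit; the sigmoidal extent below is a Larive-type expression with illustrative latency and characteristic times, whereas the calibrated parameters would come from the prism tests:

# Larive-type reaction extent xi(t) driving an asymptotic ASR expansion strain.
import math

def reaction_extent(t, tau_lat=80.0, tau_char=40.0):   # times in days, illustrative
    return (1.0 - math.exp(-t / tau_char)) / (1.0 + math.exp(-(t - tau_lat) / tau_char))

def asr_strain(t, eps_inf=1.5e-3):                     # asymptotic expansion, illustrative
    return eps_inf * reaction_extent(t)

for t in (30, 90, 180, 365):
    print(t, "days:", asr_strain(t))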
Results from these models will make it possible to predict the rate of ASR expansion and the crack propagation that may arise.}, subject = {Angewandte Informatik}, language = {en} } @article{I{\c{s}}{\i}kBueyueksaracLeventEkincietal., author = {I{\c{s}}{\i}k, Ercan and B{\"u}y{\"u}ksara{\c{c}}, Ayd{\i}n and Levent Ekinci, Yunus and Ayd{\i}n, Mehmet Cihan and Harirchian, Ehsan}, title = {The Effect of Site-Specific Design Spectrum on Earthquake-Building Parameters: A Case Study from the Marmara Region (NW Turkey)}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7247}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207247}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42758}, pages = {23}, abstract = {The Marmara Region (NW Turkey) has experienced significant earthquakes (M > 7.0) to date. A destructive earthquake is also expected in the region. To determine the effect of the site-specific design spectrum, eleven provinces located in the region were chosen according to the Turkey Earthquake Building Code updated in 2019. Additionally, the differences between the previous and updated regulations of the country were investigated. Peak Ground Acceleration (PGA) and Peak Ground Velocity (PGV) were obtained for each province by using earthquake ground motion levels with 2\%, 10\%, 50\%, and 68\% probability of exceedance in 50-year periods. The PGA values in the region range from 0.16 to 0.7 g for earthquakes with a return period of 475 years. For each province, two sample reinforced-concrete buildings with different numbers of stories but the same ground and structural characteristics were chosen. Static adaptive pushover analyses were performed for the sample buildings using each province's design spectrum. The variations in the earthquake and structural parameters were investigated according to the different geographical locations. It was determined that the site-specific design spectrum significantly influences target displacements for performance-based assessments of buildings due to the seismicity characteristics of the studied geographic location.}, subject = {Erdbeben}, language = {en} } @inproceedings{JaouadiLahmer, author = {Jaouadi, Zouhour and Lahmer, Tom}, title = {Topology optimization of structures subjected to multiple load cases by introducing the Epsilon constraint method}, series = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, booktitle = {Digital Proceedings, International Conference on the Applications of Computer Science and Mathematics in Architecture and Civil Engineering : July 20 - 22 2015, Bauhaus-University Weimar}, editor = {G{\"u}rlebeck, Klaus and Lahmer, Tom}, organization = {Bauhaus-Universit{\"a}t Weimar}, issn = {1611-4086}, doi = {10.25643/bauhaus-universitaet.2804}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170314-28042}, pages = {7}, abstract = {A topology optimization method has been developed for structures subjected to multiple load cases (for example, a bridge pier subjected to wind loads, traffic, superstructure ...). We formulate the problem as a multi-criteria optimization problem, where the compliance is computed for each load case. Then, the epsilon constraint method (proposed by Chankong and Haimes, 1971) is adapted.
The strategy of this method is based on the concept of minimizing the maximum compliance resulting from the critical load case, while the remaining compliances are considered in the constraints. In each iteration, the compliances of all load cases are computed and only the maximum one is minimized. The topology optimization process switches from one load case to another according to the variation of the resulting compliance. In this work, we motivate and explain the proposed methodology and provide some numerical examples.}, subject = {Angewandte Informatik}, language = {en} } @phdthesis{Jenabidehkordi, author = {Jenabidehkordi, Ali}, title = {An Efficient Adaptive PD Formulation for Complex Microstructures}, doi = {10.25643/bauhaus-universitaet.4742}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221124-47422}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {118}, abstract = {The computational costs of newly developed numerical simulations play a critical role in their acceptance within both academic use and industrial employment. Normally, the refinement of a method in the area of interest reduces the computational cost. This is unfortunately not true for most nonlocal simulations, since refinement typically increases the size of the material point neighborhood. Reducing the discretization size while keeping the neighborhood size will often require extra consideration. Peridynamics (PD) is a newly developed numerical method of nonlocal nature. Its straightforward integral-form equation of motion allows simulating dynamic problems without any extra consideration required. The formation and propagation of cracks are natural to peridynamics. This means that discontinuity is a result of the simulation and does not demand any post-processing. As with other nonlocal methods, PD is considered an expensive method. Refining the nodal spacing while keeping the neighborhood size (i.e., horizon radius) constant gives rise to several nonphysical phenomena. This research aims to reduce the peridynamic computational and implementation costs. A novel refinement approach is introduced. The proposed approach takes advantage of the PD flexibility in choosing the shape of the horizon by introducing multiple domains (with no intersections) to the nodes of the refinement zone. It will be shown that no ghost forces will be created when changing the horizon sizes in both subdomains. The approach is applied to both bond-based and state-based peridynamics and verified for a simple wave propagation refinement problem, illustrating the efficiency of the method. Further development of the method for higher dimensions proves to have a direct relationship with the mesh sensitivity of the PD. A method for solving the mesh sensitivity of the PD is introduced. The application of the method will be examined by solving a crack propagation problem similar to those reported in the literature. A new software architecture is proposed considering both academic and industrial use. The available simulation tools for employing PD will be collected, and their advantages and drawbacks will be addressed. The challenges of implementing any node-based nonlocal methods while maximizing the software's flexibility for further development and modification will be discussed and addressed. A software named Relation-Based Simulator (RBS) is developed for examining the proposed architecture. The exceptional capabilities of RBS will be explored by simulating three distinguished models.
RBS is available publicly and open to further development. The industrial acceptance of RBS will be tested by targeting its performance on one Mac and two Linux distributions.}, subject = {Peridynamik}, language = {en} }
@phdthesis{Jia, author = {Jia, Yue}, title = {Methods based on B-splines for model representation, numerical analysis and image registration}, doi = {10.25643/bauhaus-universitaet.2484}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20151210-24849}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {200}, abstract = {The thesis consists of inter-connected parts for modeling and analysis using newly developed isogeometric methods. The main parts are reproducing kernel triangular B-splines, extended isogeometric analysis for solving weakly discontinuous problems, collocation methods using superconvergent points, and the B-spline basis in image registration applications. Each topic is oriented towards the application of isogeometric analysis basis functions to ease the process of integrating the modeling and analysis phases of simulation. First, we develop a reproducing kernel triangular B-spline-based FEM for solving PDEs. We review the triangular B-splines and their properties. By definition, the triangular basis function is very flexible in modeling complicated domains. However, instability results when it is applied for analysis. We modify the triangular B-spline by a reproducing kernel technique, calculating a correction term for the triangular kernel function from the chosen surrounding basis. The improved triangular basis is capable of obtaining results with higher accuracy and almost optimal convergence rates. Second, we propose an extended isogeometric analysis for dealing with weakly discontinuous problems such as material interfaces. The original IGA is combined with XFEM-like enrichments, which are continuous functions themselves but have discontinuous derivatives. Consequently, the resulting solution space can approximate solutions with weak discontinuities. The method is also applied to curved material interfaces, where the inverse mapping and the curved triangular elements are considered. Third, we develop an IGA collocation method using superconvergent points. The collocation methods are efficient because no numerical integration is needed. In particular, when a higher-order polynomial basis is applied, the method has a lower computational cost than Galerkin methods. However, the positions of the collocation points are crucial for the accuracy of the method, as they affect the convergence rate significantly. The proposed IGA collocation method uses superconvergent points instead of the traditional Greville abscissae points. The numerical results show the proposed method can achieve better accuracy and optimal convergence rates, while the traditional IGA collocation has optimal convergence only for even polynomial degrees. Lastly, we propose a novel dynamic multilevel technique for handling image registration. It is an application of B-spline functions in image processing. The procedure considered aims to align a target image with a reference image by a spatial transformation. The method starts with an energy function which is the same as in FEM-based image registration. However, we simplify the solving procedure, working on the energy function directly. We dynamically solve for the control points, which are the coefficients of the B-spline basis functions. The new approach is simpler and faster. Moreover, it is also enhanced by a multilevel technique in order to prevent instabilities.
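A minimal 1D reduction of the described registration idea: the transform is a B-spline-weighted sum of control-point displacements, and the control points are updated by plain gradient descent on a sum-of-squared-differences energy. The signals, control-point spacing, and step size are illustrative:

# 1D B-spline registration: align a shifted Gaussian to a reference signal.
import numpy as np

def cubic_bspline(u):
    u = abs(u)
    if u < 1.0:
        return (4.0 - 6.0 * u ** 2 + 3.0 * u ** 3) / 6.0
    if u < 2.0:
        return (2.0 - u) ** 3 / 6.0
    return 0.0

x = np.arange(64, dtype=float)
reference = np.exp(-0.5 * ((x - 30.0) / 5.0) ** 2)   # synthetic reference
target = np.exp(-0.5 * ((x - 36.0) / 5.0) ** 2)      # same feature, shifted
cp_x = np.arange(0.0, 65.0, 16.0)                    # control points, spacing 16
c = np.zeros_like(cp_x)                              # control-point displacements

def energy(c):
    d = np.zeros_like(x)
    for cj, xj in zip(c, cp_x):
        d += cj * np.array([cubic_bspline((xi - xj) / 16.0) for xi in x])
    warped = np.interp(x + d, x, target)             # deform target toward reference
    return np.sum((warped - reference) ** 2)

for _ in range(300):                                 # central-difference gradient descent
    g = np.zeros_like(c)
    for j in range(len(c)):
        e = np.zeros_like(c)
        e[j] = 1e-3
        g[j] = (energy(c + e) - energy(c - e)) / 2e-3
    c -= 1.0 * g
print("final SSD energy:", energy(c))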
The numerical testing consists of two artificial images and four real bio-medical MRI brain and CT heart images; the tests show our registration method is accurate, fast and efficient, especially for large deformation problems.}, subject = {Finite-Elemente-Methode}, language = {en} } @article{JiangZhuangRabczuk, author = {Jiang, Jin-Wu and Zhuang, Xiaoying and Rabczuk, Timon}, title = {Orientation dependent thermal conductance in single-layer MoS2}, series = {Scientific Reports}, journal = {Scientific Reports}, doi = {10.1038/srep02209}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170418-31417}, abstract = {We investigate the thermal conductivity in armchair and zigzag MoS2 nanoribbons, by combining the non-equilibrium Green's function approach and the first-principles method. A strong orientation dependence is observed in the thermal conductivity. Particularly, the thermal conductivity at room temperature is about 673.6 Wm-1K-1 in the armchair nanoribbon and 841.1 Wm-1K-1 in the zigzag nanoribbon. By calculating the Caroli transmission, we disclose the underlying mechanism of this strong orientation dependence to be the fewer phonon transport channels in the armchair MoS2 nanoribbon in the frequency range of [150, 200] cm-1. Through the scaling of the phonon dispersion, we further illustrate that the thermal conductivity calculated for the MoS2 nanoribbon is essentially consistent with the superior thermal conductivity found for graphene.}, subject = {Mechanische Eigenschaft}, language = {en} } @article{JilteAhmadiKumaretal., author = {Jilte, Ravindra and Ahmadi, Mohammad Hossein and Kumar, Ravinder and Kalamkar, Vilas and Mosavi, Amir}, title = {Cooling Performance of a Novel Circulatory Flow Concentric Multi-Channel Heat Sink with Nanofluids}, series = {Nanomaterials}, volume = {2020}, journal = {Nanomaterials}, number = {Volume 10, Issue 4, 647}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/nano10040647}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200401-41241}, pages = {12}, abstract = {Heat rejection from electronic devices such as processors necessitates a high heat removal rate. The present study focuses on a novel liquid-cooled heat sink geometry made from four channels (width 4 mm and depth 3.5 mm) configured in a concentric shape with alternate flow passages (slot of 3 mm gap). In this study, the cooling performance of the heat sink was tested under simulated controlled conditions. The lower bottom surface of the heat sink was heated at a constant heat flux condition based on dissipated power of 50 W and 70 W. The computations were carried out for different volume fractions of nanoparticles, namely 0.5\% to 5\%, with water as base fluid at a flow rate of 30 to 180 mL/min. The results showed a higher rate of heat rejection from the nanofluid-cooled heat sink compared with water. The enhancement in performance was analyzed with the help of the difference between the nanofluid outlet temperature and the water outlet temperature under similar operating conditions. 
The enhancement was ~2\% for 0.5\% volume fraction nanofluids and ~17\% for a 5\% volume fraction.}, subject = {Nanostrukturiertes Material}, language = {en} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers. As a result of transportation processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating LDC. In this study, machine learning methods, namely support vector regression, Gaussian process regression, M5 model tree (M5P) and random forest, and multiple linear regression were examined in predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including correlation coefficient (CC), root mean squared error (RMSE) and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. The Taylor chart was used to evaluate the models and the results showed that among the machine learning models, M5P had superior performance, with CC of 0.823, RMSE of 454.9 and MAE of 380.9. The model of Sahay and Dutta, with CC of 0.795, RMSE of 460.7 and MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. In conclusion, the results proved that the developed M5P model with simple formulations was superior to other machine learning models and empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. 
In this research, different criteria have been chosen, such as environment, access, socio-economic factors, land-use, and physical context. These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. In parallel, layers corresponding to the sub-criteria were made in ArcGIS 10.3, and then, via a weighted overlay (map algebra), a locating plan was created. In the next step, seven hypothetical tall buildings (20 stories) in the best part of the locating plan were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the streets and open spaces throughout the city. These processes have been modeled in MATLAB, and the final fuzzy visibility plan was created with ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city, and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @phdthesis{Kessler2018, author = {Keßler, Andrea}, title = {Matrix-free voxel-based finite element method for materials with heterogeneous microstructures}, doi = {10.25643/bauhaus-universitaet.3844}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190116-38448}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {113}, year = {2018}, abstract = {Modern image detection techniques such as micro computer tomography (μCT), magnetic resonance imaging (MRI) and scanning electron microscopy (SEM) provide us with high-resolution images of the microstructure of materials in a non-invasive and convenient way. They form the basis for the geometrical models of high-resolution analysis, so-called image-based analysis. However, especially in 3D, discretizations of these models easily reach the size of 100 million degrees of freedom and require extensive hardware resources in terms of main memory and computing power to solve the numerical model. Consequently, the focus of this work is to combine and adapt numerical solution methods to reduce the memory demand first and then the computation time, and therewith enable the execution of the image-based analysis on modern desktop computers. Hence, the numerical model is a straightforward grid discretization of the voxel-based (pixels with a third dimension) geometry which omits the boundary detection algorithms and allows reduced storage of the finite element data structure and a matrix-free solution algorithm. This in turn reduces the effort of almost all applied grid-based solution techniques and results in memory-efficient and numerically stable algorithms for the microstructural models. Two variants of the matrix-free algorithm are presented. The efficient iterative solution method of conjugate gradients is used with matrix-free applicable preconditioners such as the Jacobi and the especially suited multigrid method. The jagged material boundaries of the voxel-based mesh are smoothed through embedded boundary elements which contain different material information at the integration points and are integrated sub-cell-wise, though without additional boundary detection. 
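To make the matrix-free conjugate gradient idea described above concrete, a minimal Python sketch follows. The 7-point stencil is an illustrative stand-in for the voxel finite element operator and the grid size is an assumption; only the pattern matters: the operator is applied on the fly and no matrix is ever assembled.

import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

n = 32                      # assumed voxel resolution: n**3 unknowns
N = n ** 3

def matvec(u):
    # Apply a 7-point stencil directly on the voxel grid (matrix-free).
    v = u.reshape((n, n, n))
    r = 6.0 * v
    r[1:, :, :] -= v[:-1, :, :]; r[:-1, :, :] -= v[1:, :, :]
    r[:, 1:, :] -= v[:, :-1, :]; r[:, :-1, :] -= v[:, 1:, :]
    r[:, :, 1:] -= v[:, :, :-1]; r[:, :, :-1] -= v[:, :, 1:]
    return r.ravel()

A = LinearOperator((N, N), matvec=matvec)
M = LinearOperator((N, N), matvec=lambda u: u / 6.0)   # Jacobi preconditioner

b = np.random.default_rng(0).standard_normal(N)
x, info = cg(A, b, M=M, maxiter=500)
print("converged" if info == 0 else f"stopped, info={info}")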
The efficiency of the matrix-free methods can be retained.}, subject = {Dissertation}, language = {en} } @phdthesis{KhademiZahedi, author = {Khademi Zahedi, Reza}, title = {Stress Distribution in Buried Defective PE Pipes and Crack Propagation in Nanosheets}, doi = {10.25643/bauhaus-universitaet.4481}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210803-44814}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {235}, abstract = {Buried PE pipelines are the main choice for transporting hazardous hydrocarbon fluids and are used in urban gas distribution networks. Molecular dynamics (MD) simulations are used to investigate material behavior at the nanoscale.}, subject = {Gasleitung}, language = {en} } @unpublished{KhosraviSheikhKhozaniCooper, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Cooper, James R.}, title = {Predicting stable gravel-bed river hydraulic geometry: A test of novel, advanced, hybrid data mining algorithms}, volume = {2021}, doi = {10.25643/bauhaus-universitaet.4499}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211004-44998}, abstract = {Accurate prediction of stable alluvial hydraulic geometry, in which erosion and sedimentation are in equilibrium, is one of the most difficult but critical topics in the field of river engineering. Data mining algorithms have been gaining more attention in this field due to their high performance and flexibility. However, an understanding of the potential for these algorithms to provide fast, cheap, and accurate predictions of hydraulic geometry is lacking. This study provides the first quantification of this potential. Using at-a-station field data, predictions of flow depth, water-surface width and longitudinal water surface slope are made using three standalone data mining techniques - Instance-based Learning (IBK), KStar and Locally Weighted Learning (LWL) - along with four types of novel hybrid algorithms in which the standalone models are trained with Vote, Attribute Selected Classifier (ASC), Regression by Discretization (RBD), and Cross-validation Parameter Selection (CVPS) algorithms (Vote-IBK, Vote-Kstar, Vote-LWL, ASC-IBK, ASC-Kstar, ASC-LWL, RBD-IBK, RBD-Kstar, RBD-LWL, CVPS-IBK, CVPS-Kstar, CVPS-LWL). Through a comparison of their predictive performance and a sensitivity analysis of the driving variables, the results reveal: (1) Shields stress was the most effective parameter in the prediction of all geometry dimensions; (2) hybrid models had a higher prediction power than standalone data mining models, empirical equations and traditional machine learning algorithms; (3) the Vote-Kstar model had the highest performance in predicting depth and width, and ASC-Kstar in estimating slope, each providing very good prediction performance. Through these algorithms, the hydraulic geometry of any river can potentially be predicted accurately and with ease using just a few, readily available flow and channel parameters. 
Thus, the results reveal that these models have great potential for use in stable channel design in data-poor catchments, especially in developing nations where technical modelling skills and understanding of the hydraulic and sediment processes occurring in the river system may be lacking.}, subject = {Maschinelles Lernen}, language = {en} } @unpublished{KhosraviSheikhKhozaniMao, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Mao, Luka}, title = {A comparison between advanced hybrid machine learning algorithms and empirical equations applied to abutment scour depth prediction}, doi = {10.25643/bauhaus-universitaet.4388}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210311-43889}, pages = {43}, abstract = {Complex vortex flow patterns around bridge piers, especially during floods, cause a scour process that can result in the failure of foundations. Abutment scour is a complex three-dimensional phenomenon that is difficult to predict, especially with traditional formulas obtained using empirical approaches such as regressions. This paper presents a test of a standalone Kstar model with five novel hybrid algorithms of bagging (BA-Kstar), dagging (DA-Kstar), random committee (RC-Kstar), random subspace (RS-Kstar), and weighted instance handler wrapper (WIHW-Kstar) to predict scour depth (ds) for clear-water conditions. The dataset consists of 99 scour depth data from flume experiments (Dey and Barbhuiya, 2005) using abutment shapes such as vertical, semicircular and 45° wing. Four dimensionless parameters of relative flow depth (h/l), excess abutment Froude number (Fe), relative sediment size (d50/l) and relative submergence (d50/h) were considered for the prediction of relative scour depth (ds/l). A portion of the dataset was used for calibration (70\%), and the remainder for model validation. Pearson correlation coefficients helped in deciding the relevance of the input parameter combinations, and finally four different combinations of input parameters were used. The performance of the models was assessed visually and with quantitative metrics. Overall, the best input combination for the vertical abutment shape is the combination of Fe, d50/l and h/l, while for the semicircular and 45° wing shapes the combination of Fe and d50/l is the most effective input parameter combination. Our results show that incorporating Fe, d50/l and h/l leads to higher performance, while involving d50/h reduced the models' prediction power for the vertical abutment shape, and for the semicircular and 45° wing shapes involving h/l and d50/h leads to larger errors. The WIHW-Kstar model provided the highest performance in scour depth prediction around the vertical abutment shape, while the RC-Kstar model outperformed the other models for scour depth prediction around the semicircular and 45° wing shapes.}, subject = {maschinelles Lernen}, language = {en} } @article{KumariHarirchianLahmeretal., author = {Kumari, Vandana and Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Evaluation of Machine Learning and Web-Based Process for Damage Score Estimation of Existing Buildings}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 5, article 578}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12050578}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220509-46387}, pages = {1 -- 23}, abstract = {The seismic vulnerability assessment of existing reinforced concrete (RC) buildings is a significant input to disaster mitigation plans and rescue services. 
Different countries have developed various Rapid Visual Screening (RVS) techniques and methodologies to deal with the devastating consequences of earthquakes on the structural characteristics of buildings and human casualties. Artificial intelligence (AI) methods, such as machine learning (ML) algorithm-based methods, are increasingly used in various scientific and technical applications. The investigation toward using these techniques in civil engineering applications has shown encouraging results and reduced human intervention, including uncertainties and biased judgment. In this study, several known non-parametric algorithms are investigated toward RVS using a dataset employing different earthquakes. Moreover, the methodology encourages the possibility of examining the buildings' vulnerability based on the factors related to the buildings' importance and exposure. In addition, a web-based application built on Django is introduced. The interface is designed with the idea of easing the seismic vulnerability investigation in real-time. The concept was validated using two case studies, and the achieved results showed the proposed approach's potential efficiency.}, subject = {Maschinelles Lernen}, language = {en} } @article{LashkarAraKalantariSheikhKhozanietal., author = {Lashkar-Ara, Babak and Kalantari, Niloofar and Sheikh Khozani, Zohreh and Mosavi, Amir}, title = {Assessing Machine Learning versus a Mathematical Model to Estimate the Transverse Shear Stress Distribution in a Rectangular Channel}, series = {Mathematics}, volume = {2021}, journal = {Mathematics}, number = {Volume 9, Issue 6, Article 596}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math9060596}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210504-44197}, pages = {15}, abstract = {One of the most important subjects of hydraulic engineering is the reliable estimation of the transverse distribution of bed and wall shear stresses in rectangular channels. This study makes use of the Tsallis entropy, genetic programming (GP) and adaptive neuro-fuzzy inference system (ANFIS) methods to assess the shear stress distribution (SSD) in the rectangular channel. To evaluate the results of the Tsallis entropy, GP and ANFIS models, laboratory observations were used in which shear stress was measured using an optimized Preston tube. This was then used to measure the SSD at various aspect ratios in the rectangular channel. To investigate the shear stress percentage, 10 data series with a total of 112 different data points were used. The results of the sensitivity analysis show that the most influential parameter for the SSD in a smooth rectangular channel is the dimensionless parameter B/H, where the transverse coordinate is B and the flow depth is H. With the parameters (b/B), (B/H) for the bed and (z/H), (B/H) for the wall as inputs, the GP model performed better than the others. 
Based on the analysis, it can be concluded that the use of GP and ANFIS algorithms is more effective in estimating shear stress in smooth rectangular channels than the Tsallis entropy-based equations.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Liu, author = {Liu, Bokai}, title = {Stochastic multiscale modeling of polymeric nanocomposites using Data-driven techniques}, doi = {10.25643/bauhaus-universitaet.4637}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220503-46379}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, abstract = {In recent years, lightweight materials such as polymer nanocomposites (PNCs) have been studied and developed due to their excellent physical and chemical properties. Structures composed of these composite materials are widely used in aerospace engineering structures, automotive components, and electrical devices. The excellent and outstanding mechanical, thermal, and electrical properties of carbon nanotubes (CNTs) make them an ideal filler for strengthening the corresponding properties of polymer materials. The heat transfer of composite materials has very promising engineering applications in many fields, especially in electronic devices and energy storage equipment. It is essential in high-energy-density systems, since electronic components need heat dissipation functionality; in other words, in electronic devices the generated heat should ideally be dissipated by light and small heat sinks. Polymeric composites consist of fillers embedded in a polymer matrix; the former significantly affect the overall (macroscopic) performance of the material. There are many common carbon-based fillers such as single-walled carbon nanotubes (SWCNTs), multi-walled carbon nanotubes (MWCNTs), carbon nanobuds (CNBs), fullerene, and graphene. Additives inside the matrix have become a popular subject for researchers. Some extraordinary characteristics, such as high load-bearing performance, lightweight design, excellent chemical resistance, easy processing, and heat transfer, make the design of polymeric nanotube composites (PNCs) flexible. Due to the different reinforcing effects of different fillers on composite materials, there is a high degree of design freedom, and the structure can be tailored according to the needs of specific applications. As already stated, our research focus will be on SWCNT-enhanced PNCs. Since experiments are time-consuming, sometimes expensive and cannot shed light on phenomena taking place for instance at the interfaces/interphases of composites, they are often complemented through theoretical and computational analysis. While most studies are based on deterministic approaches, there is a comparatively lower number of stochastic methods accounting for uncertainties in the input parameters. In deterministic models, the output of the model is fully determined by the parameter values and the initial conditions. However, uncertainties in input parameters such as aspect ratio, volume fraction, and the thermal properties of fiber and matrix need to be taken into account for reliable predictions. In this research, a stochastic multiscale method is provided to study the influence of numerous uncertain input parameters on the thermal conductivity of the composite. Therefore, a hierarchical multi-scale method based on computational homogenization is presented to predict the macroscopic thermal conductivity based on the fine-scale structure. 
In order to study the inner mechanism, we use the finite element method and employ surrogate models to conduct a Global Sensitivity Analysis (GSA). The sensitivity analysis is performed in order to quantify the influence of the conductivity of the fiber, the matrix, the Kapitza resistance, the volume fraction and the aspect ratio on the macroscopic conductivity. Therefore, we compute first-order and total-effect sensitivity indices with different surrogate models. As stochastic multiscale models are computationally expensive, surrogate approaches are commonly exploited. With the emergence of high-performance computing and artificial intelligence, machine learning has become a popular modeling tool for numerous applications. Machine learning (ML) methods are commonly used in regression and map data through specific rules and algorithms to build input-output models. They are particularly useful for nonlinear input-output relationships when sufficient data is available. ML has also been used in the design of new materials and in multiscale analysis. For instance, artificial neural networks and integrated learning seem to be ideally suited to such a task. They can theoretically simulate any non-linear relationship through the connection of neurons. Mapping relationships are employed to carry out data-driven simulations of inputs and outputs in stochastic modeling. This research aims to develop stochastic multi-scale computational models of PNCs in heat transfer. Multi-scale stochastic modeling with uncertainty analysis and machine learning methods consists of the following components: -Uncertainty Analysis. A surrogate-based global sensitivity analysis is coupled with a hierarchical multi-scale method employing computational homogenization. The effect of the conductivity of the fibers and the matrix, the Kapitza resistance, volume fraction and aspect ratio on the 'macroscopic' conductivity of the composite is systematically studied. All selected surrogate models consistently yield the conclusion that the most influential input parameter is the aspect ratio, followed by the volume fraction. The Kapitza resistance has no significant effect on the thermal conductivity of the PNCs. The most accurate surrogate model in terms of the R2 value is the moving least square (MLS). -Hybrid Machine Learning Algorithms. A combination of artificial neural network (ANN) and particle swarm optimization (PSO) is applied to estimate the relationship between variable input and output parameters. The ANN is used for modeling the composite while PSO improves the prediction performance through an optimized global minimum search. The thermal conductivity of the fibers and the matrix, the Kapitza resistance, volume fraction and aspect ratio are selected as input parameters. The output is the macroscopic (homogenized) thermal conductivity of the composite. The results show that the PSO significantly improves the predictive ability of this hybrid intelligent algorithm, which outperforms traditional neural networks. -Stochastic Integrated Machine Learning. A stochastic integrated machine learning based multiscale approach for the prediction of the macroscopic thermal conductivity in PNCs is developed. Seven types of machine learning models are exploited in this research, namely Multivariate Adaptive Regression Splines (MARS), Support Vector Machine (SVM), Regression Tree (RT), Bagging Tree (Bag), Random Forest (RF), Gradient Boosting Machine (GBM) and Cubist. 
They are used as components of stochastic modeling to construct the relationship between the uncertain input variables and the macroscopic thermal conductivity of PNCs. Particle Swarm Optimization (PSO) is used for hyper-parameter tuning to find the global optimal values, leading to a significant reduction in the computational cost. The advantages and disadvantages of the various methods are also analyzed in terms of computing time and model complexity to finally give a recommendation for the applicability of the different models.}, subject = {Polymere}, language = {en} } @article{LizarazuHarirchianShaiketal., author = {Lizarazu, Jorge and Harirchian, Ehsan and Shaik, Umar Arif and Shareef, Mohammed and Antoni-Zdziobek, Annie and Lahmer, Tom}, title = {Application of machine learning-based algorithms to predict the stress-strain curves of additively manufactured mild steel out of its microstructural characteristics}, series = {Results in Engineering}, volume = {2023}, journal = {Results in Engineering}, number = {Volume 20 (2023)}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2023.101587}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65028}, pages = {1 -- 12}, abstract = {The study presents a Machine Learning (ML)-based framework designed to forecast the stress-strain relationship of arc-direct energy deposited mild steel. Based on microstructural characteristics previously extracted using microscopy and X-ray diffraction, approximately 1000 new parameter sets are generated by applying the Latin Hypercube Sampling Method (LHSM). For each parameter set, a Representative Volume Element (RVE) is synthetically created via Voronoi Tessellation. Input raw data for ML-based algorithms comprises these parameter sets or RVE-images, while output raw data includes their corresponding stress-strain relationships calculated after a Finite Element (FE) procedure. Input data undergoes preprocessing involving standardization, feature selection, and image resizing. Similarly, the stress-strain curves, initially unsuitable for training traditional ML algorithms, are preprocessed using cubic splines and occasionally Principal Component Analysis (PCA). The later part of the study focuses on employing multiple ML algorithms, utilizing two main models. The first model predicts stress-strain curves based on microstructural parameters, while the second model does so solely from RVE images. The most accurate prediction yields a Root Mean Squared Error of around 5 MPa, approximately 1\% of the yield stress. This outcome suggests that ML models offer precise and efficient methods for characterizing dual-phase steels, establishing a framework for accurate results in material analysis.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Luther2010, author = {Luther, Torsten}, title = {Adaptation of atomistic and continuum methods for multiscale simulation of quasi-brittle intergranular damage}, doi = {10.25643/bauhaus-universitaet.1436}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20101101-15245}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2010}, abstract = {The numerical simulation of damage using phenomenological models on the macroscale was state of the art for many decades. However, such models are not able to capture the complex nature of damage, which simultaneously proceeds on multiple length scales. Furthermore, these phenomenological models usually contain damage parameters, which are not physically interpretable. 
Consequently, a reasonable experimental determination of these parameters is often impossible. In the last twenty years, the ongoing advance in computational capacities has provided new opportunities for more and more detailed studies of the microstructural damage behavior. Today, multiphase models with several million degrees of freedom enable the numerical simulation of micro-damage phenomena in naturally heterogeneous materials. Therewith, the application of multiscale concepts for the numerical investigation of the complex nature of damage can be realized. The presented thesis contributes to a hierarchical multiscale strategy for the simulation of brittle intergranular damage in polycrystalline materials, for example aluminum. The numerical investigation of physical damage phenomena on an atomistic microscale and the integration of this physically based information into damage models on the continuum meso- and macroscale is intended. Therefore, numerical methods for the damage analysis on the micro- and mesoscale including the scale transfer are presented and the transition to the macroscale is discussed. The investigation of brittle intergranular damage on the microscale is realized by the application of the nonlocal Quasicontinuum method, which fully describes the material behavior by atomistic potential functions, but reduces the number of atomic degrees of freedom by introducing kinematic couplings. Since this promising method is applied only by a limited group of researchers for special problems, necessary improvements have been realized in our own parallelized implementation of the 3D nonlocal Quasicontinuum method. The aim of this implementation was to develop and combine robust and efficient algorithms for general use of the Quasicontinuum method, and therewith to allow for the atomistic damage analysis in arbitrary grain boundary configurations. The implementation is applied in analyses of brittle intergranular damage in ideal and nonideal grain boundary models of FCC aluminum, considering arbitrary misorientations. From the microscale simulations traction-separation laws are derived, which describe grain boundary decohesion on the mesoscale. Traction-separation laws are part of cohesive zone models to simulate the brittle interface decohesion in heterogeneous polycrystal structures. 2D and 3D mesoscale models are presented, which are able to reproduce crack initiation and propagation along cohesive interfaces in polycrystals. An improved Voronoi algorithm is developed in 2D to generate polycrystal material structures based on arbitrary distribution functions of grain size. The new model is more flexible in representing realistic grain size distributions. Further improvements of the 2D model are realized by the implementation and application of an orthotropic material model with a Hill plasticity criterion for the grains. 
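For orientation, the plain Poisson-Voronoi tessellation that such improved algorithms build on can be generated in a few lines of Python; the seed count and the restriction to bounded cells are assumptions of this sketch, and the grain-size-distribution control described in the thesis is deliberately omitted.

import numpy as np
from scipy.spatial import Voronoi

# Random seed points in the unit square; each Voronoi cell is one grain.
rng = np.random.default_rng(1)
seeds = rng.random((50, 2))
vor = Voronoi(seeds)

# Keep only bounded cells, i.e. grains not extending to infinity.
bounded = [region for region in vor.regions if region and -1 not in region]
print(f"{len(bounded)} bounded grains from {len(seeds)} seeds")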
The 2D and 3D polycrystal models are applied to analyze crack initiation and propagation in statically loaded samples of aluminum on the mesoscale without the necessity of initial damage definition.}, subject = {Mechanik}, language = {en} } @inproceedings{LutherKoenke, author = {Luther, Torsten and K{\"o}nke, Carsten}, title = {INVESTIGATION OF CRACK GROWTH IN POLYCRYSTALLINE MESOSTRUCTURES}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2988}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29886}, pages = {11}, abstract = {The design and application of high-performance materials demand extensive knowledge of the material's damage behavior, which significantly depends on the meso- and microstructural complexity. Numerical simulations of crack growth on multiple length scales are promising tools to understand the damage phenomena in complex materials. In polycrystalline materials it has been observed that grain boundary decohesion is one important mechanism that leads to micro-crack initiation. Following this observation, the paper presents a polycrystal mesoscale model consisting of grains with orthotropic material behavior and cohesive interfaces along grain boundaries, which is able to reproduce crack initiation and propagation along grain boundaries in polycrystalline materials. With respect to the importance of modeling the geometry of the grain structure, an advanced Voronoi algorithm is proposed to generate realistic polycrystalline material structures based on measured grain size distributions. The polycrystal model is applied to investigate crack initiation and propagation in statically loaded representative volume elements of aluminum on the mesoscale without the necessity of initial damage definition. Future research work is planned to include the mesoscale model into a multiscale model for the damage analysis in polycrystalline materials.}, subject = {Architektur }, language = {en} } @phdthesis{Mai, author = {Mai, Luu}, title = {Structural Control Systems in High-speed Railway Bridges}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.2339}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20141223-23391}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Structural vibration control of high-speed railway bridges using tuned mass dampers, semi-active tuned mass dampers, fluid viscous dampers and magnetorheological dampers to reduce resonant structural vibrations is studied. In this work, the main issues addressed include the modeling of the dynamic interaction of the structures, the optimization of the damper parameters and a comparison of their efficiency. A new approach to optimize multiple tuned mass damper systems on an uncertain model is proposed based on the H-infinity optimization criteria and the DK iteration procedure with norm-bounded uncertainties in the frequency domain. The parameters of the tuned mass dampers are optimized directly and simultaneously on different modes contributing significantly to the multi-resonant peaks to explore the different possible combinations of parameters. The effectiveness of the present method is also evaluated through comparison with a previous method. In the case of semi-active tuned mass dampers, an optimization algorithm is derived to control the magnetorheological damper in these semi-active damping systems. 
The use of the proposed algorithm can generate various combinations of control gains and state variables. This can improve the ability of MR dampers to track the desired control forces. An uncertain model to reduce detuning effects is also considered in this work. Next, in order to tune the parameters of fluid viscous dampers to the vicinity of their exact optimal values, analytical formulae which can include structural damping are developed based on the perturbation method. The proposed formulae can also be considered an improvement of the previous analytical formulae, especially for bridge beams with large structural damping. Finally, a new combination of magnetorheological dampers and a double-beam system to improve the vibration performance of the primary structure is proposed. An algorithm to control the magnetorheological dampers in this system is developed by using standard linear matrix inequality techniques. Weight functions as a loop-shaping procedure are also introduced in the feedback controllers to improve the tracking ability of the magnetorheological damping forces. To this end, the effectiveness of magnetorheological dampers controlled by the proposed scheme, along with the effects of the uncertain and time-delay parameters on the models, is evaluated through numerical simulations. Additionally, a comparison of the dampers based on their performance is also considered in this work.}, subject = {High-speed railway bridge}, language = {en} } @phdthesis{Mauludin, author = {Mauludin, Luthfi Muhammad}, title = {Computational Modeling of Fracture in Encapsulation-Based Self-Healing Concrete Using Cohesive Elements}, doi = {10.25643/bauhaus-universitaet.4520}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211008-45204}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {170}, abstract = {Encapsulation-based self-healing concrete has received a lot of attention nowadays in the civil engineering field. In this approach, capsules are embedded in the cementitious matrix during concrete mixing. When cracks appear, the embedded capsules placed along the path of an incoming crack fracture and release healing agents in the vicinity of the damage. The capsule materials need to be designed such that they break at small deformations, so the internal fluid can be released to seal the crack. This study focuses on the computational modeling of fracture in encapsulation-based self-healing concrete. 2D and 3D numerical models with randomly packed aggregates and capsules have been developed to analyze the fracture mechanisms that play a significant role in the fracture probability of the capsules and consequently the self-healing process. The capsules are assumed to be made of Poly Methyl Methacrylate (PMMA) and the potential cracks are represented by pre-inserted cohesive elements with tension and shear softening laws along the element boundaries of the mortar matrix, aggregates, capsules, and at the interfaces between these phases. The effects of volume fraction, core-wall thickness ratio, and mismatched fracture properties of the capsules on the load-carrying capacity of self-healing concrete and the fracture probability of the capsules are investigated. 
The output of this study will become a valuable tool to assist not only experimentalists but also manufacturers in designing an appropriate capsule material for self-healing concrete.}, subject = {beton}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. For improvement of the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of the FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and idle printing samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence methods of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate the toughness, part thickness, and production-cost-dependent variables. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method. On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to comparatively undergo deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @article{MengNomanQasemShokrietal., author = {Meng, Yinghui and Noman Qasem, Sultan and Shokri, Manouchehr and Shamshirband, Shahaboddin}, title = {Dimension Reduction of Machine Learning-Based Forecasting Models Employing Principal Component Analysis}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 8, article 1233}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8081233}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200811-42125}, pages = {15}, abstract = {In this research, an attempt was made to reduce the dimension of wavelet-ANFIS/ANN (artificial neural network/adaptive neuro-fuzzy inference system) models toward reliable forecasts as well as to decrease computational cost. In this regard, the principal component analysis was performed on the input time series decomposed by a discrete wavelet transform to feed the ANN/ANFIS models. The models were applied for forecasting dissolved oxygen (DO), an important variable affecting aquatic life and water quality, in rivers. 
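A rough Python sketch of such a wavelet-plus-PCA input pipeline follows. It is an assumption-laden illustration, not the paper's code: synthetic random series replace the measured records, and a stationary wavelet transform (pywt.swt) is used instead of the paper's discrete wavelet transform so that all subbands keep the same length and can be stacked as feature columns.

import numpy as np
import pywt                                    # PyWavelets
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
names = ["DO", "water_temp", "salinity", "turbidity"]
series = {name: rng.standard_normal(512) for name in names}

# Every subband of every input becomes one candidate feature column.
cols = []
for s in series.values():
    for cA, cD in pywt.swt(s, "db4", level=3):
        cols += [cA, cD]
X = np.column_stack(cols)       # shape: (time steps, 4 vars * 6 subbands)

# PCA compresses the correlated subbands into a few orthogonal inputs
# for the downstream ANN/ANFIS model.
Z = PCA(n_components=0.95).fit_transform(X)
print(X.shape, "->", Z.shape)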
The current values of DO, water surface temperature, salinity, and turbidity were considered as input variables to forecast DO three time steps ahead. The results of the study revealed that PCA can be employed as a powerful tool for dimension reduction of input variables and also to detect the inter-correlation of input variables. Results of the PCA-wavelet-ANN models are compared with those obtained from wavelet-ANN models, while the former has the advantage of less computational time than the latter. Dealing with ANFIS models, PCA is more beneficial in preventing the wavelet-ANFIS models from creating too many rules, which deteriorates the efficiency of the ANFIS models. Moreover, manipulating the wavelet-ANFIS models utilizing PCA leads to a significant decrease in computational time. Finally, it was found that the PCA-wavelet-ANN/ANFIS models can provide reliable forecasts of dissolved oxygen as an important water quality indicator in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{MortazaviPereiraJiangetal., author = {Mortazavi, Bohayra and Pereira, Luiz Felipe C. and Jiang, Jin-Wu and Rabczuk, Timon}, title = {Modelling heat conduction in polycrystalline hexagonal boron-nitride films}, series = {Scientific Reports}, journal = {Scientific Reports}, doi = {10.1038/srep13228}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170425-31534}, abstract = {We conducted extensive molecular dynamics simulations to investigate the thermal conductivity of polycrystalline hexagonal boron-nitride (h-BN) films. To this aim, we constructed large atomistic models of polycrystalline h-BN sheets with random and uniform grain configuration. By performing equilibrium molecular dynamics (EMD) simulations, we investigated the influence of the average grain size on the thermal conductivity of polycrystalline h-BN films at various temperatures. Using the EMD results, we constructed finite element models of polycrystalline h-BN sheets to probe the thermal conductivity of samples with larger grain sizes. Our multiscale investigations not only provide a general viewpoint regarding the heat conduction in h-BN films but also propose that polycrystalline h-BN sheets present high thermal conductivity comparable to monocrystalline sheets.}, subject = {W{\"a}rmeleitf{\"a}higkeit}, language = {en} } @article{MosaviQasemShokrietal., author = {Mosavi, Amir Hosein and Qasem, Sultan Noman and Shokri, Manouchehr and Band, Shahab S. and Mohammadzadeh, Ardashir}, title = {Fractional-Order Fuzzy Control Approach for Photovoltaic/Battery Systems under Unknown Dynamics, Variable Irradiation and Temperature}, series = {Electronics}, volume = {2020}, journal = {Electronics}, number = {Volume 9, issue 9, article 1455}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/electronics9091455}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43381}, pages = {1 -- 19}, abstract = {For this paper, the problem of energy/voltage management in photovoltaic (PV)/battery systems was studied, and a new fractional-order control system on the basis of type-3 (T3) fuzzy logic systems (FLSs) was developed. New fractional-order learning rules are derived for the tuning of T3-FLSs such that stability is ensured. In addition, using fractional-order calculus, the robustness was studied versus dynamic uncertainties, perturbations of irradiation and temperature, and abrupt faults in output loads, and, subsequently, new compensators were proposed. 
In several examinations under difficult operating conditions, such as random temperature, variable irradiation, and abrupt changes in output load, the capability of the proposed controller was verified. In addition, in comparison with other methods, such as proportional-integral-derivative (PID), sliding mode controller (SMC), passivity-based control systems (PBC), and linear quadratic regulator (LQR), the superiority of the suggested method was demonstrated.}, subject = {Fuzzy-Logik}, language = {en} } @article{MosaviShokriMansoretal., author = {Mosavi, Amir Hosein and Shokri, Manouchehr and Mansor, Zulkefli and Qasem, Sultan Noman and Band, Shahab S. and Mohammadzadeh, Ardashir}, title = {Machine Learning for Modeling the Singular Multi-Pantograph Equations}, series = {Entropy}, volume = {2020}, journal = {Entropy}, number = {volume 22, issue 9, article 1041}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/e22091041}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43436}, pages = {1 -- 18}, abstract = {In this study, a new approach on the basis of intelligent systems and machine learning algorithms is introduced for solving singular multi-pantograph differential equations (SMDEs). For the first time, a type-2 fuzzy-logic-based approach is formulated to find an approximated solution. The rules of the suggested type-2 fuzzy logic system (T2-FLS) are optimized by the square root cubature Kalman filter (SCKF) such that the proposed fitness function is minimized. Furthermore, the stability and boundedness of the estimation error are proved by a novel approach on the basis of the Lyapunov theorem. The accuracy and robustness of the suggested algorithm are verified by several statistical examinations. It is shown that the suggested method results in an accurate solution with rapid convergence and a lower computational cost.}, subject = {Fuzzy-Regelung}, language = {en} } @article{MosaviNajafiFaizollahzadehArdabilietal., author = {Mosavi, Amir and Najafi, Bahman and Faizollahzadeh Ardabili, Sina and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {An Intelligent Artificial Neural Network-Response Surface Methodology Method for Accessing the Optimum Biodiesel and Diesel Fuel Blending Conditions in a Diesel Engine from the Viewpoint of Exergy and Energy Analysis}, series = {Energies}, volume = {2018}, journal = {Energies}, number = {11, 4}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11040860}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180507-37467}, pages = {18}, abstract = {Biodiesel, as the main alternative fuel to diesel fuel, which is produced from renewable and available resources, improves the engine emissions during combustion in diesel engines. In this study, the biodiesel is produced initially from waste cooking oil (WCO). The fuel samples are applied in a diesel engine and the engine performance is considered from the viewpoint of exergy and energy approaches. Engine tests are performed at a constant 1500 rpm speed with various loads and fuel samples. The obtained experimental data are also applied to develop an artificial neural network (ANN) model. Response surface methodology (RSM) is employed to optimize the exergy and energy efficiencies. Based on the results of the energy analysis, optimal engine performance is obtained at 80\% of full load in the presence of B10 and B20 fuels. However, based on the exergy analysis results, optimal engine performance is obtained at 80\% of full load in the presence of B90 and B100 fuels. 
The optimum values of exergy and energy efficiencies are in the range of 25-30\% of full load, which is the same as the calculated range obtained from mathematical modeling.}, subject = {Biodiesel}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using machine learning models of the multilayer perceptron (MLP) algorithm and support vector machine (SVM) in hybrid form with the firefly optimization algorithm, i.e. MLP-FFA and SVM-FFA. In the current study, measured ST and meteorological parameters of the Tabriz and Ahar weather stations in the period 2013-2015 are used for training and testing of the studied models with one and two days as a delay. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams were utilized for that purpose. The obtained results showed that, in the case of a one-day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with the MLP, SVM, and SVM-FFA models. However, for a two-day delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the prescribed models, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @inproceedings{MostBucher, author = {Most, Thomas and Bucher, Christian}, title = {ADAPTIVE RESPONSE SURFACE APPROACH USING ARTIFICIAL NEURAL NETWORKS AND MOVING LEAST SQUARES}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2992}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29922}, pages = {13}, abstract = {In engineering science, the modeling and numerical analysis of complex systems and relations play an important role. In order to realize such an investigation, for example a stochastic analysis, in a reasonable computational time, approximation procedures have been developed. A very famous approach is the response surface method, where the relation between input and output quantities is represented for example by global polynomials or local interpolation schemes such as Moving Least Squares (MLS). In recent years, artificial neural networks (ANN) have been applied as well for such purposes. Recently, an adaptive response surface approach for reliability analyses was proposed, which is very efficient concerning the number of expensive limit state function evaluations. 
Due to the applied simplex interpolation, the procedure is limited to small dimensions. In this paper, this approach is extended to larger dimensions using combined ANN and MLS response surfaces for evaluating the adaptation criterion with only one set of joined limit state points. As adaptation criterion, a combination of the maximum difference in the conditional probabilities of failure and the maximum difference in the approximated radii is applied. Compared to response surfaces on directional samples or to plain directional sampling, the failure probability can be estimated with a much smaller number of limit state points.}, subject = {Architektur }, language = {en} } @inproceedings{MostBucherMacke, author = {Most, Thomas and Bucher, Christian and Macke, M.}, title = {A NATURAL NEIGHBOR BASED MOVING LEAST SQUARES APPROACH WITH INTERPOLATING WEIGHTING FUNCTION}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2994}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29943}, pages = {17}, abstract = {The Element-free Galerkin Method has become a very popular tool for the simulation of mechanical problems with moving boundaries. The internally applied Moving Least Squares approximation uses in general Gaussian or cubic weighting functions and has compact support. Due to the approximative character of this method, the obtained shape functions do not fulfill the interpolation condition, which causes additional numerical effort for the imposition of the essential boundary conditions. The application of a singular weighting function, which leads to singular coefficient matrices at the nodes, can solve this problem, but requires a very careful placement of the integration points. Special procedures for the handling of such singular matrices were proposed in the literature, which require additional numerical effort. In this paper, a non-singular weighting function is presented, which leads to an exact fulfillment of the interpolation condition. This weighting function leads to regular values of the weights and the coefficient matrices in the whole interpolation domain, even at the nodes. Furthermore, this function gives much more stable results for varying sizes of the influence radius and for strongly distorted nodal arrangements than classical weighting function types. Nevertheless, for practical applications the results are similar to those obtained with the regularized weighting type presented by the authors in previous publications. Finally, a new concept will be presented, which enables an efficient analysis of systems with strongly varying node density. In this concept, the nodal influence domains are adapted depending on the nodal configuration by interpolating the influence radius for each direction from the distances to the natural neighbor nodes. This approach requires a Voronoi diagram of the domain, which is available in this study since Delaunay triangles are used as integration background cells. In the numerical examples it will be shown that this method leads to a more uniform and reduced number of influencing nodes for systems with varying node density than the classical circular influence domains, which means that the small additional numerical effort for interpolating the influence radius leads to a remarkable reduction of the total numerical cost in a linear analysis while obtaining similar results. 
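For background, the classical Moving Least Squares construction that the paper modifies can be sketched in a few lines of Python. The truncated Gaussian weight, node layout and influence radius below are assumptions of this illustration; the point it makes is that with such a standard weight the shape functions are not interpolating at the nodes, which is exactly the shortcoming the proposed weighting function removes.

import numpy as np

def mls_shape(x, nodes, radius, m=2):
    # Classical 1-D MLS shape functions with a truncated Gaussian weight.
    d = np.abs(x - nodes) / radius
    w = np.where(d <= 1.0,
                 np.exp(-(d / 0.4) ** 2) - np.exp(-(1.0 / 0.4) ** 2), 0.0)
    P = np.vander(nodes, m + 1, increasing=True)   # monomial basis at nodes
    A = P.T @ (w[:, None] * P)                     # moment matrix
    p = x ** np.arange(m + 1)                      # basis at evaluation point
    return w * (P @ np.linalg.solve(A, p))

nodes = np.linspace(0.0, 1.0, 11)
phi = mls_shape(0.5, nodes, radius=0.35)
print(phi.sum())   # partition of unity: ~1.0
print(phi[5])      # < 1 at the node itself: no interpolation property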
For nonlinear calculations this advantage would be even more significant.}, subject = {Architektur }, language = {en} } @inproceedings{MostEckardtSchraderetal., author = {Most, Thomas and Eckardt, Stefan and Schrader, Kai and Deckner, T.}, title = {AN IMPROVED COHESIVE CRACK MODEL FOR COMBINED CRACK OPENING AND SLIDING UNDER CYCLIC LOADING}, editor = {G{\"u}rlebeck, Klaus and K{\"o}nke, Carsten}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.2993}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170327-29933}, pages = {20}, abstract = {The modeling of crack propagation in plain and reinforced concrete structures is still an active field of research. If a macroscopic description of the cohesive cracking process of concrete is applied, generally the Fictitious Crack Model is utilized, where a force transmission over micro cracks is assumed. In most applications of this concept, the cohesive model represents the relation between the normal crack opening and the normal stress, which is mostly defined as an exponential softening function, independently of the shear stresses in the tangential direction. The cohesive forces are then calculated only from the normal stresses. Carol et al. (1997) developed an improved model using a coupled relation between normal and shear damage based on an elasto-plastic constitutive formulation. This model is based on a hyperbolic yield surface depending on the normal and the shear stresses and on the tensile and shear strength. This model also represents the effect of shear-traction-induced crack opening. Due to the elasto-plastic formulation, where the inelastic crack opening is represented by plastic strains, this model is limited to applications with monotonic loading. In order to enable the application to cases with un- and reloading, the existing model is extended in this study using a combined plastic-damage formulation, which enables the modeling of crack opening and crack closure. Furthermore, the corresponding algorithmic implementation using a return mapping approach is presented and the model is verified by means of several numerical examples. Finally, an investigation concerning the identification of the model parameters by means of neural networks is presented. In this analysis, an inverse approximation of the model parameters is performed by using a given set of points of the load-displacement curves as input values and the model parameters as output terms. It will be shown that the elasto-plastic model parameters can be identified well with this approach, though this requires a huge number of simulations.}, subject = {Architektur }, language = {en} }
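As a pointer to what the exponential softening mentioned in this last abstract typically looks like, here is a small Python sketch of a Hillerborg-type mode-I law; the strength and fracture energy values are purely illustrative, and the coupled normal-shear, cyclic plastic-damage formulation of the paper goes far beyond this scalar relation.

import numpy as np

f_t = 3.0e6   # tensile strength in Pa (illustrative value)
G_f = 100.0   # mode-I fracture energy in N/m (illustrative value)

def sigma(w):
    # Cohesive normal traction across a crack opening w (in m):
    # sigma(w) = f_t * exp(-f_t * w / G_f), whose integral over w is G_f.
    return f_t * np.exp(-f_t * w / G_f)

w = np.linspace(0.0, 5.0 * G_f / f_t, 1000)
print(np.trapz(sigma(w), w) / G_f)   # ~0.99: dissipated energy tends to G_f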