@article{KumariHarirchianLahmeretal., author = {Kumari, Vandana and Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Evaluation of Machine Learning and Web-Based Process for Damage Score Estimation of Existing Buildings}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 5, article 578}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12050578}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220509-46387}, pages = {1 -- 23}, abstract = {The seismic vulnerability assessment of existing reinforced concrete (RC) buildings is a significant source of disaster mitigation plans and rescue services. Different countries evolved various Rapid Visual Screening (RVS) techniques and methodologies to deal with the devastating consequences of earthquakes on the structural characteristics of buildings and human casualties. Artificial intelligence (AI) methods, such as machine learning (ML) algorithm-based methods, are increasingly used in various scientific and technical applications. The investigation toward using these techniques in civil engineering applications has shown encouraging results and reduced human intervention, including uncertainties and biased judgment. In this study, several known non-parametric algorithms are investigated toward RVS using a dataset employing different earthquakes. Moreover, the methodology encourages the possibility of examining the buildings' vulnerability based on the factors related to the buildings' importance and exposure. In addition, a web-based application built on Django is introduced. The interface is designed with the idea to ease the seismic vulnerability investigation in real-time. The concept was validated using two case studies, and the achieved results showed the proposed approach's potential efficiency}, subject = {Maschinelles Lernen}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. 
and Janizadeh, Saeid and Chandra Pal, Subodh and Saha, Asish and Chakrabortty, Rabbin and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Novel Ensemble Approach of Deep Learning Neural Network (DLNN) Model and Particle Swarm Optimization (PSO) Algorithm for Prediction of Gully Erosion Susceptibility}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 19, article 5609}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20195609}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43341}, pages = {1 -- 27}, abstract = {This study aims to evaluate a new approach in modeling gully erosion susceptibility (GES) based on a deep learning neural network (DLNN) model and an ensemble particle swarm optimization (PSO) algorithm with DLNN (PSO-DLNN), comparing these approaches with common artificial neural network (ANN) and support vector machine (SVM) models in Shirahan watershed, Iran. For this purpose, 13 independent variables affecting GES in the study area, namely, altitude, slope, aspect, plan curvature, profile curvature, drainage density, distance from a river, land use, soil, lithology, rainfall, stream power index (SPI), and topographic wetness index (TWI), were prepared. A total of 132 gully erosion locations were identified during field visits. To implement the proposed model, the dataset was divided into the two categories of training (70\%) and testing (30\%). The results indicate that the area under the curve (AUC) value from receiver operating characteristic (ROC) considering the testing datasets of PSO-DLNN is 0.89, which indicates superb accuracy. The rest of the models are associated with optimal accuracy and have similar results to the PSO-DLNN model; the AUC values from ROC of DLNN, SVM, and ANN for the testing datasets are 0.87, 0.85, and 0.84, respectively. The efficiency of the proposed model in terms of prediction of GES was increased. 
Therefore, it can be concluded that the DLNN model and its ensemble with the PSO algorithm can be used as a novel and practical method to predict gully erosion susceptibility, which can help planners and managers to manage and reduce the risk of this phenomenon.}, subject = {Geoinformatik}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Rasulzade, Shahla and Lahmer, Tom and Raj Das, Rohan}, title = {A Synthesized Study Based on Machine Learning Approaches for Rapid Classifying Earthquake Damage Grades to RC Buildings}, series = {Applied Sciences}, volume = {2021}, journal = {Applied Sciences}, number = {Volume 11, issue 16, article 7540}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app11167540}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210818-44853}, pages = {1 -- 33}, abstract = {A vast number of existing buildings were constructed before the development and enforcement of seismic design codes, which run into the risk of being severely damaged under the action of seismic excitations. This poses not only a threat to the life of people but also affects the socio-economic stability in the affected area. Therefore, it is necessary to assess such buildings' present vulnerability to make an educated decision regarding risk mitigation by seismic strengthening techniques such as retrofitting. However, it is economically and timely manner not feasible to inspect, repair, and augment every old building on an urban scale. As a result, a reliable rapid screening methods, namely Rapid Visual Screening (RVS), have garnered increasing interest among researchers and decision-makers alike. In this study, the effectiveness of five different Machine Learning (ML) techniques in vulnerability prediction applications have been investigated. 
The damage data of four different earthquakes from Ecuador, Haiti, Nepal, and South Korea, have been utilized to train and test the developed models. Eight performance modifiers have been implemented as variables with a supervised ML. The investigations on this paper illustrate that the assessed vulnerability classes by ML techniques were very close to the actual damage levels observed in the buildings.}, subject = {Maschinelles Lernen}, language = {en} } @article{Hanna, author = {Hanna, John}, title = {Computational Modelling for the Effects of Capsular Clustering on Fracture of Encapsulation-Based Self-Healing Concrete Using XFEM and Cohesive Surface Technique}, series = {Applied Sciences}, volume = {2022}, journal = {Applied Sciences}, number = {Volume 12, issue 10, article 5112}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app12105112}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220721-46717}, pages = {1 -- 17}, abstract = {The fracture of microcapsules is an important issue to release the healing agent for healing the cracks in encapsulation-based self-healing concrete. The capsular clustering generated from the concrete mixing process is considered one of the critical factors in the fracture mechanism. Since there is a lack of studies in the literature regarding this issue, the design of self-healing concrete cannot be made without an appropriate modelling strategy. In this paper, the effects of microcapsule size and clustering on the fractured microcapsules are studied computationally. A simple 2D computational modelling approach is developed based on the eXtended Finite Element Method (XFEM) and cohesive surface technique. The proposed model shows that the microcapsule size and clustering have significant roles in governing the load-carrying capacity and the crack propagation pattern and determines whether the microcapsule will be fractured or debonded from the concrete matrix. 
The higher the microcapsule circumferential contact length, the higher the load-carrying capacity. When it is lower than 25\% of the microcapsule circumference, it will result in a greater possibility for the debonding of the microcapsule from the concrete. The greater the core/shell ratio (smaller shell thickness), the greater the likelihood of microcapsules being fractured.}, subject = {Beton}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management and cane supply management is outlined, where the literature indicates that the application of extreme learning machine (ELM) has never been explored in this realm. 
Consequently, the leading objective of the current research was set to filling this gap by applying ELM to launch swift and accurate model for crop production data-driven. The key learning has been the need for innovation both in the technical aspects of system function underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrate model using ELM to predict the concluding growth amount of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. Accuracy of the ELM model is calculated using the statistics indicators of Root Means Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2) with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to faster learning curve. Thus, proficiency of the ELM for supplementary work on advancement of prediction model for sugarcane growth was approved with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @article{FaizollahzadehArdabiliNajafiAlizamiretal., author = {Faizollahzadeh Ardabili, Sina and Najafi, Bahman and Alizamir, Meysam and Mosavi, Amir and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {Using SVM-RSM and ELM-RSM Approaches for Optimizing the Production Process of Methyl and Ethyl Esters}, series = {Energies}, journal = {Energies}, number = {11, 2889}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11112889}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181025-38170}, pages = {1 -- 20}, abstract = {The production of a desired product needs an effective use of the experimental model. The present study proposes an extreme learning machine (ELM) and a support vector machine (SVM) integrated with the response surface methodology (RSM) to solve the complexity in optimization and prediction of the ethyl ester and methyl ester production process. 
The novel hybrid models of ELM-RSM and ELM-SVM are further used as a case study to estimate the yield of methyl and ethyl esters through a trans-esterification process from waste cooking oil (WCO) based on American Society for Testing and Materials (ASTM) standards. The results of the prediction phase were also compared with artificial neural networks (ANNs) and adaptive neuro-fuzzy inference system (ANFIS), which were recently developed by the second author of this study. Based on the results, an ELM with a correlation coefficient of 0.9815 and 0.9863 for methyl and ethyl esters, respectively, had a high estimation capability compared with that for SVM, ANNs, and ANFIS. Accordingly, the maximum production yield was obtained in the case of using ELM-RSM of 96.86\% for ethyl ester at a temperature of 68.48 °C, a catalyst value of 1.15 wt. \%, mixing intensity of 650.07 rpm, and an alcohol to oil molar ratio (A/O) of 5.77; for methyl ester, the production yield was 98.46\% at a temperature of 67.62 °C, a catalyst value of 1.1 wt. \%, mixing intensity of 709.42 rpm, and an A/O of 6.09. 
Therefore, ELM-RSM increased the production yield by 3.6\% for ethyl ester and 3.1\% for methyl ester, compared with those for the experimental data.}, subject = {Biodiesel}, language = {en} } @article{MosaviNajafiFaizollahzadehArdabilietal., author = {Mosavi, Amir and Najafi, Bahman and Faizollahzadeh Ardabili, Sina and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {An Intelligent Artificial Neural Network-Response Surface Methodology Method for Accessing the Optimum Biodiesel and Diesel Fuel Blending Conditions in a Diesel Engine from the Viewpoint of Exergy and Energy Analysis}, series = {Energies}, volume = {2018}, journal = {Energies}, number = {11, 4}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11040860}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180507-37467}, pages = {18}, abstract = {Biodiesel, as the main alternative fuel to diesel fuel which is produced from renewable and available resources, improves the engine emissions during combustion in diesel engines. In this study, the biodiesel is produced initially from waste cooking oil (WCO). The fuel samples are applied in a diesel engine and the engine performance has been considered from the viewpoint of exergy and energy approaches. Engine tests are performed at a constant 1500 rpm speed with various loads and fuel samples. The obtained experimental data are also applied to develop an artificial neural network (ANN) model. Response surface methodology (RSM) is employed to optimize the exergy and energy efficiencies. Based on the results of the energy analysis, optimal engine performance is obtained at 80\% of full load in presence of B10 and B20 fuels. However, based on the exergy analysis results, optimal engine performance is obtained at 80\% of full load in presence of B90 and B100 fuels. 
The optimum values of exergy and energy efficiencies are in the range of 25-30\% of full load, which is the same as the calculated range obtained from mathematical modeling.}, subject = {Biodiesel}, language = {en} } @article{RenZhuangOterkusetal., author = {Ren, Huilong and Zhuang, Xiaoying and Oterkus, Erkan and Zhu, Hehua and Rabczuk, Timon}, title = {Nonlocal strong forms of thin plate, gradient elasticity, magneto-electro-elasticity and phase-field fracture by nonlocal operator method}, series = {Engineering with Computers}, volume = {2021}, journal = {Engineering with Computers}, doi = {10.1007/s00366-021-01502-8}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211207-45388}, pages = {1 -- 22}, abstract = {The derivation of nonlocal strong forms for many physical problems remains cumbersome in traditional methods. In this paper, we apply the variational principle/weighted residual method based on nonlocal operator method for the derivation of nonlocal forms for elasticity, thin plate, gradient elasticity, electro-magneto-elasticity and phase-field fracture method. The nonlocal governing equations are expressed as an integral form on support and dual-support. The first example shows that the nonlocal elasticity has the same form as dual-horizon non-ordinary state-based peridynamics. The derivation is simple and general and it can convert efficiently many local physical models into their corresponding nonlocal forms. In addition, a criterion based on the instability of the nonlocal gradient is proposed for the fracture modelling in linear elasticity. 
Several numerical examples are presented to validate nonlocal elasticity and the nonlocal thin plate.}, subject = {Bruchmechanik}, language = {en} } @phdthesis{Schemmann, author = {Schemmann, Christoph}, title = {Optimierung von radialen Verdichterlaufr{\"a}dern unter Ber{\"u}cksichtigung empirischer und analytischer Vorinformationen mittels eines mehrstufigen Sampling Verfahrens}, doi = {10.25643/bauhaus-universitaet.3974}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190910-39748}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {233}, abstract = {Turbomachinery plays an important role in many cases of energy generation or conversion. Therefore, turbomachinery is a promising approaching point for optimization in order to increase the efficiency of energy use. In recent years, the use of automated optimization strategies in combination with numerical simulation has become increasingly popular in many fields of engineering. The complex interactions between fluid and solid mechanics encountered in turbomachines on the one hand and the high computational expense needed to calculate the performance on the other hand, have, however, prevented a widespread use of these techniques in this field of engineering. The objective of this work was the development of a strategy for efficient metamodel based optimization of centrifugal compressor impellers. In this context, the main focus is the reduction of the required numerical expense. The central idea followed in this research was the incorporation of preliminary information acquired from low-fidelity computation methods and empirical correlations into the sampling process to identify promising regions of the parameter space. This information was then used to concentrate the numerically expensive high-fidelity computations of the fluid dynamic and structure mechanic performance of the impeller in these regions while still maintaining a good coverage of the whole parameter space. 
The development of the optimization strategy can be divided into three main tasks. Firstly, the available preliminary information had to be researched and rated. This research identified loss models based on one dimensional flow physics and empirical correlations as the best suited method to predict the aerodynamic performance. The loss models were calibrated using available performance data to obtain a high prediction quality. As no sufficiently exact models for the prediction of the mechanical loading of the impeller could be identified, a metamodel based on finite element computations was chosen for this estimation. The second task was the development of a sampling method which concentrates samples in regions of the parameter space where high quality designs are predicted by the preliminary information while maintaining a good overall coverage. As available methods like rejection sampling or Markov-chain Monte-Carlo methods did not meet the requirements in terms of sample distribution and input correlation, a new multi-fidelity sampling method called ``Filtered Sampling'' has been developed. The last task was the development of an automated computational workflow. This workflow encompasses geometry parametrization, geometry generation, grid generation and computation of the aerodynamic performance and the structure mechanic loading. Special emphasis was put into the development of a geometry parametrization strategy based on fluid mechanic considerations to prevent the generation of physically inexpedient designs. Finally, the optimization strategy, which utilizes the previously developed tools, was successfully employed to carry out three optimization tasks. The efficiency of the method was proven by the first and second testcase where an existing compressor design was optimized by the presented method. The results were comparable to optimizations which did not take preliminary information into account, while the required computational expense could be halved. 
In the third testcase, the method was applied to generate a new impeller design. In contrast to the previous examples, this optimization featured larger variations of the impeller designs. Therefore, the applicability of the method to parameter spaces with significantly varying designs could be proven, too.}, subject = {Simulation}, language = {en} } @phdthesis{Tan, author = {Tan, Fengjie}, title = {Shape Optimization Design of Arch Type Dams under Uncertainties}, doi = {10.25643/bauhaus-universitaet.3960}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190819-39608}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Due to an increased need for hydro-electricity, water storage, and flood protection, it is assumed that a series of new dams will be built throughout the world. Comparing existing design methodologies for arch-type dams, model-based shape optimization can effectively reduce construction costs and leverage the properties of construction materials. To apply the means of shape optimization, suitable variables need to be chosen to formulate the objective function, which is the volume of the arch dam here. In order to increase the consistency with practical conditions, a great number of geometrical and behavioral constraints are included in the mathematical model. An optimization method, namely Genetic Algorithm is adopted which allows a global search. Traditional optimization techniques are realized based on a deterministic approach, which means that the material properties and loading conditions are assumed to be fixed values. As a result, the real-world structures that are optimized by these approaches suffer from uncertainties that one needs to be aware of. Hence, in any optimization process for arch dams, it is necessary to find a methodology that is capable of considering the influences of uncertainties and generating a solution which is robust enough against the uncertainties. 
The focus of this thesis is the formulation and the numerical method for the optimization of the arch dam under the uncertainties. The two main models, the probabilistic model, and non-probabilistic models are introduced and discussed. Classic procedures of probabilistic approaches under uncertainties, such as RDO (robust design optimization) and RBDO (reliability-based design optimization), are in general computationally expensive and rely on estimates of the system's response variance and failure probabilities. Instead, the robust optimization (RO) method which is based on the non-probabilistic model, will not follow a full probabilistic approach but works with pre-defined confidence levels. This leads to a bi-level optimization program where the volume of the dam is optimized under the worst combination of the uncertain parameters. By this, robust and reliable designs are obtained and the result is independent of any assumptions on stochastic properties of the random variables in the model. The optimization of an arch-type dam is realized here by a robust optimization method under load uncertainty, where hydraulic and thermal loads are considered. The load uncertainty is modeled as an ellipsoidal expression. Comparing with any traditional deterministic optimization (DO) method, which only concerns the minimum objective value and offers a solution candidate close to limit-states, the RO method provides a robust solution against uncertainties. All the above mentioned methods are applied to the optimization of the arch dam to compare with the optimal design with DO methods. The results are compared and analyzed to discuss the advantages and drawbacks of each method. In order to reduce the computational cost, a ranking strategy and an approximation model are further involved to do a preliminary screening. 
By means of these, the robust design can generate an improved arch dam structure which ensures both safety and serviceability during its lifetime.}, subject = {Wasserbau}, language = {en} }