@article{ShamshirbandJoloudariGhasemiGoletal., author = {Shamshirband, Shahaboddin and Joloudari, Javad Hassannataj and GhasemiGol, Mohammad and Saadatfar, Hamid and Mosavi, Amir and Nabipour, Narjes}, title = {FCS-MBFLEACH: Designing an Energy-Aware Fault Detection System for Mobile Wireless Sensor Networks}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 1, article 28}, publisher = {MDPI}, doi = {10.3390/math8010028}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40541}, pages = {24}, abstract = {Wireless sensor networks (WSNs) include large-scale sensor nodes that are densely distributed over a geographical region that is completely randomized for monitoring, identifying, and analyzing physical events. The crucial challenge in wireless sensor networks is the very high dependence of the sensor nodes on limited battery power to exchange information wirelessly, as well as the non-rechargeable battery of the wireless sensor nodes, which makes the management and monitoring of these nodes in terms of abnormal changes very difficult. These anomalies arise from faults, including hardware and software faults, anomalies, and attacks by intruders, all of which affect the comprehensiveness of the data collected by wireless sensor networks. Hence, crucial measures should be taken to detect early faults in the network, despite the limitations of the sensor nodes. Machine learning methods offer solutions that can be used to detect sensor node faults in the network. The purpose of this study is to use several classification methods to compute the fault detection accuracy with different node densities under two scenarios in regions of interest, using the MB-FLEACH, one-class support vector machine (SVM), and fuzzy one-class SVM methods, or the combined FCS-MBFLEACH method. It should be noted that, in studies to date, no super cluster head (SCH) selection has been performed to detect node faults in the network. The simulation outcomes demonstrate that the FCS-MBFLEACH method has the best performance in terms of the accuracy of fault detection, false-positive rate (FPR), average remaining energy, and network lifetime compared to other classification methods.}, subject = {Vernetzung}, language = {en} } @article{ShamshirbandBabanezhadMosavietal., author = {Shamshirband, Shahaboddin and Babanezhad, Meisam and Mosavi, Amir and Nabipour, Narjes and Hajnal, Eva and Nadai, Laszlo and Chau, Kwok-Wing}, title = {Prediction of flow characteristics in the bubble column reactor by the artificial pheromone-based communication of biological ants}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1715842}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200227-41013}, pages = {367 -- 378}, abstract = {A novel combination of the ant colony optimization algorithm (ACO) and computational fluid dynamics (CFD) data is proposed for modeling multiphase chemical reactors. The proposed intelligent model presents a probabilistic computational strategy for predicting various levels of three-dimensional bubble column reactor (BCR) flow.
The results demonstrate enhanced communication between the ant colony prediction and the CFD data in different sections of the BCR.}, subject = {Maschinelles Lernen}, language = {en} } @article{ShabaniSamadianfardSattarietal., author = {Shabani, Sevda and Samadianfard, Saeed and Sattari, Mohammad Taghi and Mosavi, Amir and Shamshirband, Shahaboddin and Kmet, Tibor and V{\´a}rkonyi-K{\´o}czy, Annam{\´a}ria R.}, title = {Modeling Pan Evaporation Using Gaussian Process Regression K-Nearest Neighbors Random Forest and Support Vector Machines; Comparative Analysis}, series = {Atmosphere}, volume = {2020}, journal = {Atmosphere}, number = {Volume 11, Issue 1, 66}, doi = {10.3390/atmos11010066}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40561}, pages = {17}, abstract = {Evaporation is a very important process; it is one of the most critical factors in agricultural, hydrological, and meteorological studies. Due to the interactions of multiple climatic factors, evaporation is considered a complex and nonlinear phenomenon to model. Thus, machine learning methods have gained popularity in this realm. In the present study, four machine learning methods of Gaussian Process Regression (GPR), K-Nearest Neighbors (KNN), Random Forest (RF) and Support Vector Regression (SVR) were used to predict pan evaporation (PE). Meteorological data including PE, temperature (T), relative humidity (RH), wind speed (W), and sunny hours (S) were collected from 2011 through 2017. The accuracy of the studied methods was determined using the statistical indices of Root Mean Squared Error (RMSE), correlation coefficient (R) and Mean Absolute Error (MAE). Furthermore, Taylor charts were utilized for evaluating the accuracy of the mentioned models. The results of this study showed that at Gonbad-e Kavus, Gorgan and Bandar Torkman stations, GPR with RMSE of 1.521 mm/day, 1.244 mm/day, and 1.254 mm/day, KNN with RMSE of 1.991 mm/day, 1.775 mm/day, and 1.577 mm/day, RF with RMSE of 1.614 mm/day, 1.337 mm/day, and 1.316 mm/day, and SVR with RMSE of 1.55 mm/day, 1.262 mm/day, and 1.275 mm/day had more appropriate performances in estimating PE values. It was found that GPR for Gonbad-e Kavus Station with input parameters of T, W and S and GPR for Gorgan and Bandar Torkman stations with input parameters of T, RH, W and S had the most accurate predictions and were proposed for precise estimation of PE. The findings of the current study indicated that the PE values may be accurately estimated with few easily measured meteorological parameters.}, subject = {Maschinelles Lernen}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it to big data comes with computational challenges.
Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes such a large computational cost that it is no longer feasible on a single computing machine. One of the proposed techniques to make classification methods applicable to large datasets is pruning. LC-KNN is an improved KNN method that first clusters the data into smaller partitions using the K-means clustering method and then applies KNN to each new sample within the partition whose center is nearest. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. In this paper, an approach has been proposed to improve the pruning phase of the LC-KNN method by taking these factors into account. The proposed approach helps to choose a more appropriate cluster of data for looking for the neighbors, thus increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @article{RabczukGuoZhuangetal., author = {Rabczuk, Timon and Guo, Hongwei and Zhuang, Xiaoying and Chen, Pengwan and Alajlan, Naif}, title = {Stochastic deep collocation method based on neural architecture search and transfer learning for heterogeneous porous media}, series = {Engineering with Computers}, volume = {2022}, journal = {Engineering with Computers}, publisher = {Springer}, address = {London}, doi = {10.1007/s00366-021-01586-2}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220209-45835}, pages = {1 -- 26}, abstract = {We present a stochastic deep collocation method (DCM) based on neural architecture search (NAS) and transfer learning for heterogeneous porous media. We first carry out a sensitivity analysis to determine the key hyper-parameters of the network to reduce the search space and subsequently employ hyper-parameter optimization to finally obtain the parameter values. The presented NAS-based DCM also saves the weights and biases of the most favorable architectures, which are then used in the fine-tuning process. We also employ transfer learning techniques to drastically reduce the computational cost. The presented DCM is then applied to the stochastic analysis of heterogeneous porous material. To this end, a three-dimensional stochastic flow model is built, providing a benchmark for the simulation of groundwater flow in highly heterogeneous aquifers. The performance of the presented NAS-based DCM is verified in different dimensions using the method of manufactured solutions. We show that it significantly outperforms finite difference methods in both accuracy and computational cost.}, subject = {Maschinelles Lernen}, language = {en} } @article{PatzeltErfurtLudwig, author = {Patzelt, Max and Erfurt, Doreen and Ludwig, Horst-Michael}, title = {Quantification of cracks in concrete thin sections considering current methods of image analysis}, series = {Journal of Microscopy}, volume = {2022}, journal = {Journal of Microscopy}, number = {Volume 286, Issue 2}, doi = {10.1111/jmi.13091}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220811-46754}, pages = {154 -- 159}, abstract = {Image analysis is used in this work to quantify cracks in concrete thin sections via modern image processing.
Thin sections were impregnated with a yellow epoxy resin to increase the contrast between voids and other phases of the concrete. By means of different steps of pre-processing, machine learning, and Python scripts, cracks can be quantified in an area of up to 40 cm2. As a result, the crack area, lengths and widths were estimated automatically within a single workflow. Crack patterns caused by freeze-thaw damage were investigated. To compare the inner degradation of the investigated thin sections, the crack density was used. Cracks in the thin sections were measured manually in two different ways for validation of the automatically determined results. On the one hand, the presented work shows that the width of cracks can be determined pixelwise, thus providing the plot of a width distribution. On the other hand, the automatically measured crack lengths differ from the manually measured ones.}, subject = {Beton}, language = {en} } @article{OuaerHosseiniAmaretal., author = {Ouaer, Hocine and Hosseini, Amir Hossein and Amar, Menad Nait and Ben Seghier, Mohamed El Amine and Ghriga, Mohammed Abdelfetah and Nabipour, Narjes and Andersen, P{\aa}l {\O}steb{\o} and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Rigorous Connectionist Models to Predict Carbon Dioxide Solubility in Various Ionic Liquids}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 1, 304}, publisher = {MDPI}, doi = {10.3390/app10010304}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40558}, pages = {18}, abstract = {Estimating the solubility of carbon dioxide in ionic liquids, using reliable models, is of paramount importance from both environmental and economic points of view. In this regard, the current research aims at evaluating the performance of two data-driven techniques, namely multilayer perceptron (MLP) and gene expression programming (GEP), for predicting the solubility of carbon dioxide (CO2) in ionic liquids (ILs) as a function of pressure, temperature, and four thermodynamical parameters of the ionic liquid. To develop the above techniques, 744 experimental data points derived from the literature including 13 ILs were used (80\% of the points for training and 20\% for validation). Two backpropagation-based methods, namely Levenberg-Marquardt (LM) and Bayesian Regularization (BR), were applied to optimize the MLP algorithm. Various statistical and graphical assessments were applied to check the credibility of the developed techniques. The results were then compared with those calculated using Peng-Robinson (PR) or Soave-Redlich-Kwong (SRK) equations of state (EoS). The highest coefficient of determination (R2 = 0.9965) and the lowest root mean square error (RMSE = 0.0116) were recorded for the MLP-LMA model on the full dataset (with a negligible difference to the MLP-BR model). The comparison of results from this model with the vastly applied thermodynamic equation of state models revealed slightly better performance, but the EoS approaches also performed well with R2 from 0.984 up to 0.996.
Lastly, the newly established correlation based on the GEP model exhibited very satisfactory results with overall values of R2 = 0.9896 and RMSE = 0.0201.}, subject = {Maschinelles Lernen}, language = {en} } @article{NabipourMosaviBaghbanetal., author = {Nabipour, Narjes and Mosavi, Amir and Baghban, Alireza and Shamshirband, Shahaboddin and Felde, Imre}, title = {Extreme Learning Machine-Based Model for Solubility Estimation of Hydrocarbon Gases in Electrolyte Solutions}, series = {Processes}, volume = {2020}, journal = {Processes}, number = {Volume 8, Issue 1, 92}, publisher = {MDPI}, doi = {10.3390/pr8010092}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200113-40624}, pages = {12}, abstract = {Calculating the solubility of the hydrocarbon components of natural gases is known as one of the important issues for operational works in petroleum and chemical engineering. In this work, a novel solubility estimation tool has been proposed for hydrocarbon gases, including methane, ethane, propane, and butane, in aqueous electrolyte solutions based on the extreme learning machine (ELM) algorithm. Comparing the ELM outputs with a comprehensive real databank which has 1175 solubility points yielded R-squared values of 0.985 and 0.987 for the training and testing phases, respectively. Furthermore, the visual comparison of estimated and actual hydrocarbon solubility confirmed the ability of the proposed solubility model. Additionally, sensitivity analysis has been employed on the input variables of the model to identify their impacts on hydrocarbon solubility. Such a comprehensive and reliable study can help engineers and scientists to successfully determine the important thermodynamic properties, which are key factors in optimizing and designing different industrial units such as refineries and petrochemical plants.}, subject = {Maschinelles Lernen}, language = {en} } @article{NabipourDehghaniMosavietal., author = {Nabipour, Narjes and Dehghani, Majid and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Short-Term Hydrological Drought Forecasting Based on Different Nature-Inspired Optimization Algorithms Hybridized With Artificial Neural Networks}, series = {IEEE Access}, volume = {2020}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2964584}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40796}, pages = {15210 -- 15222}, abstract = {Hydrological drought forecasting plays a substantial role in water resources management. Hydrological drought highly affects water allocation and hydropower generation. In this research, short-term hydrological drought was forecasted based on the hybridization of novel nature-inspired optimization algorithms and Artificial Neural Networks (ANN). For this purpose, the Standardized Hydrological Drought Index (SHDI) and the Standardized Precipitation Index (SPI) were calculated in one, three, and six aggregated months. Then, three states were proposed for SHDI forecasting, and 36 input-output combinations were extracted based on the cross-correlation analysis. In the next step, newly proposed optimization algorithms, including Grasshopper Optimization Algorithm (GOA), Salp Swarm algorithm (SSA), Biogeography-based optimization (BBO), and Particle Swarm Optimization (PSO) hybridized with the ANN were utilized for SHDI forecasting, and the results were compared to the conventional ANN. Results indicated that the hybridized models outperformed the conventional ANN.
PSO performed better than the other optimization algorithms. The best models forecasted SHDI1 with R2 = 0.68 and RMSE = 0.58, SHDI3 with R2 = 0.81 and RMSE = 0.45, and SHDI6 with R2 = 0.82 and RMSE = 0.40.}, subject = {Maschinelles Lernen}, language = {en} } @article{MousaviSteinkeJuniorTeixeiraetal., author = {Mousavi, Seyed Nasrollah and Steinke J{\´u}nior, Renato and Teixeira, Eder Daniel and Bocchiola, Daniele and Nabipour, Narjes and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Predictive Modeling the Free Hydraulic Jumps Pressure through Advanced Statistical Methods}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 3, 323}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8030323}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200402-41140}, pages = {16}, abstract = {Pressure fluctuations beneath hydraulic jumps potentially endanger the stability of stilling basins. This paper deals with the mathematical modeling of the results of laboratory-scale experiments to estimate the extreme pressures. Experiments were carried out on a smooth stilling basin underneath free hydraulic jumps downstream of an Ogee spillway. From the probability distribution of measured instantaneous pressures, pressures with different probabilities could be determined. It was verified that maximum pressure fluctuations and negative pressures are located at positions near the spillway toe. Also, minimum pressure fluctuations are located downstream of the hydraulic jumps. It was possible to assess the cumulative curves of pressure data related to the characteristic points along the basin and different Froude numbers. To benchmark the results, the dimensionless forms of statistical parameters, including mean pressures (P*m), the standard deviations of pressure fluctuations (σ*X), pressures with different non-exceedance probabilities (P*k\%), and the statistical coefficient of the probability distribution (Nk\%), were assessed. It was found that an existing method can be used to interpret the present data, and pressure distribution in similar conditions, by using new second-order fractional relationships for σ*X and Nk\%. The values of the Nk\% coefficient indicated a single mean value for each probability.}, subject = {Maschinelles Lernen}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using machine learning models of the multilayer perceptron (MLP) algorithm and support vector machine (SVM) in hybrid form with the Firefly optimization algorithm, i.e., MLP-FFA and SVM-FFA.
In the current study, measured ST and meteorological parameters of Tabriz and Ahar weather stations over the period 2013-2015 are used for training and testing of the studied models with delays of one and two days. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams were utilized for that purpose. The obtained results showed that, in the case of a one-day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with MLP, SVM, and SVM-FFA models. However, for a two-day delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the prescribed models, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @article{MengNomanQasemShokrietal., author = {Meng, Yinghui and Noman Qasem, Sultan and Shokri, Manouchehr and Shamshirband, Shahaboddin}, title = {Dimension Reduction of Machine Learning-Based Forecasting Models Employing Principal Component Analysis}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 8, article 1233}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8081233}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200811-42125}, pages = {15}, abstract = {In this research, an attempt was made to reduce the dimension of wavelet-ANFIS/ANN (artificial neural network/adaptive neuro-fuzzy inference system) models toward reliable forecasts as well as to decrease computational cost. In this regard, principal component analysis was performed on the input time series decomposed by a discrete wavelet transform to feed the ANN/ANFIS models. The models were applied for forecasting dissolved oxygen (DO) in rivers, an important variable affecting aquatic life and water quality. The current values of DO, water surface temperature, salinity, and turbidity have been considered as the input variables to forecast DO three time steps ahead. The results of the study revealed that PCA can be employed as a powerful tool for dimension reduction of input variables and also to detect inter-correlation of input variables. Results of the PCA-wavelet-ANN models are compared with those obtained from wavelet-ANN models, while the former has the advantage of less computational time than the latter. Dealing with ANFIS models, PCA is more beneficial for avoiding wavelet-ANFIS models creating too many rules, which deteriorates the efficiency of the ANFIS models. Moreover, manipulating the wavelet-ANFIS models utilizing PCA leads to a significant decrease in computational time.
Finally, it was found that the PCA-wavelet-ANN/ANFIS models can provide reliable forecasts of dissolved oxygen as an important water quality indicator in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. For improvement of the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of the FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and idle printing samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence methods of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate the toughness, part thickness, and production-cost-dependent variables. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method. On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to comparatively undergo deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @article{LizarazuHarirchianShaiketal., author = {Lizarazu, Jorge and Harirchian, Ehsan and Shaik, Umar Arif and Shareef, Mohammed and Antoni-Zdziobek, Annie and Lahmer, Tom}, title = {Application of machine learning-based algorithms to predict the stress-strain curves of additively manufactured mild steel out of its microstructural characteristics}, series = {Results in Engineering}, volume = {2023}, journal = {Results in Engineering}, number = {Volume 20 (2023)}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2023.101587}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65028}, pages = {1 -- 12}, abstract = {The study presents a Machine Learning (ML)-based framework designed to forecast the stress-strain relationship of arc-direct energy deposited mild steel. Based on microstructural characteristics previously extracted using microscopy and X-ray diffraction, approximately 1000 new parameter sets are generated by applying the Latin Hypercube Sampling Method (LHSM). For each parameter set, a Representative Volume Element (RVE) is synthetically created via Voronoi Tessellation.
Input raw data for the ML-based algorithms comprises these parameter sets or RVE images, while output raw data includes their corresponding stress-strain relationships calculated through a Finite Element (FE) procedure. Input data undergoes preprocessing involving standardization, feature selection, and image resizing. Similarly, the stress-strain curves, initially unsuitable for training traditional ML algorithms, are preprocessed using cubic splines and occasionally Principal Component Analysis (PCA). The latter part of the study focuses on employing multiple ML algorithms, utilizing two main models. The first model predicts stress-strain curves based on microstructural parameters, while the second model does so solely from RVE images. The most accurate prediction yields a Root Mean Squared Error of around 5 MPa, approximately 1\% of the yield stress. This outcome suggests that ML models offer precise and efficient methods for characterizing dual-phase steels, establishing a framework for accurate results in material analysis.}, subject = {Maschinelles Lernen}, language = {en} } @article{LashkarAraKalantariSheikhKhozanietal., author = {Lashkar-Ara, Babak and Kalantari, Niloofar and Sheikh Khozani, Zohreh and Mosavi, Amir}, title = {Assessing Machine Learning versus a Mathematical Model to Estimate the Transverse Shear Stress Distribution in a Rectangular Channel}, series = {Mathematics}, volume = {2021}, journal = {Mathematics}, number = {Volume 9, Issue 6, Article 596}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math9060596}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210504-44197}, pages = {15}, abstract = {One of the most important subjects of hydraulic engineering is the reliable estimation of the transverse distribution of bed and wall shear stresses in a rectangular channel. This study makes use of the Tsallis entropy, genetic programming (GP) and adaptive neuro-fuzzy inference system (ANFIS) methods to assess the shear stress distribution (SSD) in the rectangular channel. To evaluate the results of the Tsallis entropy, GP and ANFIS models, laboratory observations were used in which shear stress was measured using an optimized Preston tube. This is then used to measure the SSD at various aspect ratios in the rectangular channel. To investigate the shear stress percentage, 10 data series with a total of 112 different data points were used. The results of the sensitivity analysis show that the most influential parameter for the SSD in a smooth rectangular channel is the dimensionless parameter B/H, where the transverse coordinate is B and the flow depth is H. With the parameters (b/B) and (B/H) for the bed and (z/H) and (B/H) for the wall as inputs, the GP model performed better than the other models.
Based on the analysis, it can be concluded that the use of GP and ANFIS algorithms is more effective in estimating shear stress in smooth rectangular channels than the Tsallis entropy-based equations.}, subject = {Maschinelles Lernen}, language = {en} } @article{KumariHarirchianLahmeretal., author = {Kumari, Vandana and Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Evaluation of Machine Learning and Web-Based Process for Damage Score Estimation of Existing Buildings}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 5, article 578}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12050578}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220509-46387}, pages = {1 -- 23}, abstract = {The seismic vulnerability assessment of existing reinforced concrete (RC) buildings is a significant source of disaster mitigation plans and rescue services. Different countries have evolved various Rapid Visual Screening (RVS) techniques and methodologies to deal with the devastating consequences of earthquakes on the structural characteristics of buildings and human casualties. Artificial intelligence (AI) methods, such as machine learning (ML) algorithm-based methods, are increasingly used in various scientific and technical applications. The investigation toward using these techniques in civil engineering applications has shown encouraging results and reduced human intervention, including uncertainties and biased judgment. In this study, several known non-parametric algorithms are investigated toward RVS using a dataset employing different earthquakes. Moreover, the methodology encourages the possibility of examining the buildings' vulnerability based on the factors related to the buildings' importance and exposure. In addition, a web-based application built on Django is introduced. The interface is designed with the idea of easing the seismic vulnerability investigation in real time. The concept was validated using two case studies, and the achieved results showed the proposed approach's potential efficiency.}, subject = {Maschinelles Lernen}, language = {en} } @unpublished{KhosraviSheikhKhozaniCooper, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Cooper, James R.}, title = {Predicting stable gravel-bed river hydraulic geometry: A test of novel, advanced, hybrid data mining algorithms}, volume = {2021}, doi = {10.25643/bauhaus-universitaet.4499}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211004-44998}, abstract = {Accurate prediction of stable alluvial hydraulic geometry, in which erosion and sedimentation are in equilibrium, is one of the most difficult but critical topics in the field of river engineering. Data mining algorithms have been gaining more attention in this field due to their high performance and flexibility. However, an understanding of the potential for these algorithms to provide fast, cheap, and accurate predictions of hydraulic geometry is lacking. This study provides the first quantification of this potential.
Using at-a-station field data, predictions of flow depth, water-surface width and longitudinal water surface slope are made using three standalone data mining techniques, namely Instance-based Learning (IBK), KStar, and Locally Weighted Learning (LWL), along with four types of novel hybrid algorithms in which the standalone models are trained with Vote, Attribute Selected Classifier (ASC), Regression by Discretization (RBD), and Cross-validation Parameter Selection (CVPS) algorithms (Vote-IBK, Vote-KStar, Vote-LWL, ASC-IBK, ASC-KStar, ASC-LWL, RBD-IBK, RBD-KStar, RBD-LWL, CVPS-IBK, CVPS-KStar, CVPS-LWL). Through a comparison of their predictive performance and a sensitivity analysis of the driving variables, the results reveal: (1) Shields stress was the most effective parameter in the prediction of all geometry dimensions; (2) hybrid models had a higher prediction power than standalone data mining models, empirical equations and traditional machine learning algorithms; (3) the Vote-KStar model had the highest performance in predicting depth and width, and ASC-KStar in estimating slope, each providing very good prediction performance. Through these algorithms, the hydraulic geometry of any river can potentially be predicted accurately and with ease using just a few readily available flow and channel parameters. Thus, the results reveal that these models have great potential for use in stable channel design in data-poor catchments, especially in developing nations where technical modelling skills and understanding of the hydraulic and sediment processes occurring in the river system may be lacking.}, subject = {Maschinelles Lernen}, language = {en} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers. As a result of transportation processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating the LDC. In this study, machine learning methods, namely support vector regression, Gaussian process regression, M5 model tree (M5P) and random forest, and multiple linear regression were examined in predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including correlation coefficient (CC), root mean squared error (RMSE) and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. The Taylor chart was used to evaluate the models, and the results showed that among the machine learning models, M5P had superior performance, with CC of 0.823, RMSE of 454.9 and MAE of 380.9.
The model of Sahay and Dutta, with CC of 0.795, RMSE of 460.7 and MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. In conclusion, the results proved that the developed M5P model with simple formulations was superior to other machine learning models and empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases in middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is the use of medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects. Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis through selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning. The machine learning methods of random trees (RTs), decision tree of C5.0, support vector machine (SVM), and decision tree of Chi-squared automatic interaction detection (CHAID) are used in this study. The proposed method shows promising results and the study confirms that the RTs model outperforms other models.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianLahmerRasulzade, author = {Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Earthquake Hazard Safety Assessment of Existing Buildings Using Optimized Multi-Layer Perceptron Neural Network}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {Volume 13, Issue 8, 2060}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13082060}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200504-41575}, pages = {16}, abstract = {The latest earthquakes have proven that several existing buildings, particularly in developing countries, are not secure against earthquake damage. A variety of statistical and machine-learning approaches have been proposed to identify vulnerable buildings for the prioritization of retrofitting. The present work aims to investigate earthquake susceptibility through the combination of six building performance variables that can be used to obtain an optimal prediction of the damage state of reinforced concrete buildings using an artificial neural network (ANN). In this regard, a multi-layer perceptron network is trained and optimized using a database of 484 damaged buildings from the D{\"u}zce earthquake in Turkey.
The results demonstrate the feasibility and effectiveness of the selected ANN approach for classifying concrete structural damage, which can be used as a preliminary assessment technique to identify vulnerable buildings in disaster risk-management programs.}, subject = {Erdbeben}, language = {en} }