@unpublished{RezakazemiMosaviShirazian, author = {Rezakazemi, Mashallah and Mosavi, Amir and Shirazian, Saeed}, title = {ANFIS pattern for molecular membranes separation optimization}, volume = {2018}, doi = {10.25643/BAUHAUS-UNIVERSITAET.3821}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181122-38212}, pages = {1 -- 20}, abstract = {In this work, molecular separation of an aqueous-organic system was simulated using combined soft computing-mechanistic approaches. The considered separation system was a microporous membrane contactor for separation of benzoic acid from water by contacting with an organic phase containing extractor molecules. Indeed, extractive separation is carried out using membrane technology, where a solute-organic complex is formed at the interface. The main focus was to develop a simulation methodology for prediction of the concentration distribution of the solute (benzoic acid) on the feed side of the membrane system, as the removal efficiency of the system is determined by the concentration distribution of the solute in the feed channel. The pattern of the Adaptive Neuro-Fuzzy Inference System (ANFIS) was optimized by finding the optimum membership function, learning percentage, and number of rules. The ANFIS was trained using data extracted from a CFD simulation of the membrane system. Comparisons between the concentration distribution predicted by ANFIS and the CFD data revealed that the optimized ANFIS pattern can be used as a predictive tool for simulation of the process. An R2 higher than 0.99 was obtained for the optimized ANFIS model. The main advantage of the developed methodology is its very low computational time for simulation of the system, and it can be used as a rigorous simulation tool for understanding and design of membrane-based systems. Highlights: molecular separation using microporous membranes; development of a hybrid ANFIS-CFD model for the separation process; optimization of the ANFIS structure for prediction of the separation process}, subject = {Fluid}, language = {en} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {Economic losses from earthquakes can affect the national economy considerably; therefore, models capable of estimating the vulnerability and losses of future earthquakes are highly valuable to emergency planners for risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis to each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost. This highlights the need for a fast and reliable method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states.
In this study, the efficacy of Machine Learning (ML) for damage prediction, using a Support Vector Machine (SVM) model as the damage classification technique, has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building data consist of 22 performance modifiers that were used for supervised machine learning.}, subject = {Erdbeben}, language = {en} } @phdthesis{RadmardRahmani, author = {Radmard Rahmani, Hamid}, title = {Artificial Intelligence Approach for Seismic Control of Structures}, doi = {10.25643/bauhaus-universitaet.4135}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200417-41359}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In the first part of this research, the utilization of tuned mass dampers in the vibration control of tall buildings during earthquake excitations is studied. The main issues, such as optimizing the parameters of the dampers and studying the effects of the frequency content of the target earthquakes, are addressed. The non-dominated sorting genetic algorithm method is improved by upgrading its genetic operators and is utilized to develop a framework for determining the optimum placement and parameters of dampers in tall buildings. A case study is presented in which the optimal placement and properties of dampers are determined for a model of a tall building under different earthquake excitations through computer simulations. In the second part, a novel framework for the brain learning-based intelligent seismic control of smart structures is developed. In this approach, a deep neural network learns how to improve structural responses during earthquake excitations using feedback control. The reinforcement learning method is improved and utilized to develop a framework for training the deep neural network as an intelligent controller. The efficiency of the developed framework is examined through two case studies, including a single-degree-of-freedom system and a high-rise building, under different earthquake excitation records. The results show that the controller gradually develops an optimum control policy to reduce the vibrations of a structure under an earthquake excitation through a cyclical process of actions and observations. It is shown that the controller efficiently improves the structural responses under new earthquake excitations for which it was not trained. Moreover, it is shown that the controller has a stable performance under uncertainties.}, subject = {Erdbeben}, language = {en} } @article{LashkarAraKalantariSheikhKhozanietal., author = {Lashkar-Ara, Babak and Kalantari, Niloofar and Sheikh Khozani, Zohreh and Mosavi, Amir}, title = {Assessing Machine Learning versus a Mathematical Model to Estimate the Transverse Shear Stress Distribution in a Rectangular Channel}, series = {Mathematics}, volume = {2021}, journal = {Mathematics}, number = {Volume 9, Issue 6, Article 596}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math9060596}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210504-44197}, pages = {15}, abstract = {One of the most important subjects in hydraulic engineering is the reliable estimation of the transverse distribution of bed and wall shear stresses in a rectangular channel.
This study makes use of the Tsallis entropy, genetic programming (GP), and adaptive neuro-fuzzy inference system (ANFIS) methods to assess the shear stress distribution (SSD) in a rectangular channel. To evaluate the results of the Tsallis entropy, GP, and ANFIS models, laboratory observations were used in which shear stress was measured using an optimized Preston tube, which was then used to measure the SSD at various aspect ratios in the rectangular channel. To investigate the shear stress percentage, 10 data series with a total of 112 data points were used. The results of the sensitivity analysis show that the most influential parameter for the SSD in a smooth rectangular channel is the dimensionless parameter B/H, where B is the transverse coordinate and H is the flow depth. With the parameters (b/B) and (B/H) for the bed and (z/H) and (B/H) for the wall as inputs, the GP model performed better than the others. Based on the analysis, it can be concluded that the use of GP and ANFIS algorithms is more effective in estimating shear stress in smooth rectangular channels than the Tsallis entropy-based equations.}, subject = {Maschinelles Lernen}, language = {en} } @inproceedings{KoenigSchmitt, author = {K{\"o}nig, Reinhard and Schmitt, Gerhard}, title = {Backcasting and a new way of command in computational design : Proceedings}, series = {CAADence in Architecture Conference}, booktitle = {CAADence in Architecture Conference}, editor = {Szoboszlai, Mih{\´a}ly}, address = {Budapest}, doi = {10.25643/bauhaus-universitaet.2599}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-25996}, pages = {15 -- 25}, abstract = {It is not uncommon that analysis and simulation methods are used mainly to evaluate finished designs and to prove their quality, whereas their potential is to lead or control a design process from the very beginning. Therefore, we introduce a design method that moves away from a "what-if" forecasting philosophy and increases the focus on backcasting approaches. We use the power of computation by combining sophisticated methods for generating designs with analysis methods to close the gap between analysis and synthesis of designs. For the development of future-oriented computational design support, we need to be aware of the human designer's role. A productive combination of the excellence of human cognition with the power of modern computing technology is needed. We call this approach "cognitive design computing". The computational part aims to mimic the way a designer's brain works by combining state-of-the-art optimization and machine learning approaches with available simulation methods. The cognition part respects the complex nature of design problems by providing models for human-computation interaction. This means that a design problem is distributed between computer and designer. In the context of the conference slogan "back to command", we ask how we may imagine the command over a cognitive design computing system. We expect that designers will need to cede control of some parts of the design process to machines, but in exchange they will gain powerful new command over complex computing processes. This means that designers have to explore the potential of their role as commanders of partially automated design processes. In this contribution we describe an approach for the development of a future cognitive design computing system with a focus on urban design issues.
The aim of this system is to enable an urban planner to treat a planning problem as a backcasting problem by defining what performance a design solution should achieve and to automatically query or generate a set of best possible solutions. This kind of computational planning process offers proof that the designer meets the original, explicitly defined design requirements. A key way in which digital tools can support designers is by generating design proposals. Evolutionary multi-criteria optimization methods allow us to explore a multi-dimensional design space and provide a basis for the designer to evaluate conflicting requirements: a task urban planners face frequently. We also reflect on why designers will give more and more control to machines. Therefore, we investigate first approaches that employ machine learning methods to learn how designers use computational design support systems in combination with manual design strategies to deal with urban design problems. By observing how designers work, it is possible to derive more complex artificial solution strategies that can help computers make better suggestions in the future.}, subject = {CAD}, language = {en} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. and Janizadeh, Saeid and Chandra Pal, Subodh and Chowdhuri, Indrajit and Siabi, Zhaleh and Norouzi, Akbar and Melesse, Assefa M. and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Comparative Analysis of Artificial Intelligence Models for Accurate Estimation of Groundwater Nitrate Concentration}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 20, article 5763}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20205763}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43364}, pages = {1 -- 23}, abstract = {Prediction of the groundwater nitrate concentration is of utmost importance for pollution control and water resource management. This research aims to model the spatial groundwater nitrate concentration in the Marvdasht watershed, Iran, based on several artificial intelligence methods: the support vector machine (SVM), Cubist, random forest (RF), and Bayesian artificial neural network (Bayesian-ANN) machine learning models. For this purpose, 11 independent variables affecting groundwater nitrate changes, namely elevation, slope, plan curvature, profile curvature, rainfall, piezometric depth, distance from the river, distance from residential areas, sodium (Na), potassium (K), and topographic wetness index (TWI), were prepared for the study area. Nitrate levels were also measured in 67 wells and used as the dependent variable for modeling. Data were divided into training (70\%) and testing (30\%) sets for modeling. The evaluation criteria of coefficient of determination (R2), mean absolute error (MAE), root mean square error (RMSE), and Nash-Sutcliffe efficiency (NSE) were used to evaluate the performance of the models. The results of modeling the susceptibility of groundwater nitrate concentration showed that the RF model (R2 = 0.89, RMSE = 4.24, NSE = 0.87) performs better than the Cubist (R2 = 0.87, RMSE = 5.18, NSE = 0.81), SVM (R2 = 0.74, RMSE = 6.07, NSE = 0.74), and Bayesian-ANN (R2 = 0.79, RMSE = 5.91, NSE = 0.75) models. The results of groundwater nitrate concentration zoning showed that the northern parts of the study area, which are agricultural, have the highest nitrate concentrations.
The most important causes of nitrate pollution in these areas are agricultural activities, the use of groundwater to irrigate crops, and wells located close to agricultural areas; the indiscriminate use of chemical fertilizers means that these fertilizers are washed out by irrigation or rainwater, penetrate the groundwater, and pollute the aquifer.}, subject = {Grundwasser}, language = {en} } @article{MengNomanQasemShokrietal., author = {Meng, Yinghui and Noman Qasem, Sultan and Shokri, Manouchehr and Shamshirband, Shahaboddin}, title = {Dimension Reduction of Machine Learning-Based Forecasting Models Employing Principal Component Analysis}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 8, article 1233}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8081233}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200811-42125}, pages = {15}, abstract = {In this research, an attempt was made to reduce the dimension of wavelet-ANFIS/ANN (artificial neural network/adaptive neuro-fuzzy inference system) models toward reliable forecasts as well as to decrease computational cost. In this regard, principal component analysis (PCA) was performed on the input time series decomposed by a discrete wavelet transform to feed the ANN/ANFIS models. The models were applied to forecast dissolved oxygen (DO) in rivers, an important variable affecting aquatic life and water quality. The current values of DO, water surface temperature, salinity, and turbidity were considered as input variables to forecast DO three time steps ahead. The results of the study revealed that PCA can be employed as a powerful tool for dimension reduction of input variables and also to detect their inter-correlation. Results of the PCA-wavelet-ANN models are compared with those obtained from wavelet-ANN models, with the former having the advantage of less computational time than the latter. For the ANFIS models, PCA is even more beneficial, as it prevents the wavelet-ANFIS models from creating too many rules, which deteriorates their efficiency. Moreover, manipulating the wavelet-ANFIS models utilizing PCA leads to a significant decrease in computational time. Finally, it was found that the PCA-wavelet-ANN/ANFIS models can provide reliable forecasts of dissolved oxygen as an important water quality indicator in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{ShamshirbandJoloudariGhasemiGoletal., author = {Shamshirband, Shahaboddin and Joloudari, Javad Hassannataj and GhasemiGol, Mohammad and Saadatfar, Hamid and Mosavi, Amir and Nabipour, Narjes}, title = {FCS-MBFLEACH: Designing an Energy-Aware Fault Detection System for Mobile Wireless Sensor Networks}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 1, article 28}, publisher = {MDPI}, doi = {10.3390/math8010028}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40541}, pages = {24}, abstract = {Wireless sensor networks (WSNs) consist of large numbers of sensor nodes that are densely and randomly distributed over a geographical region to monitor, identify, and analyze physical events.
The crucial challenge in wireless sensor networks is the sensor nodes' very high dependence on limited, non-rechargeable battery power to exchange information wirelessly, which makes managing and monitoring these nodes for abnormal changes very difficult. These anomalies arise from faults, including hardware and software faults and attacks by intruders, all of which affect the comprehensiveness of the data collected by wireless sensor networks. Hence, crucial measures should be taken to detect faults in the network early, despite the limitations of the sensor nodes. Machine learning methods offer solutions that can be used to detect sensor node faults in the network. The purpose of this study is to use several classification methods, namely MB-FLEACH, one-class support vector machine (SVM), fuzzy one-class SVM, and the combined FCS-MBFLEACH method, to compute the fault detection accuracy at different node densities under two scenarios in regions of interest. It should be noted that, in studies so far, no super cluster head (SCH) selection has been performed to detect node faults in the network. The simulation outcomes demonstrate that the FCS-MBFLEACH method has the best performance in terms of fault detection accuracy, false-positive rate (FPR), average remaining energy, and network lifetime compared to the other classification methods.}, subject = {Vernetzung}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. To improve the end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous redundant printed samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence methods of artificial neural network (ANN) and ANN-genetic algorithm (ANN-GA) were further developed to estimate the dependent variables of toughness, part thickness, and production cost. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with the single ANN method.
On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to undergo comparatively large deformation, which supports the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management, and cane supply management is outlined, where the literature indicates that the application of the extreme learning machine (ELM) has never been explored in this realm. Consequently, the leading objective of the current research was to fill this gap by applying ELM to develop a swift and accurate data-driven model for crop production. The key learning has been the need for innovation in the technical aspects of system function, underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrated model using ELM to predict the final growth of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. Accuracy of the ELM model is calculated using the statistical indicators of Root Mean Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2), with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to a faster learning curve. Thus, the proficiency of ELM for further work on advancing prediction models for sugarcane growth was confirmed with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} }