@article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {Agronomy}, volume = {2020}, journal = {Agronomy}, number = {Volume 10, issue 1, article 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the development of accurate methods for assessing cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 Iranian rice cultivars in three forms (paddy, brown, and white) were analyzed through pre-processing and segmentation using MATLAB. Ninety-two features, comprising 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of the data was evaluated, and the possibility of observing a significant difference between all features of the cultivars was studied using analysis of variance. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separation was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayer perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for the identification and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} }

@article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for housing and the usage of urban infrastructure have increased, elevating the risk that natural calamities pose to human lives. The occupancy demand has rapidly increased the construction rate, while inadequate structural design has made buildings more vulnerable. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations.
Structural collapse causes economic losses as well as the loss of human lives. Applying different theoretical methods to analyze structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performance is necessary for future developments. This process is known as Rapid Visual Screening (RVS). The technique has been developed to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality means that some of the required parameters are unavailable; in such cases, the RVS process becomes tedious. Hence, multiple-criteria decision-making (MCDM) methods open a new gateway for tackling such situations in seismic vulnerability assessment. The different parameters required by RVS can be incorporated into MCDM, which evaluates multiple conflicting criteria in decision making across several fields. This paper aims to bridge the gap between RVS and MCDM. Furthermore, to define the correlation between these techniques, the methodologies from the Indian and Turkish codes and from the Federal Emergency Management Agency (FEMA) have been implemented, and the resulting assessments of the seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} }

@article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, advancements in computational science and numerical modeling can equip humanity to predict its severity, understand the outcomes, and prepare for post-disaster management. Many aged buildings in developed metropolitan areas are still in service. These buildings were designed before national seismic codes were established or without the introduction of construction regulations. In such cases, risk reduction is significant for developing alternatives and designing suitable models to enhance the performance of existing structures. Such models can classify the risks and casualties related to possible earthquakes for emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, studying each building's behavior under seismic actions through structural analysis is unrealistic because of the rigorous computations, long duration, and substantial expenditure involved. This calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was examined, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored.
The ML model is trained and tested separately on damage data from four different earthquakes, namely those in Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of a varying number of input samples and eight performance modifiers. Based on the study and its results, the SVM-based ML model classifies the given input data into the corresponding classes and thus accomplishes the hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} }

@article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some existing tall buildings suffer from several problems because of their unsuitable location; these problems include increasing density, imposing traffic on urban thoroughfares, and blocking view corridors. Some of these buildings have destroyed desirable views of the city. In this research, different criteria were chosen, such as environment, access, socio-economic factors, land use, and physical context. These criteria and sub-criteria were prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. In parallel, layers corresponding to the sub-criteria were created in ArcGIS 10.3, and a locating plan was then produced via a weighted overlay (map algebra). In the next step, seven hypothetical tall buildings (20 stories) in the best part of the locating plan were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from streets and open spaces throughout the city. These processes were modeled in MATLAB, and the final fuzzy visibility plan was created in ArcGIS. The fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city, and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} }

@article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it to big data comes with computational challenges.
Indeed, KNN determines the class of a new sample based on the classes of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a computational cost so large that the task is no longer feasible on a single computing machine. One of the techniques proposed to make classification methods applicable to large datasets is pruning. LC-KNN is an improved KNN method that first clusters the data into smaller partitions using the K-means clustering method and then, for each new sample, applies KNN to the partition whose center is nearest. However, because the clusters have different shapes and densities, selecting the appropriate cluster is a challenge. In this paper, an approach is proposed to improve the pruning phase of the LC-KNN method by taking these factors into account. The proposed approach helps to choose a more appropriate cluster in which to search for neighbors, thus increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach, with higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} }

@article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of styrene acrylate copolymer (SA) particles adsorbed on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, visible spectrophotometry (VIS), and thermogravimetry coupled with mass spectrometry (TG-MS). Considering the advantages and disadvantages of each method, including the sample preparation each requires, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. Significantly lower amounts of adsorbed polymer tended to be identified using TG-MS compared to the values determined with the depletion method. Spectrophotometrically determined values lay between these extremes. This tendency was found for three of the four cement pastes examined and originates in the sample preparation and methodical limitations. The main influencing factor is the distortion of the polymer concentration in the liquid phase during centrifugation, caused by interactions at the interface between sediment and supernatant. The newly developed TG-MS method for the quantification of SA particles proved to be suitable for dealing with these issues.
Here, instead of the liquid phase, the sediment is examined with regard to its polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} }

@article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {Volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {Classical Internet of Things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations of shared transfer media, many nodes in the network are prone to collisions during simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic that are not subjected to external noise from adjacent frequencies. Prevention, detection, and control solutions for congestion management in the network are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next hop of the path using a fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent collisions. In the congestion control phase, the back-pressure method was used, based on the quality of the queue, to decrease the probability of links in the pathway passing through the pre-congested node. The main goals of this study are to balance energy consumption across network nodes, reduce the rate of lost packets, and increase the quality of service in routing. Simulation results show that the proposed Congestion Control Fuzzy Decision Making (CCFDM) method is more capable of improving routing parameters than recent algorithms.}, subject = {Internet der Dinge}, language = {en} }

@article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {Volume 13, issue 13, article 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {Economic losses from earthquakes tend to hit national economies considerably; therefore, models capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners pursuing risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. Applying advanced structural analysis to each building to study its earthquake response is impractical due to complex calculations, long computational times, and exorbitant cost. This demonstrates the need for a fast and reliable method, commonly known as Rapid Visual Screening (RVS).
The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of applying Machine Learning (ML) to damage prediction, using a Support Vector Machine (SVM) model as the damage classification technique, has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building data consist of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} }

@article{BecherVoelkerRodehorstetal., author = {Becher, Lia and V{\"o}lker, Conrad and Rodehorst, Volker and Kuhne, Michael}, title = {Background-oriented schlieren technique for two-dimensional visualization of convective indoor air flows}, series = {Optics and Lasers in Engineering}, volume = {2020}, journal = {Optics and Lasers in Engineering}, number = {Volume 134, article 106282}, doi = {10.1016/j.optlaseng.2020.106282}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220810-46972}, pages = {9}, abstract = {This article focuses on further developments of the background-oriented schlieren (BOS) technique to visualize convective indoor air flow, which is usually characterized by very small density gradients. Since light rays are deflected when passing through fluids of different densities, BOS can detect the resulting refractive index gradients as an integration along the line of sight. In this paper, the BOS technique is used to yield a two-dimensional visualization of small density gradients. The novelty of the described method is the implementation of a highly sensitive BOS setup to visualize the ascending thermal plume from a heated thermal manikin at temperature differences as small as 1 K. To guarantee steady boundary conditions, the thermal manikin was seated in a climate laboratory. For the experimental investigations, a high-resolution DSLR camera was used to capture a large field of view with sufficient detail. Several parameters, such as various backgrounds, focal lengths, room air temperatures, and distances between the object of investigation, the camera, and the structured background, were tested to find the most suitable settings for visualizing convective indoor air flow. Besides these measurements, this paper presents the analysis method using cross-correlation algorithms and, finally, the results of visualizing the convective indoor air flow with BOS. The highly sensitive BOS setup presented in this article complements the commonly used invasive methods, which strongly influence weak air flows.}, subject = {Raumklima}, language = {en} }

@article{ReichertOlneyLahmer, author = {Reichert, Ina and Olney, Peter and Lahmer, Tom}, title = {Combined approach for optimal sensor placement and experimental verification in the context of tower-like structures}, series = {Journal of Civil Structural Health Monitoring}, volume = {2021}, journal = {Journal of Civil Structural Health Monitoring}, number = {Volume 11}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s13349-020-00448-7}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44701}, pages = {223 -- 234}, abstract = {When it comes to monitoring large structures, the main issues are limited time, high costs, and how to deal with the large amount of data. In order to reduce and manage these, methods from the field of optimal design of experiments are useful and supportive.
Having optimal experimental designs at hand before conducting any measurements leads to a highly informative measurement concept, in which the sensor positions are optimized to minimize the errors in the structures' models. To reduce computational time, a combined approach using the Fisher Information Matrix and the mean-squared error in a two-step procedure is proposed, taking different error types into consideration. The error descriptions contain random/aleatoric and systematic/epistemic portions. Applying this combined approach to a finite element model, using artificial acceleration time measurement data with artificially added errors, yields the optimized sensor positions. These findings are compared to results from laboratory experiments on the modeled structure, a tower-like structure represented by a hollow pipe acting as a cantilever beam. In conclusion, the combined approach leads to a sound experimental design that provides a good estimate of the structure's behavior and model parameters without the need for preliminary measurements for model updating.}, subject = {Strukturmechanik}, language = {en} }
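Note: The ReichertOlneyLahmer entry above describes optimizing sensor positions via the Fisher Information Matrix (FIM). The following Python sketch is a minimal, hypothetical illustration of one common variant of that idea, greedy D-optimal sensor selection over a precomputed sensitivity matrix; it is not the authors' implementation, and the synthetic matrix S, the regularization term, and the sensor counts are assumptions for demonstration only.

    # Minimal, hypothetical sketch of greedy D-optimal sensor placement via the
    # Fisher Information Matrix (FIM). S has one row per candidate sensor
    # position and one column per model parameter; it is synthetic, NOT data
    # from the cited paper.
    import numpy as np

    def greedy_fim_placement(S, n_sensors):
        """Pick sensor rows of S that greedily maximize log det(S_sel^T S_sel)."""
        n_candidates, n_params = S.shape
        selected = []
        for _ in range(n_sensors):
            best_pos, best_logdet = None, -np.inf
            for pos in range(n_candidates):
                if pos in selected:
                    continue
                rows = S[selected + [pos], :]
                # Small jitter keeps the FIM invertible while few rows are chosen;
                # slogdet is numerically safer than det for near-singular matrices.
                _, logdet = np.linalg.slogdet(rows.T @ rows + 1e-12 * np.eye(n_params))
                if logdet > best_logdet:
                    best_pos, best_logdet = pos, logdet
            selected.append(best_pos)
        return selected

    # Usage: 50 candidate positions, 4 model parameters, choose 6 sensor locations.
    rng = np.random.default_rng(seed=0)
    S = rng.standard_normal((50, 4))
    print(greedy_fim_placement(S, n_sensors=6))

Greedy selection is the usual heuristic here because an exhaustive search over all sensor subsets grows combinatorially with the number of candidate positions.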