@article{DehghaniSalehiMosavietal., author = {Dehghani, Majid and Salehi, Somayeh and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Ghamisi, Pedram}, title = {Spatial Analysis of Seasonal Precipitation over Iran: Co-Variation with Climate Indices}, series = {ISPRS, International Journal of Geo-Information}, volume = {2020}, journal = {ISPRS, International Journal of Geo-Information}, number = {Volume 9, Issue 2, 73}, publisher = {MDPI}, doi = {10.3390/ijgi9020073}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40740}, pages = {23}, abstract = {Temporary changes in precipitation may lead to sustained and severe drought or massive floods in different parts of the world. Knowing the variation in precipitation can effectively help the water resources decision-makers in water resources management. Large-scale circulation drivers have a considerable impact on precipitation in different parts of the world. In this research, the impact of El Ni{\~n}o-Southern Oscillation (ENSO), Pacific Decadal Oscillation (PDO), and North Atlantic Oscillation (NAO) on seasonal precipitation over Iran was investigated. For this purpose, 103 synoptic stations with at least 30 years of data were utilized. The Spearman correlation coefficient between the indices in the previous 12 months with seasonal precipitation was calculated, and the meaningful correlations were extracted. Then, the month in which each of these indices has the highest correlation with seasonal precipitation was determined. Finally, the overall amount of increase or decrease in seasonal precipitation due to each of these indices was calculated. Results indicate the Southern Oscillation Index (SOI), NAO, and PDO have the most impact on seasonal precipitation, respectively. Additionally, these indices have the highest impact on the precipitation in winter, autumn, spring, and summer, respectively. SOI has a diverse impact on winter precipitation compared to the PDO and NAO, while in the other seasons, each index has its special impact on seasonal precipitation. Generally, all indices in different phases may decrease the seasonal precipitation up to 100\%. However, the seasonal precipitation may increase more than 100\% in different seasons due to the impact of these indices. The results of this study can be used effectively in water resources management and especially in dam operation.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{AbuBakar, author = {Abu Bakar, Ilyani Akmar}, title = {Computational Analysis of Woven Fabric Composites: Single- and Multi-Objective Optimizations and Sensitivity Analysis in Meso-scale Structures}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.4176}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200605-41762}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {151}, abstract = {This study permits a reliability analysis to solve the mechanical behaviour issues existing in the current structural design of fabric structures. Purely predictive material models are highly desirable to facilitate an optimized design scheme and to significantly reduce time and cost at the design stage, such as experimental characterization. The present study examined the role of three major tasks; a) single-objective optimization, b) sensitivity analyses and c) multi-objective optimization on proposed weave structures for woven fabric composites. 
For the single-objective optimization task, the first goal is to optimize the elastic properties of the proposed complex weave structure on a unit-cell basis under periodic boundary conditions. We predict the geometric characteristics related to the skewness of woven fabric composites via an Evolutionary Algorithm (EA) and a parametric study. We also demonstrate the effect of complex weave structures on the fray tendency in woven fabric composites via a tightness evaluation. We utilize a procedure which does not require a numerical averaging process for evaluating the elastic properties of woven fabric composites. The fray tendency and skewness of woven fabrics depend upon the behaviour of the floats, which is related to the weave factor. Results of this study may suggest a broader view for further research into the effects of complex weave structures or may provide an alternative to the fray and skewness problems of current weave structures in woven fabric composites. A comprehensive study is developed on the complex weave structure model which adopts the dry woven fabric of the most promising pattern from the single-objective optimization, incorporating the uncertainty parameters of woven fabric composites. The comprehensive study covers regression-based and variance-based sensitivity analyses. The goal of the second task is to introduce the fabric uncertainty parameters and to elaborate how they can be incorporated into finite element models of macroscopic material parameters, such as the elastic modulus and shear modulus of dry woven fabric subjected to uni-axial and bi-axial deformations. Significant correlations in the study would indicate the need for a thorough investigation of woven fabric composites under uncertainty parameters. The study described here could serve as an alternative to identify effective material properties without prolonged time consumption and expensive experimental tests. The last part focuses on a hierarchical stochastic multi-scale optimization approach (fine-scale and coarse-scale optimizations) under geometrical uncertainty parameters for hybrid composites with complex weave structures. The fine-scale optimization determines the best lamina pattern that maximizes the macroscopic elastic properties, conducted by the EA under the following uncertain mesoscopic parameters: yarn spacing, yarn height, yarn width and misalignment of the yarn angle. The coarse-scale optimization is carried out to optimize the stacking sequences of a symmetric hybrid laminated composite plate with uncertain mesoscopic parameters by employing the Ant Colony Algorithm (ACO). The objective functions of the coarse-scale optimization are to minimize the cost (C) and weight (W) of the hybrid laminated composite plate, considering the fundamental frequency and the buckling load factor as design constraints.
Based on the uncertainty criteria of the design parameters, the appropriate variation required for the structural design standards can be evaluated using the reliability tool, and then an optimized design decision in consideration of cost can be subsequently determined.}, subject = {Verbundwerkstoff}, language = {en} } @article{SadeghzadehMaddahAhmadietal., author = {Sadeghzadeh, Milad and Maddah, Heydar and Ahmadi, Mohammad Hossein and Khadang, Amirhosein and Ghazvini, Mahyar and Mosavi, Amir Hosein and Nabipour, Narjes}, title = {Prediction of Thermo-Physical Properties of TiO2-Al2O3/Water Nanoparticles by Using Artificial Neural Network}, series = {Nanomaterials}, volume = {2020}, journal = {Nanomaterials}, number = {Volume 10, Issue 4, 697}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/nano10040697}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200421-41308}, abstract = {In this paper, an artificial neural network is implemented for the sake of predicting the thermal conductivity ratio of TiO2-Al2O3/water nanofluid. TiO2-Al2O3/water in the role of an innovative type of nanofluid was synthesized by the sol-gel method. The results indicated that 1.5 vol.\% of nanofluids enhanced the thermal conductivity by up to 25\%. It was shown that the heat transfer coefficient was linearly augmented with increasing nanoparticle concentration, but its variation with temperature was nonlinear. It should be noted that the increase in concentration may cause the particles to agglomerate, and then the thermal conductivity is reduced. The increase in temperature also increases the thermal conductivity, due to an increase in the Brownian motion and collision of particles. In this research, for the sake of predicting the thermal conductivity of TiO2-Al2O3/water nanofluid based on volumetric concentration and temperature functions, an artificial neural network is implemented. In this way, for predicting thermal conductivity, SOM (self-organizing map) and BP-LM (Back Propagation-Levenberg-Marquardt) algorithms were used. Based on the results obtained, these algorithms can be considered as an exceptional tool for predicting thermal conductivity. Additionally, the correlation coefficient values were equal to 0.938 and 0.98 when implementing the SOM and BP-LM algorithms, respectively, which is highly acceptable.}, subject = {W{\"a}rmeleitf{\"a}higkeit}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, social-economic, land-use, and physical context.
These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. In parallel, layers corresponding to the sub-criteria were created in ArcGIS 10.3; then, via a weighted overlay (map algebra), a locating plan was created. In the next step, seven hypothetical tall buildings (20 stories), in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the streets and open spaces throughout the city. These processes have been modeled by MATLAB software, and the final fuzzy visibility plan was created by ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @article{ShabaniSamadianfardSattarietal., author = {Shabani, Sevda and Samadianfard, Saeed and Sattari, Mohammad Taghi and Mosavi, Amir and Shamshirband, Shahaboddin and Kmet, Tibor and V{\´a}rkonyi-K{\´o}czy, Annam{\´a}ria R.}, title = {Modeling Pan Evaporation Using Gaussian Process Regression K-Nearest Neighbors Random Forest and Support Vector Machines; Comparative Analysis}, series = {Atmosphere}, volume = {2020}, journal = {Atmosphere}, number = {Volume 11, Issue 1, 66}, doi = {10.3390/atmos11010066}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40561}, pages = {17}, abstract = {Evaporation is a very important process; it is one of the most critical factors in agricultural, hydrological, and meteorological studies. Due to the interactions of multiple climatic factors, evaporation is considered a complex and nonlinear phenomenon to model. Thus, machine learning methods have gained popularity in this realm. In the present study, four machine learning methods, Gaussian Process Regression (GPR), K-Nearest Neighbors (KNN), Random Forest (RF) and Support Vector Regression (SVR), were used to predict the pan evaporation (PE). Meteorological data including PE, temperature (T), relative humidity (RH), wind speed (W), and sunny hours (S) were collected from 2011 through 2017. The accuracy of the studied methods was determined using the statistical indices of Root Mean Squared Error (RMSE), correlation coefficient (R) and Mean Absolute Error (MAE). Furthermore, Taylor charts were utilized for evaluating the accuracy of the mentioned models. The results of this study showed that at Gonbad-e Kavus, Gorgan and Bandar Torkman stations, GPR with RMSE of 1.521 mm/day, 1.244 mm/day, and 1.254 mm/day, KNN with RMSE of 1.991 mm/day, 1.775 mm/day, and 1.577 mm/day, RF with RMSE of 1.614 mm/day, 1.337 mm/day, and 1.316 mm/day, and SVR with RMSE of 1.55 mm/day, 1.262 mm/day, and 1.275 mm/day had more appropriate performances in estimating PE values. It was found that GPR for Gonbad-e Kavus Station with input parameters of T, W and S and GPR for Gorgan and Bandar Torkman stations with input parameters of T, RH, W and S had the most accurate predictions and were proposed for precise estimation of PE.
The findings of the current study indicated that the PE values may be accurately estimated with few easily measured meteorological parameters.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianLahmerBuddhirajuetal., author = {Harirchian, Ehsan and Lahmer, Tom and Buddhiraju, Sreekanth and Mohammad, Kifaytullah and Mosavi, Amir}, title = {Earthquake Safety Assessment of Buildings through Rapid Visual Screening}, series = {Buildings}, volume = {2020}, journal = {Buildings}, number = {Volume 10, Issue 3}, publisher = {MDPI}, doi = {10.3390/buildings10030051}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200331-41153}, pages = {15}, abstract = {Earthquake is among the most devastating natural disasters causing severe economical, environmental, and social destruction. Earthquake safety assessment and building hazard monitoring can highly contribute to urban sustainability through identification and insight into optimum materials and structures. While the vulnerability of structures mainly depends on the structural resistance, the safety assessment of buildings can be highly challenging. In this paper, we consider the Rapid Visual Screening (RVS) method, which is a qualitative procedure for estimating structural scores for buildings suitable for medium- to high-seismic cases. This paper presents an overview of the common RVS methods, i.e., FEMA P-154, IITK-GGSDMA, and EMPI. To examine the accuracy and validation, a practical comparison is performed between their assessment and observed damage of reinforced concrete buildings from a street survey in the Bing{\"o}l region, Turkey, after the 1 May 2003 earthquake. The results demonstrate that the application of RVS methods for preliminary damage estimation is a vital tool. Furthermore, the comparative analysis showed that FEMA P-154 creates an assessment that overestimates damage states and is not economically viable, while EMPI and IITK-GGSDMA provide more accurate and practical estimation, respectively.}, subject = {Maschinelles Lernen}, language = {en} } @article{SaqlaiGhaniKhanetal., author = {Saqlai, Syed Muhammad and Ghani, Anwar and Khan, Imran and Ahmed Khan Ghayyur, Shahbaz and Shamshirband, Shahaboddin and Nabipour, Narjes and Shokri, Manouchehr}, title = {Image Analysis Using Human Body Geometry and Size Proportion Science for Action Classification}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {volume 10, issue 16, article 5453}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10165453}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200904-42322}, pages = {24}, abstract = {Gestures are one of the basic modes of human communication and are usually used to represent different actions. Automatic recognition of these actions forms the basis for solving more complex problems like human behavior analysis, video surveillance, event detection, and sign language recognition, etc. Action recognition from images is a challenging task as the key information like temporal data, object trajectory, and optical flow are not available in still images. While measuring the size of different regions of the human body i.e., step size, arms span, length of the arm, forearm, and hand, etc., provides valuable clues for identification of the human actions. 
In this article, a framework for classification of the human actions is presented where humans are detected and localized through faster region-convolutional neural networks followed by morphological image processing techniques. Furthermore, geometric features from human blob are extracted and incorporated into the classification rules for the six human actions i.e., standing, walking, single-hand side wave, single-hand top wave, both hands side wave, and both hands top wave. The performance of the proposed technique has been evaluated using precision, recall, omission error, and commission error. The proposed technique has been comparatively analyzed in terms of overall accuracy with existing approaches showing that it performs well in contrast to its counterparts.}, subject = {Bildanalyse}, language = {en} } @article{Batra, author = {Batra, Ritika}, title = {Gauging the stakeholders' perspective: towards PPP in building sectors and housing}, series = {Journal of Housing and the Built Environment}, volume = {2020}, journal = {Journal of Housing and the Built Environment}, number = {volume 35}, publisher = {SpringerNature}, address = {Cham}, doi = {10.1007/s10901-020-09754-4}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210413-44124}, pages = {1123 -- 1156}, abstract = {While Public-Private Partnership (PPP) is widely adopted across various sectors, it raises a question on its meagre utilisation in the housing sector. This paper, therefore, gauges the perspective of the stakeholders in the building industry towards the application of PPP in various building sectors together with housing. It assesses the performance reliability of PPP for housing by learning possible take-aways from other sectors. The role of key stakeholders in the industry becomes highly responsible for an informed understanding and decision-making. To this end, a two-tier investigation was conducted including surveys and expert interviews, with several stakeholders in the PPP industry in Europe, involving the public sector, private sector, consultants, as well as other community/user representatives. The survey results demonstrated the success rate with PPPs, major factors important for PPPs such as profitability or end-user acceptability, the prevalent practices and trends in the PPP world, and the majority of support expressed in favour of the suitability of PPP for housing. The interviews added more detailed dimensions to the understanding of the PPP industry, its functioning and enabling the formation of a comprehensive outlook. The results present the perspective, approaches, and experiences of stakeholders over PPP practices, current trends and scenarios and their take on PPP in housing. 
It shall aid in understanding the challenges prevalent in the PPP approach for implementation in housing and enable the policymakers and industry stakeholders to make provisions for higher uptake to accelerate housing provision.}, subject = {{\"O}ffentlich-private Partnerschaft}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {The classical Internet of things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to the collision in simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection and control solutions to congestion management in the network which are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using the Fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent the collision. In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of linking in the pathway from the pre-congested node. The main goals of this study are to balance energy consumption in network nodes, reducing the rate of lost packets and increasing quality of service in routing. Simulation results proved the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable in improving routing parameters as compared to recent algorithms.}, subject = {Internet der dinge}, language = {en} } @phdthesis{HosseinNezhadShirazi, author = {Hossein Nezhad Shirazi, Ali}, title = {Multi-Scale Modeling of Lithium ion Batteries: a thermal management approach and molecular dynamic studies}, doi = {10.25643/bauhaus-universitaet.4098}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200214-40986}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Rechargeable lithium ion batteries (LIBs) play a very significant role in power supply and storage. In recent decades, LIBs have caught tremendous attention in mobile communication, portable electronics, and electric vehicles. Furthermore, global warming has become a worldwide issue due to the ongoing production of greenhouse gases. It motivates solutions such as renewable sources of energy. Solar and wind energies are the most important ones in renewable energy sources. By technology progress, they will definitely require batteries to store the produced power to make a balance between power generation and consumption. Nowadays,rechargeable batteries such as LIBs are considered as one of the best solutions. They provide high specific energy and high rate performance while their rate of self-discharge is low. 
Performance of LIBs can be improved through the modification of battery characteristics. The size of solid particles in electrodes can impact the specific energy and the cyclability of batteries. It can improve the amount of lithium content in the electrode, which is a vital parameter for the capacity and capability of a battery. There exist different sources of heat generation in LIBs, such as the heat produced during electrochemical reactions and the internal resistance of the battery. The size of the electrode's electroactive particles can directly affect the heat produced in the battery. It will be shown that a smaller solid particle size enhances the thermal characteristics of LIBs. Thermal issues such as overheating, temperature maldistribution in the battery, and thermal runaway have confined the applications of LIBs. Such thermal challenges reduce the life cycle of LIBs. As well, they may lead to dangerous conditions such as fire or even explosion of batteries. However, recent advances in the fabrication of advanced materials such as graphene and carbon nanotubes, with extraordinary thermal conductivity and electrical properties, offer new opportunities to enhance their performance. Since experimental works are expensive, our objective is to use computational methods to investigate the thermal issues in LIBs. Dissipation of the heat produced in the battery can improve the cyclability and specific capacity of LIBs. In real applications, LIB packs consist of several battery cells that are used as the power source. Therefore, it is worthwhile to investigate the thermal characteristics of battery packs under their charging/discharging cycles at different applied current rates. To remove the produced heat, batteries can be surrounded by materials with high thermal conductivity. Paraffin wax absorbs a high amount of energy since it has a high latent heat. This absorption of large amounts of energy occurs at a nearly constant temperature during the phase change. As well, the thermal conductivity of paraffin can be magnified with nano-materials such as graphene, CNT, and fullerene to form a nano-composite medium. Improving the thermal conductivity of LIBs increases the heat dissipation from batteries, which is a vital issue in battery thermal management systems. The application of two-dimensional (2D) materials has been on the rise since the exfoliation of graphene from bulk graphite. 2D materials are single-layered, with thicknesses on the order of nanometers, and show superior thermal, mechanical, and optoelectronic properties. They are potential candidates for energy storage and supply, particularly as electrode materials in lithium ion batteries. The high thermal conductivity of graphene and graphene-like materials can play a significant role in the thermal management of batteries. However, defects always exist in nano-materials since there is no ideal fabrication process. One of the most important defects in materials is the nano-crack, which can dramatically weaken the mechanical properties of the material. Newly synthesized crystalline carbon nitride with the stoichiometry of C3N has attracted much attention due to its extraordinary mechanical and thermal properties. The other nano-material is phagraphene, which shows anisotropic mechanical characteristics that are ideal for the production of nanocomposites. It shows ductile fracture behavior when subjected to uniaxial loadings. It is worthwhile to investigate the thermo-mechanical properties of these materials in their pristine and defective states.
We hope that the findings of our work will not only be useful for experimental and theoretical research but will also help in designing advanced electrodes for LIBs.}, subject = {Akkumulator}, language = {en} } @article{ArtusKoch, author = {Artus, Mathias and Koch, Christian}, title = {State of the art in damage information modeling for RC bridges - A literature review}, series = {Advanced Engineering Informatics}, volume = {2020}, journal = {Advanced Engineering Informatics}, number = {volume 46, article 101171}, publisher = {Elsevier Science}, address = {Amsterdam}, doi = {10.1016/j.aei.2020.101171}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220506-46390}, pages = {1 -- 16}, abstract = {In Germany, bridges have an average age of 40 years. A bridge consumes between 0.4\% and 2\% of its construction cost per year over its entire life cycle. This means that up to 80\% of the construction cost is additionally needed for operation, inspection, maintenance, and destruction. Current practices rely either on paper-based inspections or on abstract specialist software. Every application in the inspection and maintenance sector uses its own data model for structures, inspections, defects, and maintenance. Due to this, data and properties have to be transferred manually; otherwise a converter is necessary for every data exchange between two applications. To overcome this issue, an adequate model standard for inspections, damage, and maintenance is necessary. Modern 3D models may serve as a single source of truth, which has been suggested in the Building Information Modeling (BIM) concept. Further, these models offer a clear visualization of the built infrastructure, and improve not only the planning and construction phases, but also the operation phase of construction projects. BIM is established mostly in the Architecture, Engineering, and Construction (AEC) sector to plan and construct new buildings. Currently, BIM does not cover the whole life cycle of a building, especially not inspection and maintenance. Creating damage models requires the building model first, because a defect is dependent on the building component, its properties and material. Hence, a building information model is necessary to obtain meaningful conclusions from damage information. This paper analyzes the requirements, which arise from practice, and the research that has been done in modeling damage and related information for bridges. With a look at damage categories and use cases related to inspection and maintenance, scientific literature is discussed and synthesized. Finally, research gaps and needs are identified and discussed.}, subject = {Building Information Modeling}, language = {en} }
In this regard, the principal component analysis was performed on the input time series decomposed by a discrete wavelet transform to feed the ANN/ANFIS models. The models were applied for dissolved oxygen (DO) forecasting in rivers, where DO is an important variable affecting aquatic life and water quality. The current values of DO, water surface temperature, salinity, and turbidity have been considered as the input variables to forecast DO three time steps ahead. The results of the study revealed that PCA can be employed as a powerful tool for dimension reduction of input variables and also to detect inter-correlation of input variables. Results of the PCA-wavelet-ANN models are compared with those obtained from the wavelet-ANN models, with the former having the advantage of less computational time than the latter. For the ANFIS models, PCA is even more beneficial, as it avoids the wavelet-ANFIS models creating too many rules, which deteriorates the efficiency of the ANFIS models. Moreover, manipulating the wavelet-ANFIS models utilizing PCA leads to a significant decrease in computational time. Finally, it was found that the PCA-wavelet-ANN/ANFIS models can provide reliable forecasts of dissolved oxygen as an important water quality indicator in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @misc{Freire, type = {Master Thesis}, author = {Freire, Kamai}, title = {Panafricanism and African Revolution in Brazilian Music}, doi = {10.25643/bauhaus-universitaet.4353}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210216-43536}, school = {Hochschule f{\"u}r Musik FRANZ LISZT}, pages = {163}, abstract = {This research departs from the teachings of Kwame Ture on the difference between mobilization and organization in the panafricanist struggle to then analyze the use of music within the anti-racist and anti-colonialist struggle in Brazil.}, subject = {Saz}, language = {en} } @article{MousaviSteinkeJuniorTeixeiraetal., author = {Mousavi, Seyed Nasrollah and Steinke J{\´u}nior, Renato and Teixeira, Eder Daniel and Bocchiola, Daniele and Nabipour, Narjes and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Predictive Modeling the Free Hydraulic Jumps Pressure through Advanced Statistical Methods}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 3, 323}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8030323}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200402-41140}, pages = {16}, abstract = {Pressure fluctuations beneath hydraulic jumps potentially endanger the stability of stilling basins. This paper deals with the mathematical modeling of the results of laboratory-scale experiments to estimate the extreme pressures. Experiments were carried out on a smooth stilling basin underneath free hydraulic jumps downstream of an Ogee spillway. From the probability distribution of measured instantaneous pressures, pressures with different probabilities could be determined. It was verified that the maximum pressure fluctuations, and the negative pressures, are located near the spillway toe. Also, minimum pressure fluctuations are located downstream of the hydraulic jumps. It was possible to assess the cumulative curves of pressure data related to the characteristic points along the basin and for different Froude numbers.
To benchmark the results, the dimensionless forms of statistical parameters include mean pressures (P*m), the standard deviations of pressure fluctuations (σ*X), pressures with different non-exceedance probabilities (P*k\%), and the statistical coefficient of the probability distribution (Nk\%) were assessed. It was found that an existing method can be used to interpret the present data, and pressure distribution in similar conditions, by using a new second-order fractional relationships for σ*X, and Nk\%. The values of the Nk\% coefficient indicated a single mean value for each probability.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to hit the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners with the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis on each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost. This exhibits the need for a fast, reliable, and rapid method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building's data consists of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} } @article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of adsorbed styrene acrylate copolymer (SA) particles on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, the visible spectrophotometry (VIS) and the thermo-gravimetry coupled with mass spectrometry (TG-MS). 
Considering the advantages and disadvantages of each method, including the respectively required sample preparation, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. There is a tendency for significantly lower amounts of adsorbed polymers to be identified using TG-MS compared to the values determined with the depletion method. Spectrophotometrically generated values lay in between these extremes. This tendency was found for three of the four cement pastes examined and originates from sample preparation and methodological limitations. The main influencing factor is the falsification of the polymer concentration in the liquid phase during centrifugation. Interactions at the interface between sediment and supernatant are the cause. The newly developed method, using TG-MS for the quantification of SA particles, proved to be suitable for dealing with these revealed issues. Here, instead of the fluid phase, the sediment is examined with regard to the polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} } @article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for residence and usage of urban infrastructure has increased, thereby elevating the risk to human lives from natural calamities. The occupancy demand has rapidly increased the construction rate, whereas the inadequate design of structures makes them more vulnerable. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. The structural collapse causes an economic loss as well as setbacks for human lives. An application of different theoretical methods to analyze the structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performances is necessary for future developments. The process, as mentioned earlier, is known as Rapid Visual Screening (RVS). This technique has been generated to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality does not provide some of the required parameters; in this case, the RVS process turns into a tedious scenario. Hence, to tackle such a situation, multiple-criteria decision-making (MCDM) methods for the seismic vulnerability assessment open a new gateway. The different parameters required by RVS can be taken into account in MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper has aimed to bridge the gap between RVS and MCDM. Furthermore, to define the correlation between these techniques, implementation of the methodologies from Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes has been done.
The effects of seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} } @article{HarirchianLahmer, author = {Harirchian, Ehsan and Lahmer, Tom}, title = {Improved Rapid Visual Earthquake Hazard Safety Evaluation of Existing Buildings Using a Type-2 Fuzzy Logic Model}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 3, 2375}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10072375}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200331-41161}, pages = {14}, abstract = {Rapid Visual Screening (RVS) is a procedure that estimates structural scores for buildings and prioritizes their retrofit and upgrade requirements. Despite the speed and simplicity of RVS, many of the collected parameters are non-commensurable and include subjectivity due to visual observations. This might cause uncertainties in the evaluation, which emphasizes the use of a fuzzy-based method. This study aims to propose a novel RVS methodology based on the interval type-2 fuzzy logic system (IT2FLS) to set the priority of vulnerable building to undergo detailed assessment while covering uncertainties and minimizing their effects during evaluation. The proposed method estimates the vulnerability of a building, in terms of Damage Index, considering the number of stories, age of building, plan irregularity, vertical irregularity, building quality, and peak ground velocity, as inputs with a single output variable. Applicability of the proposed method has been investigated using a post-earthquake damage database of reinforced concrete buildings from the Bing{\"o}l and D{\"u}zce earthquakes in Turkey.}, subject = {Fuzzy-Logik}, language = {en} } @phdthesis{Reformat, author = {Reformat, Martin}, title = {Zementmahlung - Untersuchungen zum Zusammenhang von Mahlaggregat und Materialeigenschaften}, isbn = {978-3-00-067121-0}, doi = {10.25643/bauhaus-universitaet.4279}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201102-42794}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {224}, abstract = {Die Mahlung als Zerkleinerungsprozess stellt seit den Anf{\"a}ngen der Menschheit eine der wichtigsten Verarbeitungsformen von Materialien aller Art dar - von der Getreidemahlung, {\"u}ber das Aufschließen von Heilkr{\"a}utern in M{\"o}rsern bis hin zur Herstellung von Tonern f{\"u}r Drucker und Kopierer. Besonders die Zementmahlung ist in modernen Gesellschaften sowohl ein wirtschaftlicher als auch ein {\"o}kologischer Faktor. Mehr als zwei Drittel der elektrischen Energie der Zementproduktion werden f{\"u}r Rohmehl- und Klinker- bzw. Kompositmaterialmahlung verbraucht. Dies ist nur ein Grund, warum der Mahlprozess zunehmend in den Fokus vieler Forschungs- und Entwicklungsvorhaben r{\"u}ckt. Die Komplexit{\"a}t der Zementmahlung steigt im zunehmenden Maße an. Die simple „Mahlung auf Zementfeinheit" ist seit langem obsolet. Zemente werden maßgeschneidert, mit verschiedensten Kombinationsprodukten, getrennt oder gemeinsam, in unterschiedlichen Mahlaggregaten oder mit ganz neuen Ans{\"a}tzen gefertigt. Dar{\"u}ber hinaus gewinnt auch der Sektor des Baustoffrecyclings, mit allen damit verbundenen Herausforderungen, immer mehr an Bedeutung. Bei der Fragestellung, wie der Mahlprozess einerseits leistungsf{\"a}hige Produkte erzeugen kann und andererseits die zunehmenden Anforderungen an Nachhaltigkeit erf{\"u}llt, steht das Mahlaggregat im Mittelpunkt der Betrachtungen. 
Dementsprechend gliedert sich, neben einer eingehenden Literaturrecherche zum Wissensstand, die vorliegende Arbeit in zwei {\"u}bergeordnete Teile: Im ersten Teil werden Untersuchungen an konventionellen Mahlaggregaten mit in der Zementindustrie verwendeten Kernprodukten wie Portlandzementklinker, Kalkstein, Flugasche und H{\"u}ttensand angestellt. Um eine m{\"o}glichst effektive Mahlung von Zement und Kompositmaterialien zu gew{\"a}hrleisten, ist es wichtig, die Auswirkung von M{\"u}hlenparametern zu kennen. Hierf{\"u}r wurde eine umfangreiche Versuchsmatrix aufgestellt und abgearbeitet. Das Spektrum der Analysemethoden war ebenfalls umfangreich und wurde sowohl auf die gemahlenen Materialien als auch auf die daraus hergestellten Zemente und Betone angewendet. Es konnte gezeigt werden, dass vor allem die Unterscheidung zwischen Mahlk{\"o}rperm{\"u}hlen und mahlk{\"o}rperlosen M{\"u}hlen entscheidenden Einfluss auf die Granulometrie und somit auch auf die Zementperformance hat. Besonders stark wurden die Verarbeitungseigenschaften, insbesondere der Wasseranspruch und damit auch das Porengef{\"u}ge und schließlich Druckfestigkeiten sowie Dauerhaftigkeitseigenschaften der aus diesen Zementen hergestellten Betone, beeinflusst. Bei Untersuchungen zur gemeinsamen Mahlung von Kalkstein und Klinker f{\"u}hrten ung{\"u}nstige Anreicherungseffekte des gut mahlbaren Kalksteins sowie tonigen Nebenbestandteilen zu einer schlechteren Performance in allen Zementpr{\"u}fungen. Der zweite Teil widmet sich der Hochenergiemahlung. Die dahinterstehende Technik wird seit Jahrzehnten in anderen Wirtschaftsbranchen, wie der Pharmazie, Biologie oder auch Lebensmittelindustrie angewendet und ist seit einiger Zeit auch in der Zementforschung anzutreffen. Beispielhaft seien hier die Planeten- und R{\"u}hrwerkskugelm{\"u}hle als Vertreter genannt. Neben grundlegenden Untersuchungen an Zementklinker und konventionellen Kompositmaterialien wie H{\"u}ttensand und Kalkstein wurde auch die Haupt-Zementklinkerphase Alit untersucht. Die Hochenergiemahlung von konventionellen Kompositmaterialien generierte zus{\"a}tzliche Reaktivit{\"a}t bei gleicher Granulometrie gegen{\"u}ber der herk{\"o}mmlichen Mahlung. Dies wurde vor allem bei per se reaktivem Zementklinker als auch bei latent-hydraulischem H{\"u}ttensand beobachtet. Gemahlene Flugaschen konnten nur im geringen Maße weiter aktiviert werden. Der generelle Einfluss von Oberfl{\"a}chenvergr{\"o}ßerung, Strukturdefekten und Relaxationseffekten eines Mahlproduktes wurden eingehend untersucht und gewichtet. Die Ergebnisse bei der Hochenergiemahlung von Alit zeigten, dass die durch Mahlung eingebrachten Strukturdefekte eine Erh{\"o}hung der Reaktivit{\"a}t zur Folge haben. Hierbei konnte festgestellt werden, das maßgeblich Oberfl{\"a}chendefekte, strukturelle (Volumen-)defekte und als Konterpart Selbstheilungseffekte die reaktivit{\"a}tsbestimmenden Faktoren sind. Weiterhin wurden Versuche zur Mahlung von Altbetonbrechsand durchgef{\"u}hrt. Im Speziellen wurde untersucht, inwieweit eine R{\"u}ckf{\"u}hrung von Altbetonbrechsand, als unverwertbarer Teil des Betonbruchs, in Form eines Zement-Kompositmaterials in den Baustoffkreislauf m{\"o}glich ist. Die hierf{\"u}r verwendete Mahltechnik umfasst sowohl konventionelle M{\"u}hlen als auch Hochenergiem{\"u}hlen. Es wurden Kompositzemente mit variiertem Recyclingmaterialanteil hergestellt und auf grundlegende Eigenschaften untersucht. 
Zur Bewertung der Produktqualit{\"a}t wurde der sogenannte „Aktivierungskoeffizient" eingef{\"u}hrt. Es stellte sich heraus, dass die R{\"u}ckf{\"u}hrung von Altbetonbrechsand als potentielles Kompositmaterial wesentlich vom Anteil des Zementsteins abh{\"a}ngt. So konnte beispielsweise reiner Zementstein als aufgemahlenes Kompositmaterial eine bessere Performance gegen{\"u}ber dem mit Gesteinsk{\"o}rnung beaufschlagtem Altbetonbrechsand ausweisen. Bezogen auf die gemessenen Hydratationsw{\"a}rmen und Druckfestigkeiten nahm der Aktivierungskoeffzient mit fallendem Abstraktionsgrad ab. Ebenfalls sank der Aktivierungskoeffizient mit steigendem Substitutionsgrad. Als Vergleich wurden dieselben Materialien in konventionellen M{\"u}hlen aufbereitet. Die hier erzielten Ergebnisse k{\"o}nnen teilweise der Hochenergiemahlung als gleichwertig beurteilt werden. Folglich ist bei der Aktivierung von Recyclingmaterialien weniger die Mahltechnik als der Anteil an aktivierbarem Zementstein ausschlaggebend.}, subject = {Zement}, language = {de} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, using the advancements in computational science and numerical modeling shall equip humanity to predict its severity, understand the outcomes, and equip for post-disaster management. Many buildings exist amidst the developed metropolitan areas, which are senile and still in service. These buildings were also designed before establishing national seismic codes or without the introduction of construction regulations. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the existing structure's performance. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through performing structural analysis, as it might be unrealistic because of the rigorous computations, long period, and substantial expenditure. Therefore, it calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers. 
Based on the study and the results, the ML model using SVM classifies the given input data into their respective classes and performs well in the hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} }