@phdthesis{AbuBakar, author = {Abu Bakar, Ilyani Akmar}, title = {Computational Analysis of Woven Fabric Composites: Single- and Multi-Objective Optimizations and Sensitivity Analysis in Meso-scale Structures}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.4176}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200605-41762}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {151}, abstract = {This study permits a reliability analysis to solve the mechanical behaviour issues existing in the current structural design of fabric structures. Purely predictive material models are highly desirable to facilitate an optimized design scheme and to significantly reduce time and cost at the design stage, such as experimental characterization. The present study examined the role of three major tasks; a) single-objective optimization, b) sensitivity analyses and c) multi-objective optimization on proposed weave structures for woven fabric composites. For single-objective optimization task, the first goal is to optimize the elastic properties of proposed complex weave structure under unit cells basis based on periodic boundary conditions. We predict the geometric characteristics towards skewness of woven fabric composites via Evolutionary Algorithm (EA) and a parametric study. We also demonstrate the effect of complex weave structures on the fray tendency in woven fabric composites via tightness evaluation. We utilize a procedure which does not require a numerical averaging process for evaluating the elastic properties of woven fabric composites. The fray tendency and skewness of woven fabrics depends upon the behaviour of the floats which is related to the factor of weave. Results of this study may suggest a broader view for further research into the effects of complex weave structures or may provide an alternative to the fray and skewness problems of current weave structure in woven fabric composites. 
A comprehensive study is developed on the complex weave structure model which adopts the dry woven fabric of the most potential pattern in single-objective optimization incorporating the uncertainties parameters of woven fabric composites. The comprehensive study covers the regression-based and variance-based sensitivity analyses. The second task goal is to introduce the fabric uncertainties parameters and elaborate how they can be incorporated into finite element models on macroscopic material parameters such as elastic modulus and shear modulus of dry woven fabric subjected to uni-axial and biaxial deformations. Significant correlations in the study would indicate the need for a thorough investigation of woven fabric composites under uncertainties parameters. The study described here could serve as an alternative to identify effective material properties without prolonged time consumption and expensive experimental tests. The last part focuses on a hierarchical stochastic multi-scale optimization approach (fine-scale and coarse-scale optimizations) under geometrical uncertainties parameters for hybrid composites considering complex weave structure. The fine-scale optimization is to determine the best lamina pattern that maximizes its macroscopic elastic properties, conducted by EA under the following uncertain mesoscopic parameters: yarn spacing, yarn height, yarn width and misalignment of yarn angle. The coarse-scale optimization has been carried out to optimize the stacking sequences of symmetric hybrid laminated composite plate with uncertain mesoscopic parameters by employing the Ant Colony Algorithm (ACO). The objective functions of the coarse-scale optimization are to minimize the cost (C) and weight (W) of the hybrid laminated composite plate considering the fundamental frequency and the buckling load factor as the design constraints. 
Based on the uncertainty criteria of the design parameters, the appropriate variation required for the structural design standards can be evaluated using the reliability tool, and then an optimized design decision in consideration of cost can be subsequently determined.}, subject = {Verbundwerkstoff}, language = {en} } @article{SadeghzadehMaddahAhmadietal., author = {Sadeghzadeh, Milad and Maddah, Heydar and Ahmadi, Mohammad Hossein and Khadang, Amirhosein and Ghazvini, Mahyar and Mosavi, Amir Hosein and Nabipour, Narjes}, title = {Prediction of Thermo-Physical Properties of TiO2-Al2O3/Water Nanoparticles by Using Artificial Neural Network}, series = {Nanomaterials}, volume = {2020}, journal = {Nanomaterials}, number = {Volume 10, Issue 4, 697}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/nano10040697}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200421-41308}, abstract = {In this paper, an artificial neural network is implemented for the sake of predicting the thermal conductivity ratio of TiO2-Al2O3/water nanofluid. TiO2-Al2O3/water in the role of an innovative type of nanofluid was synthesized by the sol-gel method. The results indicated that 1.5 vol.\% of nanofluids enhanced the thermal conductivity by up to 25\%. It was shown that the heat transfer coefficient was linearly augmented with increasing nanoparticle concentration, but its variation with temperature was nonlinear. It should be noted that the increase in concentration may cause the particles to agglomerate, and then the thermal conductivity is reduced. The increase in temperature also increases the thermal conductivity, due to an increase in the Brownian motion and collision of particles. In this research, for the sake of predicting the thermal conductivity of TiO2-Al2O3/water nanofluid based on volumetric concentration and temperature functions, an artificial neural network is implemented. 
In this way, for predicting thermal conductivity, SOM (self-organizing map) and BP-LM (Back Propagation-Levenberg-Marquardt) algorithms were used. Based on the results obtained, these algorithms can be considered as an exceptional tool for predicting thermal conductivity. Additionally, the correlation coefficient values were equal to 0.938 and 0.98 when implementing the SOM and BP-LM algorithms, respectively, which is highly acceptable.}, subject = {W{\"a}rmeleitf{\"a}higkeit}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it on big data comes with computational challenges. Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a large computational cost so that it is no longer applicable by a single computing machine. One of the proposed techniques to make classification methods applicable on large datasets is pruning. LC-KNN is an improved KNN method which first clusters the data into some smaller partitions using the K-means clustering method; and then applies the KNN for each new sample on the partition whose center is the nearest one. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. 
In this paper, an approach has been proposed to improve the pruning phase of the LC-KNN method by taking into account these factors. The proposed approach helps to choose a more appropriate cluster of data for looking for the neighbors, thus, increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Kavrakov, author = {Kavrakov, Igor}, title = {Synergistic Framework for Analysis and Model Assessment in Bridge Aerodynamics and Aeroelasticity}, publisher = {Bauhaus-Universit{\"a}tsverlag}, address = {Weimar}, isbn = {978-3-95773-284-2}, doi = {10.25643/bauhaus-universitaet.4109}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200316-41099}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {314}, abstract = {Wind-induced vibrations often represent a major design criterion for long-span bridges. This work deals with the assessment and development of models for aerodynamic and aeroelastic analyses of long-span bridges. Computational Fluid Dynamics (CFD) and semi-analytical aerodynamic models are employed to compute the bridge response due to both turbulent and laminar free-stream. For the assessment of these models, a comparative methodology is developed that consists of two steps, a qualitative and a quantitative one. The first, qualitative, step involves an extension of an existing approach based on Category Theory and its application to the field of bridge aerodynamics. Initially, the approach is extended to consider model comparability and completeness. Then, the complexity of the CFD and twelve semi-analytical models are evaluated based on their mathematical constructions, yielding a diagrammatic representation of model quality. 
In the second, quantitative, step of the comparative methodology, the discrepancy of a system response quantity for time-dependent aerodynamic models is quantified using comparison metrics for time-histories. Nine metrics are established on a uniform basis to quantify the discrepancies in local and global signal features that are of interest in bridge aerodynamics. These signal features involve quantities such as phase, time-varying frequency and magnitude content, probability density, non-stationarity, and nonlinearity. The two-dimensional (2D) Vortex Particle Method is used for the discretization of the Navier-Stokes equations including a Pseudo-three dimensional (Pseudo-3D) extension within an existing CFD solver. The Pseudo-3D Vortex Method considers the 3D structural behavior for aeroelastic analyses by positioning 2D fluid strips along a line-like structure. A novel turbulent Pseudo-3D Vortex Method is developed by combining the laminar Pseudo-3D VPM and a previously developed 2D method for the generation of free-stream turbulence. Using analytical derivations, it is shown that the fluid velocity correlation is maintained between the CFD strips. Furthermore, a new method is presented for the determination of the complex aerodynamic admittance under deterministic sinusoidal gusts using the Vortex Particle Method. The sinusoidal gusts are simulated by modeling the wakes of flapping airfoils in the CFD domain with inflow vortex particles. Positioning a section downstream yields sinusoidal forces that are used for determining all six components of the complex aerodynamic admittance. A closed-form analytical relation is derived, based on an existing analytical model. With this relation, the inflow particles' strength can be related with the target gust amplitudes a priori. The developed methodologies are combined in a synergistic framework, which is applied to both fundamental examples and practical case studies. 
Where possible, the results are verified and validated. The outcome of this work is intended to shed some light on the complex wind-bridge interaction and suggest appropriate modeling strategies for an enhanced design.}, subject = {Br{\"u}cke}, language = {en} } @article{ShamshirbandBabanezhadMosavietal., author = {Shamshirband, Shahaboddin and Babanezhad, Meisam and Mosavi, Amir and Nabipour, Narjes and Hajnal, Eva and Nadai, Laszlo and Chau, Kwok-Wing}, title = {Prediction of flow characteristics in the bubble column reactor by the artificial pheromone-based communication of biological ants}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1715842}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200227-41013}, pages = {367--378}, abstract = {A novel combination of the ant colony optimization algorithm (ACO) and computational fluid dynamics (CFD) data is proposed for modeling the multiphase chemical reactors. The proposed intelligent model presents a probabilistic computational strategy for predicting various levels of three-dimensional bubble column reactor (BCR) flow. 
The results prove an enhanced communication between ant colony prediction and CFD data in different sections of the BCR.}, subject = {Maschinelles Lernen}, language = {en} } @article{AmirinasabShamshirbandChronopoulosetal., author = {Amirinasab, Mehdi and Shamshirband, Shahaboddin and Chronopoulos, Anthony Theodore and Mosavi, Amir and Nabipour, Narjes}, title = {Energy-Efficient Method for Wireless Sensor Networks Low-Power Radio Operation in Internet of Things}, series = {electronics}, volume = {2020}, journal = {electronics}, number = {volume 9, issue 2, 320}, publisher = {MDPI}, doi = {10.3390/electronics9020320}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40954}, pages = {20}, abstract = {The radio operation in wireless sensor networks (WSN) in Internet of Things (IoT) applications is the most common source for power consumption. Consequently, recognizing and controlling the factors affecting radio operation can be valuable for managing the node power consumption. Among essential factors affecting radio operation, the time spent for checking the radio is of utmost importance for monitoring power consumption. It can lead to false WakeUp or idle listening in radio duty cycles and ContikiMAC. ContikiMAC is a low-power radio duty-cycle protocol in Contiki OS used in WakeUp mode, as a clear channel assessment (CCA) for checking radio status periodically. This paper presents a detailed analysis of radio WakeUp time factors of ContikiMAC. Furthermore, we propose a lightweight CCA (LW-CCA) as an extension to ContikiMAC to reduce the Radio Duty-Cycles in false WakeUps and idle listening through using dynamic received signal strength indicator (RSSI) status check time. 
The simulation results in the Cooja simulator show that LW-CCA reduces about 8\% energy consumption in nodes while maintaining up to 99\% of the packet delivery rate (PDR).}, subject = {Internet der Dinge}, language = {en} } @article{NabipourDehghaniMosavietal., author = {Nabipour, Narjes and Dehghani, Majid and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Short-Term Hydrological Drought Forecasting Based on Different Nature-Inspired Optimization Algorithms Hybridized With Artificial Neural Networks}, series = {IEEE Access}, volume = {2020}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2964584}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40796}, pages = {15210--15222}, abstract = {Hydrological drought forecasting plays a substantial role in water resources management. Hydrological drought highly affects the water allocation and hydropower generation. In this research, short term hydrological drought forecasted based on the hybridized of novel nature-inspired optimization algorithms and Artificial Neural Networks (ANN). For this purpose, the Standardized Hydrological Drought Index (SHDI) and the Standardized Precipitation Index (SPI) were calculated in one, three, and six aggregated months. Then, three states were proposed for SHDI forecasting, and 36 input-output combinations were extracted based on the cross-correlation analysis. In the next step, newly proposed optimization algorithms, including Grasshopper Optimization Algorithm (GOA), Salp Swarm algorithm (SSA), Biogeography-based optimization (BBO), and Particle Swarm Optimization (PSO) hybridized with the ANN were utilized for SHDI forecasting and the results compared to the conventional ANN. Results indicated that the hybridized model outperformed compared to the conventional ANN. PSO performed better than the other optimization algorithms. 
The best models forecasted SHDI1 with R2 = 0.68 and RMSE = 0.58, SHDI3 with R 2 = 0.81 and RMSE = 0.45 and SHDI6 with R 2 = 0.82 and RMSE = 0.40.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Oucif, author = {Oucif, Chahmi}, title = {Analytical Modeling of Self-Healing and Super Healing in Cementitious Materials}, doi = {10.25643/bauhaus-universitaet.4229}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200831-42296}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {208}, abstract = {Self-healing materials have recently become more popular due to their capability to autonomously and autogenously repair the damage in cementitious materials. The concept of self-healing gives the damaged material the ability to recover its stiffness. This gives a difference in comparing with a material that is not subjected to healing. Once this material is damaged, it cannot sustain loading due to the stiffness degradation. Numerical modeling of self-healing materials is still in its infancy. Multiple experimental researches were conducted in literature to describe the behavior of self-healing of cementitious materials. However, few numerical investigations were undertaken. The thesis presents an analytical framework of self-healing and super healing materials based on continuum damage-healing mechanics. Through this framework, we aim to describe the recovery and strengthening of material stiffness and strength. A simple damage healing law is proposed and applied on concrete material. The proposed damage-healing law is based on a new time-dependent healing variable. The damage-healing model is applied on isotropic concrete material at the macroscale under tensile load. Both autonomous and autogenous self-healing mechanisms are simulated under different loading conditions. These two mechanisms are denoted in the present work by coupled and uncoupled self-healing mechanisms, respectively. 
We assume in the coupled self-healing that the healing occurs at the same time with damage evolution, while we assume in the uncoupled self-healing that the healing occurs when the material is deformed and subjected to a rest period (damage is constant). In order to describe both coupled and uncoupled healing mechanisms, a one-dimensional element is subjected to different types of loading history. In the same context, derivation of nonlinear self-healing theory is given, and comparison of linear and nonlinear damage-healing models is carried out using both coupled and uncoupled self-healing mechanisms. The nonlinear healing theory includes generalized nonlinear and quadratic healing models. The healing efficiency is studied by varying the values of the healing rest period and the parameter describing the material characteristics. In addition, theoretical formulation of different self-healing variables is presented for both isotropic and anisotropic materials. The healing variables are defined based on the recovery in elastic modulus, shear modulus, Poisson's ratio, and bulk modulus. The evolution of the healing variable calculated based on cross-section as function of the healing variable calculated based on elastic stiffness is presented in both hypotheses of elastic strain equivalence and elastic energy equivalence. The components of the fourth-rank healing tensor are also obtained in the case of isotropic elasticity, plane stress and plane strain. Recent research revealed that self-healing presents a crucial solution also for the strengthening of the materials. This new concept has been termed ``Super Healing''. Once the stiffness of the material is recovered, further healing can result as a strengthening material. In the present thesis, new theory of super healing materials is defined in isotropic and anisotropic cases using sound mathematical and mechanical principles which are applied in linear and nonlinear super healing theories. 
Additionally, the link of the proposed theory with the theory of undamageable materials is outlined. In order to describe the super healing efficiency in linear and nonlinear theories, the ratio of effective stress to nominal stress is calculated as function of the super healing variable. In addition, the hypotheses of elastic strain and elastic energy equivalence are applied. In the same context, new super healing matrix in plane strain is proposed based on continuum damage-healing mechanics. In the present work, we also focus on numerical modeling of impact behavior of reinforced concrete slabs using the commercial finite element package Abaqus/Explicit. Plain and reinforced concrete slabs of unconfined compressive strength 41 MPa are simulated under impact of ogive-nosed hard projectile. The constitutive material modeling of the concrete and steel reinforcement bars is performed using the Johnson-Holmquist-2 damage and the Johnson-Cook plasticity material models, respectively. Damage diameters and residual velocities obtained by the numerical model are compared with the experimental results and effect of steel reinforcement and projectile diameter is studied.}, subject = {Schaden}, language = {en} } @phdthesis{Chan, author = {Chan, Chiu Ling}, title = {Smooth representation of thin shells and volume structures for isogeometric analysis}, doi = {10.25643/bauhaus-universitaet.4208}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200812-42083}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {162}, abstract = {The purpose of this study is to develop self-contained methods for obtaining smooth meshes which are compatible with isogeometric analysis (IGA). The study contains three main parts. We start by developing a better understanding of shapes and splines through the study of an image-related problem. Then we proceed towards obtaining smooth volumetric meshes of the given voxel-based images. 
Finally, we treat the smoothness issue on the multi-patch domains with C1 coupling. Following are the highlights of each part. First, we present a B-spline convolution method for boundary representation of voxel-based images. We adopt the filtering technique to compute the B-spline coefficients and gradients of the images effectively. We then implement the B-spline convolution for developing a non-rigid images registration method. The proposed method is in some sense of ``isoparametric'', for which all the computation is done within the B-splines framework. Particularly, updating the images by using B-spline composition promotes smooth transformation map between the images. We show the possible medical applications of our method by applying it for registration of brain images. Secondly, we develop a self-contained volumetric parametrization method based on the B-splines boundary representation. We aim to convert a given voxel-based data to a matching C1 representation with hierarchical cubic splines. The concept of the osculating circle is employed to enhance the geometric approximation, where it is done by a single template and linear transformations (scaling, translations, and rotations) without the need for solving an optimization problem. Moreover, we use the Laplacian smoothing and refinement techniques to avoid irregular meshes and to improve mesh quality. We show with several examples that the method is capable of handling complex 2D and 3D configurations. In particular, we parametrize the 3D Stanford bunny which contains irregular shapes and voids. Finally, we propose the B{\'e}zier ordinates approach and splines approach for C1 coupling. In the first approach, the new basis functions are defined in terms of the B{\'e}zier Bernstein polynomials. For the second approach, the new basis is defined as a linear combination of C0 basis functions. The methods are not limited to planar or bilinear mappings. 
They allow the modeling of solutions to fourth order partial differential equations (PDEs) on complex geometric domains, provided that the given patches are G1 continuous. Both methods have their advantages. In particular, the B{\'e}zier approach offers more degrees of freedom, while the spline approach is more computationally efficient. In addition, we proposed partial degree elevation to overcome the C1-locking issue caused by the over constraining of the solution space. We demonstrate the potential of the resulting C1 basis functions for application in IGA which involve fourth order PDEs such as those appearing in Kirchhoff-Love shell models, Cahn-Hilliard phase field application, and biharmonic problems.}, subject = {Modellierung}, language = {en} } @article{MousaviSteinkeJuniorTeixeiraetal., author = {Mousavi, Seyed Nasrollah and Steinke J{\'u}nior, Renato and Teixeira, Eder Daniel and Bocchiola, Daniele and Nabipour, Narjes and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Predictive Modeling the Free Hydraulic Jumps Pressure through Advanced Statistical Methods}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {Volume 8, Issue 3, 323}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8030323}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200402-41140}, pages = {16}, abstract = {Pressure fluctuations beneath hydraulic jumps potentially endanger the stability of stilling basins. This paper deals with the mathematical modeling of the results of laboratory-scale experiments to estimate the extreme pressures. Experiments were carried out on a smooth stilling basin underneath free hydraulic jumps downstream of an Ogee spillway. From the probability distribution of measured instantaneous pressures, pressures with different probabilities could be determined. It was verified that maximum pressure fluctuations, and the negative pressures, are located at the positions near the spillway toe. 
Also, minimum pressure fluctuations are located at the downstream of hydraulic jumps. It was possible to assess the cumulative curves of pressure data related to the characteristic points along the basin, and different Froude numbers. To benchmark the results, the dimensionless forms of statistical parameters include mean pressures (P*m), the standard deviations of pressure fluctuations (σ*X), pressures with different non-exceedance probabilities (P*k\%), and the statistical coefficient of the probability distribution (Nk\%) were assessed. It was found that an existing method can be used to interpret the present data, and pressure distribution in similar conditions, by using a new second-order fractional relationships for σ*X, and Nk\%. The values of the Nk\% coefficient indicated a single mean value for each probability.}, subject = {Maschinelles Lernen}, language = {en} } @article{FathiSajadzadehMohammadiSheshkaletal., author = {Fathi, Sadegh and Sajadzadeh, Hassan and Mohammadi Sheshkal, Faezeh and Aram, Farshid and Pinter, Gergo and Felde, Imre and Mosavi, Amir}, title = {The Role of Urban Morphology Design on Enhancing Physical Activity and Public Health}, series = {International Journal of Environmental Research and Public Health}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health}, number = {Volume 17, Issue 7, 2359}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/ijerph17072359}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200402-41225}, pages = {29}, abstract = {Along with environmental pollution, urban planning has been connected to public health. The research indicates that the quality of built environments plays an important role in reducing mental disorders and overall health. The structure and shape of the city are considered as one of the factors influencing happiness and health in urban communities and the type of the daily activities of citizens. 
The aim of this study was to promote physical activity in the main structure of the city via urban design in a way that the main form and morphology of the city can encourage citizens to move around and have physical activity within the city. Functional, physical, cultural-social, and perceptual-visual features are regarded as the most important and effective criteria in increasing physical activities in urban spaces, based on literature review. The environmental quality of urban spaces and their role in the physical activities of citizens in urban spaces were assessed by using the questionnaire tool and analytical network process (ANP) of structural equation modeling. Further, the space syntax method was utilized to evaluate the role of the spatial integration of urban spaces on improving physical activities. Based on the results, consideration of functional diversity, spatial flexibility and integration, security, and the aesthetic and visual quality of urban spaces plays an important role in improving the physical health of citizens in urban spaces. Further, more physical activities, including motivation for walking and the sense of public health and happiness, were observed in the streets having higher linkage and space syntax indexes with their surrounding texture.}, subject = {Morphologie}, language = {en} } @phdthesis{Rost, author = {Rost, Grit}, title = {Entwicklung eines Toolboxmodells als Planungswerkzeug f{\"u}r ein transdisziplin{\"a}res Wasserressourcenmanagement am Beispiel der Stadt Darkhan, Mongolei}, publisher = {Rhombus}, address = {Berlin}, isbn = {978-3-941216-94-5}, doi = {10.25643/bauhaus-universitaet.4287}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201113-42874}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {172}, abstract = {Im Rahmen der Dissertation wurde ein Toolboxmodell f{\"u}r transdisziplin{\"a}res Wasserressourcenmanagement entwickelt. 
Das Modell liefert den methodischen Rahmen Wasserressourcen nachhaltig und transdisziplin{\"a}r zu bewirtschaften. Der Begriff der Nachhaltigkeit und eine Konkretisierung der nachhaltigen Bewirtschaftung globaler Wasserressourcen scheinen un{\"u}berschaubar und suggerieren die Forderung nach einer neuen Weltformel. Die globale Bedeutung der Wasserressourcen, die f{\"u}r Regionen spezifischen Besonderheiten des nat{\"u}rlichen Wasserhaushalts und der anthropogenen Nutzung, die Zeitskala und die Kontextualisierung in alle betroffenen und benachbarten Disziplinen deuten auf die Komplexit{\"a}t der Thematik hin. Es wird eine Systematisierung des Planungsprozesses von Wasserressourcen notwendig, anhand derer eine holistische Herangehensweise mit einer Strategieentwicklung f{\"u}r Regionen spezifischer Schwerpunktprobleme erfolgt. Ziel der Arbeit ist die Erarbeitung einer Strategie zur Systematisierung nach diesen Forderungen und die Bereitstellung eines Toolboxmodelles als Planungswerkzeug f{\"u}r das transdisziplin{\"a}re Wasserressourcenmanagement. Das Toolboxmodell stellt den konzeptionellen Rahmen f{\"u}r die Bewirtschaftung von Wasserressourcen mit der Anwendung transdisziplin{\"a}rer Forschungsmethoden bereit. Wesentliche Herausforderung bei der Anwendung der transdisziplin{\"a}ren Methode sind die Implementierung verschiedener Skalenbereiche, der Umgang mit der Komplexit{\"a}t von Daten, das Bewahren von Transparenz und Objektivit{\"a}t sowie die Erm{\"o}glichung eines auf andere Regionen {\"u}bertragbaren Planungsprozesses. Die theoretischen Grundlagen naturwissenschaftlicher Forschung zur Nachhaltigkeit haben ihren Ursprung in den biologischen und geographischen Disziplinen. Das Ineinandergreifen naturr{\"a}umlicher Zusammenh{\"a}nge und der Einfluss anthropogener Nutzung und technischer Innovationen auf den Naturhaushalt sind Kern der Kausalit{\"a}t {\"u}bergreifenden Denkens und Verstehens. 
Mit dem Ansatz des integrierten Wasserressourcenmanagements (IWRM) erfolgt die Ber{\"u}cksichtigung wirtschaftlicher und sozio{\"o}konomischer Ziele in den Planungsprozess f{\"u}r {\"o}kologisch nachhaltige Wasserwirtschaft. Das Instrument der Wasserrahmenrichtlinie (EU-WRRL) ist auf eine Gew{\"a}sser{\"o}kologie ausgerichtete Richtlinie, welche die Integration verschiedener Interessenvertreter in den Planungsprozess vorsieht. Das Konzept der neuartigen Sanit{\"a}rsysteme basiert auf Stofffl{\"u}ssen zwischen konkurrierenden Handlungsbereichen, wie Abfall-, Ressourcen- und Landwirtschaft. Den integrierten Ans{\"a}tzen fehlt eine {\"u}bergeordnete gemeinsame Zielstrategie - eine sogenannte Phase Null. Diese Phase Null - das Lernen aller relevanten, konkurrierenden und harmonisierenden Handlungsfelder eines Planungshorizontes wird durch eine transdisziplin{\"a}re Perspektive erm{\"o}glicht. W{\"a}hrend bei der integralen Perspektive eine disziplinorientierte Kooperation im Vordergrund steht, verlangt die transdisziplin{\"a}re Perspektive nach einer problemorientierten Kooperation zwischen den Interessenvertretern (Werlen 2015). Die bestehenden Konzepte und Richtlinien f{\"u}r das nachhaltige Management von Wasserressourcen sind etabliert und evaluiert. Der Literatur zufolge ist eine Weiterentwicklung nach der Perspektive der Transdisziplinarit{\"a}t erforderlich. Das Toolboxmodell f{\"u}r integrales Wasserressourcenmanagement entspricht einem Planungstool bestehend aus Werkzeugen f{\"u}r die Anwendung wissenschaftlicher Methoden. Die Zusammenstellung der Methoden/Werkzeuge erf{\"u}llt im Rahmen die Methode transdisziplin{\"a}rer Forschung. Das Werkzeug zum Aufstellen der relevanten Handlungsfelder umfasst die Charakterisierung eines Untersuchungsgebietes und Planungsrahmens, die kausale Verkn{\"u}pfung des Bewirtschaftungskonzeptes und konkurrierender sowie sich unterst{\"u}tzender Stakeholder. 
Mit dem Werkzeug der Kontextualisierung und Indikatorenaufstellung wird eine Methode der stufenweisen und von einer Skala unabh{\"a}ngigen Bewertung des Umweltzustandes f{\"u}r die Zielpriorisierung vorgenommen. Damit wird das Toolboxmodell dem Problem der Komplexit{\"a}t und Datenverf{\"u}gbarkeit gerecht. Anhand der eingesetzten ABC Methode, werden die Bewertungsgr{\"o}{\ss}en differenziert strukturiert auf verschiedene Skalen und Datenressourcen (A=Ersterkennung, B=Zeigerwerte, C=Modell/Index). Die ABC-Methode erm{\"o}glicht die Planung bereits mit unsicherer und l{\"u}ckenhafter Datengrundlage, ist jederzeit erweiterbar und bietet somit eine operative Wissensgenerierung w{\"a}hrend des Gestaltungsprozesses. F{\"u}r das Werkzeug zur Bewertung und Priorisierung wird der Algorithmus der Composite Programmierung angewandt. Diese Methode der Mehrfachzielplanung erf{\"u}llt den Anspruch der permanenten Erweiterbarkeit und der transparenten und objektiven Entscheidungsfindung. Die Komplexit{\"a}t des transdisziplin{\"a}ren Wasserressourcenmanagements kann durch die Methode der Composite Programmierung systematisiert werden. Das wesentliche Ergebnis der Arbeit stellt die erfolgreiche Erarbeitung und Anwendung des Toolboxmodells f{\"u}r das transdisziplin{\"a}re Wasserressourcenmanagement im Untersuchungsgebiet Stadt Darkhan in der Mongolei dar. Auf Grund seiner besonderen hydrologischen und strukturellen Situation wird die Relevanz eines nachhaltigen Bewirtschaftungskonzeptes deutlich. Im Rahmen des Querschnittsmoduls des MoMo-Projektes wurde eine f{\"u}r das Toolboxmodell geeignete Datengrundlage erarbeitet. Planungsrelevante Handlungsfelder wurden im Rahmen eines Workshops mit verschiedenen Interessenvertretern erarbeitet. Im Ergebnis dessen wurde die Systematik eines Zielbaumes mit Hauptzielen und untergeordneten Teilzielen als Grundlage der Priorisierung nach den holistischen Anspruch der transdisziplin{\"a}ren Forschung aufgestellt. 
F{\"u}r die Messbarkeit, inwieweit Teilziele erreicht sind oder Handlungsbedarf besteht, wurden Indikatoren erarbeitet. Die Indikatoren-Aufstellung erfolgte exemplarisch f{\"u}r das Handlungsfeld Siedlungswasserwirtschaft in allen Skalen des ABC-Systems. Die im BMBF-MoMo Projekt generierte umfassende Datengrundlage erm{\"o}glichte die Anwendung und Evaluierung des Toolboxmodells mit unterschiedlichem quantitativem und qualitativem Dateninput. Verschiedene Kombination von A (Ersterkennung), B (Zeigerwerte) und C (Modell/Index) als Grundlage der Priorisierung mit der Composite Programmierung erm{\"o}glichten die Durchf{\"u}hrung und Bewertung des transdisziplin{\"a}ren Planungstools. Die ermittelten Rangfolgen von Teilzielen mit unterschiedlichen Bewertungsvarianten ergaben {\"a}hnliche Tendenzen. Das ist ein Hinweis daf{\"u}r, dass f{\"u}r die zuk{\"u}nftige Anwendung des Toolboxmodells die operative Wissensgenerierung, d.h. das schrittweise Hinzuf{\"u}gen neu ermittelter, gesicherterer Daten, funktioniert. Eine schwierige Datenverf{\"u}gbarkeit oder eine noch im Prozess befindliche wissenschaftliche Analyse sollen keine Hindernisse f{\"u}r eine schrittweise und erweiterbare Zielpriorisierung und Ma{\ss}nahmenplanung sein. Trotz der Komplexit{\"a}t des transdisziplin{\"a}ren Ansatzes wird durch die Anwendung des Toolboxmodells eine effiziente und zielorientierte Handlungspriorisierung erm{\"o}glicht. Die Effizienz wird erreicht durch ressourcenschonende und flexible, Ziel fokussierte Datenermittlung. Zeit und Kosten im Planungsprozess k{\"o}nnen eingespart werden. 
Die erzielte Priorisierung von letztlich Handlungsempfehlungen erfolgt individuell auf die Eigenart des Untersuchungsgebietes angepasst, was hinsichtlich seiner Wirkung als erfolgsversprechend gilt.}, subject = {Wasserreserve}, language = {de} } @phdthesis{Salavati, author = {Salavati, Mohammad}, title = {Multi-Scale Modeling of Mechanical and Electrochemical Properties of 1D and 2D Nanomaterials, Application in Battery Energy Storage Systems}, doi = {10.25643/bauhaus-universitaet.4183}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200623-41830}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {166}, abstract = {Material properties play a critical role in durable products manufacturing. Estimation of the precise characteristics in different scales requires complex and expensive experimental measurements. Potentially, computational methods can provide a platform to determine the fundamental properties before the final experiment. Multi-scale computational modeling leads to the modeling of the various time, and length scales include nano, micro, meso, and macro scales. These scales can be modeled separately or in correlation with coarser scales. Depend on the interested scales modeling, the right selection of multi-scale methods leads to reliable results and affordable computational cost. The present dissertation deals with the problems in various length and time scales using computational methods include density functional theory (DFT), molecular mechanics (MM), molecular dynamics (MD), and finite element (FE) methods. Physical and chemical interactions in lower scales determine the coarser scale properties. Particles interaction modeling and exploring fundamental properties are significant challenges of computational science. Downscale modelings need more computational effort due to a large number of interacted atoms/particles. 
To deal with this problem and bring up a fine-scale (nano) as a coarse-scale (macro) problem, we extended an atomic-continuum framework. The discrete atomic models solve as a continuum problem using the computationally efficient FE method. MM or force field method based on a set of assumptions approximates a solution on the atomic scale. In this method, atoms and bonds model as a harmonic oscillator with a system of mass and springs. The negative gradient of the potential energy equal to the forces on each atom. In this way, each bond's total potential energy includes bonded, and non-bonded energies are simulated as equivalent structural strain energies. Finally, the chemical nature of the atomic bond is modeled as a piezoelectric beam element that solves by the FE method. Exploring novel materials with unique properties is a demand for various industrial applications. During the last decade, many two-dimensional (2D) materials have been synthesized and shown outstanding properties. Investigation of the probable defects during the formation/fabrication process and studying their strength under severe service life are the critical tasks to explore performance prospects. We studied various defects include nano crack, notch, and point vacancy (Stone-Wales defect) defects employing MD analysis. Classical MD has been used to simulate a considerable amount of molecules at micro-, and meso- scales. Pristine and defective nanosheet structures considered under the uniaxial tensile loading at various temperatures using open-source LAMMPS codes. The results were visualized with the open-source software of OVITO and VMD. Quantum based first principle calculations have been conducting at electronic scales and known as the most accurate Ab initio methods. However, they are computationally expensive to apply for large systems. We used density functional theory (DFT) to estimate the mechanical and electrochemical response of the 2D materials. 
Many-body Schr{\"o}dinger's equation describes the motion and interactions of the solid-state particles. Solid describes as a system of positive nuclei and negative electrons, all electromagnetically interacting with each other, where the wave function theory describes the quantum state of the set of particles. However, dealing with the 3N coordinates of the electrons, nuclei, and N coordinates of the electrons spin components makes the governing equation unsolvable for just a few interacted atoms. Some assumptions and theories like Born Oppenheimer and Hartree-Fock mean-field and Hohenberg-Kohn theories are needed to treat with this equation. First, Born Oppenheimer approximation reduces it to the only electronic coordinates. Then Kohn and Sham, based on Hartree-Fock and Hohenberg-Kohn theories, assumed an equivalent fictitious non-interacting electrons system as an electron density functional such that their ground state energies are equal to a set of interacting electrons. Exchange-correlation energy functionals are responsible for satisfying the equivalency between both systems. The exact form of the exchange-correlation functional is not known. However, there are widely used methods to derive functionals like local density approximation (LDA), Generalized gradient approximation (GGA), and hybrid functionals (e.g., B3LYP). In our study, DFT performed using VASP codes within the GGA/PBE approximation, and visualization/post-processing of the results realized via open-source software of VESTA. The extensive DFT calculations are conducted 2D nanomaterials prospects as anode/cathode electrode materials for batteries. Metal-ion batteries' performance strongly depends on the design of novel electrode material. Two-dimensional (2D) materials have developed a remarkable interest in using as an electrode in battery cells due to their excellent properties. 
Desirable battery energy storage systems (BESS) must satisfy the high energy density, safe operation, and efficient production costs. Batteries have been using in electronic devices and provide a solution to the environmental issues and store the discontinuous energies generated from renewable wind or solar power plants. Therefore, exploring optimal electrode materials can improve storage capacity and charging/discharging rates, leading to the design of advanced batteries. Our results in multiple scales highlight not only the proposed and employed methods' efficiencies but also promising prospect of recently synthesized nanomaterials and their applications as an anode material. In this way, first, a novel approach developed for the modeling of the 1D nanotube as a continuum piezoelectric beam element. The results converged and matched closely with those from experiments and other more complex models. Then mechanical properties of nanosheets estimated and the failure mechanisms results provide a useful guide for further use in prospect applications. Our results indicated a comprehensive and useful vision concerning the mechanical properties of nanosheets with/without defects. Finally, mechanical and electrochemical properties of the several 2D nanomaterials are explored for the first time—their application performance as an anode material illustrates high potentials in manufacturing super-stretchable and ultrahigh-capacity battery energy storage systems (BESS). Our results exhibited better performance in comparison to the available commercial anode materials.}, subject = {Batterie}, language = {en} }