@phdthesis{Alkam, author = {Alkam, Feras}, title = {Vibration-based Monitoring of Concrete Catenary Poles using Bayesian Inference}, volume = {2021}, publisher = {Bauhaus-Universit{\"a}tsverlag}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4433}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210526-44338}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {This work presents a robust status monitoring approach for detecting damage in cantilever structures based on logistic functions. In addition, a stochastic damage identification approach based on changes of eigenfrequencies is proposed. The proposed algorithms are verified using catenary poles of electrified railway tracks. The proposed damage features overcome the limitation of frequency-based damage identification methods available in the literature, which can detect damage in structures only at Level 1. Changes in eigenfrequencies of cantilever structures are shown to be sufficient to identify possible local damage up to Level 3, i.e., to cover damage detection, localization, and quantification. The proposed algorithms identified the damage with relatively small errors, even at a high noise level.}, subject = {Parameteridentifikation}, language = {en} } @phdthesis{Brehm2011, author = {Brehm, Maik}, title = {Vibration-based model updating: Reduction and quantification of uncertainties}, doi = {10.25643/bauhaus-universitaet.1465}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20110926-15553}, school = {Bauhaus-Universit{\"a}t Weimar}, year = {2011}, abstract = {Numerical models and their combination with advanced solution strategies are standard tools for many engineering disciplines to design or redesign structures and to optimize designs with respect to specific requirements. As the successful application of numerical models depends on their suitability to represent the behavior related to the intended use, they should be validated against experimentally obtained results. If the discrepancy between numerically derived and experimentally obtained results is not acceptable, a revision of the model or of the experiment needs to be considered. Model revision is divided into two classes: model updating and the basic revision of the numerical model. The presented thesis is related to a special branch of model updating, vibration-based model updating. Vibration-based model updating is a tool to improve the correlation of the numerical model by adjusting uncertain model input parameters by means of results extracted from vibration tests. Evidently, uncertainties related to the experiment, the numerical model, or the applied numerical solving strategies can influence the correctness of the identified model input parameters. The reduction of uncertainties for two critical problems and the quantification of uncertainties related to the investigation of several nominally identical structures are the main emphases of this thesis. First, the reduction of uncertainties by optimizing reference sensor positions is considered. The presented approach relies on predicted power spectral amplitudes and an initial finite element model as a basis to define the assessment criterion for predefined sensor positions. In combination with geometry-based design variables, which represent the sensor positions, genetic and particle swarm optimization algorithms are applied.
The applicability of the proposed approach is demonstrated on a numerical benchmark study of a simply supported beam and a case study of a real test specimen. Furthermore, the theory of determining the predicted power spectral amplitudes is validated with results from vibration tests. Second, the possibility of reducing uncertainties related to an inappropriate assignment of numerically derived and experimentally obtained modes is investigated. In the context of vibration-based model updating, the correct pairing is essential. The most common criterion for indicating corresponding mode shapes is the modal assurance criterion. Unfortunately, this criterion fails in certain cases and is not reliable for automatic approaches. Hence, an alternative criterion, the energy-based modal assurance criterion, is proposed. This criterion combines the mathematical characteristic of orthogonality with the physical properties of the structure by means of modal strain energies. A numerical example and a case study with experimental data are presented to show the advantages of the proposed energy-based modal assurance criterion in comparison to the traditional modal assurance criterion. Third, the application of optimization strategies combined with information-theory-based objective functions is analyzed for the purpose of stochastic model updating. This approach serves as an alternative to the common sensitivity-based stochastic model updating strategies, whose success depends strongly on the defined initial model input parameters. In contrast, approaches based on optimization strategies can be more flexible. It can be demonstrated that the investigated nature-inspired optimization strategies in combination with the Bhattacharyya distance and the Kullback-Leibler divergence are appropriate. The obtained accuracies and the respective computational effort are comparable with sensitivity-based stochastic model updating strategies. The application of model updating procedures to improve the quality and suitability of a numerical model is always related to additional costs. The presented innovative approaches will contribute to reducing and quantifying uncertainties within a vibration-based model updating process. Therefore, the increased benefit can compensate for the additional effort that is necessary to apply model updating procedures.}, subject = {Dynamik}, language = {en} } @phdthesis{Nickerson, author = {Nickerson, Seth}, title = {Thermo-Mechanical Behavior of Honeycomb, Porous, Microcracked Ceramics}, doi = {10.25643/bauhaus-universitaet.3975}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190911-39753}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The underlying goal of this work is to reduce the uncertainty related to thermally induced stress prediction. This is accomplished by considering the use of non-linear material behavior, notably path-dependent thermal hysteresis behavior in the elastic properties. The primary novel contributions of this work center on two aspects: 1. broad material characterization and mechanistic material understanding, giving insight into why this class of materials behaves in characteristic ways; 2. development and implementation of a thermal hysteresis material model and its use to determine the impact on overall macroscopic stress predictions. Results highlight microcracking evolution and behavior as the dominant mechanism for material property complexity in this class of materials.
Additionally, it was found that for the cases studied, thermal hysteresis behavior impacts relevant peak stress predictions of a heavy-duty diesel particulate filter undergoing a drop-to-idle regeneration by less than ~15\% for all conditions tested. It is also found that path-independent heating curves may be utilized under a linear solution assumption to simplify analysis. This work brings forth a newly conceived concept of a three-state, four-path, thermally induced microcrack evolution process; demonstrates experimental behavior that is consistent with the proposed mechanisms; develops a mathematical framework that describes the process; and quantifies the impact in a real-world application space.}, subject = {Keramik}, language = {en} } @phdthesis{Mai, author = {Mai, Luu}, title = {Structural Control Systems in High-speed Railway Bridges}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.2339}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20141223-23391}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Structural vibration control of high-speed railway bridges using tuned mass dampers, semi-active tuned mass dampers, fluid viscous dampers and magnetorheological dampers to reduce resonant structural vibrations is studied. The main issues addressed in this work include modeling of the dynamic interaction of the structures, optimization of the parameters of the dampers, and comparison of their efficiency. A new approach to optimize multiple tuned mass damper systems on an uncertain model is proposed based on the H-infinity optimization criteria and the DK iteration procedure with norm-bounded uncertainties in the frequency domain. The parameters of the tuned mass dampers are optimized directly and simultaneously on the different modes contributing significantly to the multi-resonant peaks in order to explore the different possible combinations of parameters. The effectiveness of the present method is also evaluated through comparison with a previous method. In the case of semi-active tuned mass dampers, an optimization algorithm is derived to control the magnetorheological damper in these semi-active damping systems. The proposed algorithm can generate various combinations of control gains and state variables, which can improve the ability of MR dampers to track the desired control forces. An uncertain model to reduce detuning effects is also considered in this work. Next, in order to tune the parameters of fluid viscous dampers to the vicinity of their exact optimal values, analytical formulae which can include structural damping are developed based on the perturbation method. The proposed formulae can also be considered an improvement of the previous analytical formulae, especially for bridge beams with large structural damping. Finally, a new combination of magnetorheological dampers and a double-beam system to improve the vibration performance of the primary structure is proposed. An algorithm to control the magnetorheological dampers in this system is developed using standard linear matrix inequality techniques. Weighting functions, as a loop-shaping procedure, are also introduced in the feedback controllers to improve the tracking ability of the magnetorheological damping forces. The effectiveness of magnetorheological dampers controlled by the proposed scheme, along with the effects of the uncertain and time-delay parameters on the models, is evaluated through numerical simulations.
Additionally, a comparison of the dampers based on their performance is considered in this work.}, subject = {High-speed railway bridge}, language = {en} } @phdthesis{KhademiZahedi, author = {Khademi Zahedi, Reza}, title = {Stress Distribution in Buried Defective PE Pipes and Crack Propagation in Nanosheets}, doi = {10.25643/bauhaus-universitaet.4481}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210803-44814}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {235}, abstract = {Buried PE pipelines are the main choice for transporting hazardous hydrocarbon fluids and are used in urban gas distribution networks. Molecular dynamics (MD) simulations are used to investigate material behavior at the nanoscale.}, subject = {Gasleitung}, language = {en} } @phdthesis{Vu, author = {Vu, Bac Nam}, title = {Stochastic uncertainty quantification for multiscale modeling of polymeric nanocomposites}, doi = {10.25643/bauhaus-universitaet.2555}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160322-25551}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {265}, abstract = {Nanostructured materials are extensively applied in many fields of material science for new industrial applications, particularly in the automotive and aerospace industries, due to their exceptional physical and mechanical properties. Experimental testing of nanomaterials is expensive, time-consuming, challenging and sometimes unfeasible. Therefore, computational simulations have been employed as an alternative method to predict macroscopic material properties. The behavior of polymeric nanocomposites (PNCs) is highly complex. The origins of macroscopic material properties reside in the properties and interactions taking place on finer scales. It is therefore essential to use a multiscale modeling strategy to properly account for all length and time scales associated with these material systems, which span many orders of magnitude. Numerous multiscale models of PNCs have been established; however, most of them connect only two scales. There are only a few multiscale models for PNCs bridging four length scales (nano-, micro-, meso- and macro-scales). In addition, nanomaterials are stochastic in nature, and the prediction of macroscopic mechanical properties is influenced by many factors such as fine-scale features. The mechanical properties predicted by traditional approaches significantly deviate from the values measured in experiments because the uncertainty of material features is neglected. This discrepancy indicates that the effective macroscopic properties of materials are highly sensitive to various sources of uncertainty, such as loading and boundary conditions and material characteristics, while very few stochastic multiscale models for PNCs have been developed. Therefore, it is essential to construct PNC models within the framework of stochastic modeling and to quantify the stochastic effect of the input parameters on the macroscopic mechanical properties of those materials. This study aims to develop computational models at four length scales (nano-, micro-, meso- and macro-scales) and hierarchical upscaling approaches bridging length scales from nano- to macro-scale. A framework for uncertainty quantification (UQ) applied to predict the mechanical properties of the PNCs as a function of material features at different scales is studied.
Sensitivity and uncertainty analysis are of great help in quantifying the effect of input parameters, considering both main and interaction effects, on the mechanical properties of the PNCs. To achieve this major goal, the following tasks are carried out: At nano-scale, molecular dynamics (MD) simulations were used to investigate the deformation mechanism of glassy amorphous polyethylene (PE) in dependence on temperature and strain rate. Steered molecular dynamics (SMD) simulations were also employed to investigate the interfacial characteristics of the PNCs. At micro-scale, we developed an atomistic-based continuum model represented by a representative volume element (RVE), in which the SWNT's properties and the SWNT/polymer interphase are modeled at nano-scale, while the surrounding polymer matrix is modeled by solid elements. Then, a two-parameter model was employed at meso-scale. A hierarchical multiscale approach has been developed to obtain the structure-property relations at one length scale and transfer the effect to the higher length scales. In particular, we homogenized the RVE into an equivalent fiber. The equivalent fiber was then employed in a micromechanical analysis (i.e. the Mori-Tanaka model) to predict the effective macroscopic properties of the PNC. Furthermore, an averaging homogenization process was also used to obtain the effective stiffness of the PNC at meso-scale. Stochastic modeling and uncertainty quantification consist of the following ingredients: - Simple random sampling, Latin hypercube sampling, Sobol' quasirandom sequences, and Iman and Conover's method (inducing correlation in Latin hypercube sampling) are employed to generate independent and dependent sample data, respectively. - Surrogate models, such as polynomial regression, moving least squares (MLS), a hybrid method combining polynomial regression and MLS, Kriging regression, and penalized spline regression, are employed as approximations of the mechanical model. The advantages of the surrogate models are their high computational efficiency and robustness, as they can be constructed from a limited amount of available data. - Global sensitivity analysis (SA) methods, such as variance-based methods for models with independent and dependent input parameters, Fourier-based techniques for performing variance-based methods and partial derivatives, and elementary effects in the context of local SA, are used to quantify the effects of input parameters and their interactions on the mechanical properties of the PNCs. A bootstrap technique is used to assess the robustness of the global SA methods with respect to their performance. In addition, the probability distributions of the mechanical properties are determined by using the probability plot method. The upper and lower bounds of the predicted Young's modulus according to 95 \% prediction intervals were provided. The above-mentioned methods address the behaviour of intact materials. Novel numerical methods such as a node-based smoothed extended finite element method (NS-XFEM) and an edge-based smoothed phantom node method (ES-Phantom node) were developed for fracture problems. These methods can be used to account for cracks at macro-scale in future work. The predicted mechanical properties were validated and verified.
They show good agreement with previous experimental and simulation results.}, subject = {Polymere}, language = {en} } @phdthesis{Ghasemi, author = {Ghasemi, Hamid}, title = {Stochastic optimization of fiber reinforced composites considering uncertainties}, doi = {10.25643/bauhaus-universitaet.2704}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20161117-27042}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {140}, abstract = {Briefly, the two basic questions that this research is supposed to answer are: 1. How much fiber is needed and how should fibers be distributed through a fiber reinforced composite (FRC) structure in order to obtain the optimal and reliable structural response? 2. How do uncertainties influence the optimization results and the reliability of the structure? To answer the above questions, a double-stage sequential optimization algorithm for finding the optimal content of short fiber reinforcements and their distribution in the composite structure, considering uncertain design parameters, is presented. In the first stage, the optimal amount of short fibers in an FRC structure with uniformly distributed fibers is determined in the framework of a Reliability Based Design Optimization (RBDO) problem. The presented model considers material, structural and modeling uncertainties. In the second stage, the fiber distribution optimization (with the aim of further increasing structural reliability) is performed by defining a fiber distribution function through a Non-Uniform Rational B-Spline (NURBS) surface. The advantages of using the NURBS surface as a fiber distribution function include: using the same data set for the optimization and analysis; a high convergence rate due to the smoothness of the NURBS; mesh independency of the optimal layout; no need for any post-processing technique; and its non-heuristic nature. The output of stage 1 (the optimal fiber content for homogeneously distributed fibers) is considered as the input of stage 2. The output of stage 2 is the Reliability Index ($\beta$) of the structure with the optimal fiber content and distribution. The first-order reliability method (in order to approximate the limit state function) as well as different material models, including the Rule of Mixtures, Mori-Tanaka, an energy-based approach and stochastic multi-scale models, are implemented in different examples. The proposed combined model is able to capture the role of available uncertainties in FRC structures through a computationally efficient algorithm using sequential, NURBS and sensitivity-based techniques. The methodology is successfully implemented for interfacial shear stress optimization in sandwich beams and also for optimization of the internal cooling channels in a ceramic matrix composite. Finally, after some changes and modifications by combining Isogeometric Analysis, level set and point-wise density mapping techniques, the computational framework is extended to topology optimization of piezoelectric / flexoelectric materials.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Liu, author = {Liu, Bokai}, title = {Stochastic multiscale modeling of polymeric nanocomposites using Data-driven techniques}, doi = {10.25643/bauhaus-universitaet.4637}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220503-46379}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, abstract = {In recent years, lightweight materials, such as polymeric nanocomposites (PNCs), have been studied and developed due to their excellent physical and chemical properties.
Structures composed of these composite materials are widely used in aerospace engineering, automotive components, and electrical devices. The outstanding mechanical, thermal, and electrical properties of carbon nanotubes (CNTs) make them an ideal filler to strengthen the corresponding properties of polymer materials. The heat transfer of composite materials has very promising engineering applications in many fields, especially in electronic devices and energy storage equipment. It is essential in high-energy-density systems, since electronic components need heat dissipation functionality. In other words, in electronic devices the generated heat should ideally be dissipated by light and small heat sinks. Polymeric composites consist of fillers embedded in a polymer matrix; the former significantly affect the overall (macroscopic) performance of the material. There are many common carbon-based fillers such as single-walled carbon nanotubes (SWCNT), multi-walled carbon nanotubes (MWCNT), carbon nanobuds (CNB), fullerene, and graphene. Additives inside the matrix have become a popular subject for researchers. Some extraordinary characteristics, such as high load-bearing performance, lightweight design, excellent chemical resistance, easy processing, and heat transfer, make the design of PNCs flexible. Due to the reinforcing effects of different fillers on composite materials, there is a higher degree of design freedom, and the structure can be tailored to the needs of specific applications. As already stated, our research focus will be on SWCNT-enhanced PNCs. Since experiments are time-consuming, sometimes expensive, and cannot shed light on phenomena taking place, for instance, at the interfaces/interphases of composites, they are often complemented by theoretical and computational analysis. While most studies are based on deterministic approaches, there is a comparatively lower number of stochastic methods accounting for uncertainties in the input parameters. In deterministic models, the output of the model is fully determined by the parameter values and the initial conditions. However, uncertainties in the input parameters such as aspect ratio, volume fraction, and thermal properties of fiber and matrix need to be taken into account for reliable predictions. In this research, a stochastic multiscale method is provided to study the influence of numerous uncertain input parameters on the thermal conductivity of the composite. Therefore, a hierarchical multi-scale method based on computational homogenization is presented to predict the macroscopic thermal conductivity based on the fine-scale structure. In order to study the inner mechanism, we use the finite element method and employ surrogate models to conduct a Global Sensitivity Analysis (GSA). The SA is performed in order to quantify the influence of the conductivity of the fiber, the matrix, the Kapitza resistance, the volume fraction and the aspect ratio on the macroscopic conductivity. Therefore, we compute first-order and total-effect sensitivity indices with different surrogate models. As stochastic multiscale models are computationally expensive, surrogate approaches are commonly exploited. With the emergence of high-performance computing and artificial intelligence, machine learning has become a popular modeling tool for numerous applications. Machine learning (ML) is commonly used for regression and maps data through specific rules and algorithms to build input-output models.
They are particularly useful for nonlinear input-output relationships when sufficient data is available. ML has also been used in the design of new materials and in multiscale analysis. For instance, artificial neural networks and integrated learning seem to be ideally suited for such a task. They can theoretically simulate any non-linear relationship through the connection of neurons. Mapping relationships are employed to carry out data-driven simulations of inputs and outputs in stochastic modeling. This research aims to develop stochastic multi-scale computational models of PNCs for heat transfer. Multi-scale stochastic modeling with uncertainty analysis and machine learning methods consists of the following components: - Uncertainty Analysis. A surrogate-based global sensitivity analysis is coupled with a hierarchical multi-scale method employing computational homogenization. The effect of the conductivity of the fibers and the matrix, the Kapitza resistance, the volume fraction and the aspect ratio on the 'macroscopic' conductivity of the composite is systematically studied. All selected surrogate models consistently yield the conclusion that the most influential input parameter is the aspect ratio, followed by the volume fraction. The Kapitza resistance has no significant effect on the thermal conductivity of the PNCs. The most accurate surrogate model in terms of the R2 value is the moving least squares (MLS) model. - Hybrid Machine Learning Algorithms. A combination of an artificial neural network (ANN) and particle swarm optimization (PSO) is applied to estimate the relationship between variable input and output parameters. The ANN is used for modeling the composite, while PSO improves the prediction performance through an optimized global minimum search. The thermal conductivity of the fibers and the matrix, the Kapitza resistance, the volume fraction and the aspect ratio are selected as input parameters. The output is the macroscopic (homogenized) thermal conductivity of the composite. The results show that the PSO significantly improves the predictive ability of this hybrid intelligent algorithm, which outperforms traditional neural networks. - Stochastic Integrated Machine Learning. A stochastic integrated machine learning based multiscale approach for the prediction of the macroscopic thermal conductivity in PNCs is developed. Seven types of machine learning models are exploited in this research, namely Multivariate Adaptive Regression Splines (MARS), Support Vector Machine (SVM), Regression Tree (RT), Bagging Tree (Bag), Random Forest (RF), Gradient Boosting Machine (GBM) and Cubist. They are used as components of stochastic modeling to construct the relationship between the uncertain input variables and the macroscopic thermal conductivity of PNCs. Particle Swarm Optimization (PSO) is used for hyper-parameter tuning to find the globally optimal values, leading to a significant reduction in the computational cost.
The advantages and disadvantages of the various methods are also analyzed in terms of computing time and model complexity to finally give a recommendation on the applicability of the different models.}, subject = {Polymere}, language = {en} } @phdthesis{Chan, author = {Chan, Chiu Ling}, title = {Smooth representation of thin shells and volume structures for isogeometric analysis}, doi = {10.25643/bauhaus-universitaet.4208}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200812-42083}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {162}, abstract = {The purpose of this study is to develop self-contained methods for obtaining smooth meshes which are compatible with isogeometric analysis (IGA). The study contains three main parts. We start by developing a better understanding of shapes and splines through the study of an image-related problem. Then we proceed towards obtaining smooth volumetric meshes of the given voxel-based images. Finally, we treat the smoothness issue on multi-patch domains with C1 coupling. The highlights of each part are as follows. First, we present a B-spline convolution method for boundary representation of voxel-based images. We adopt a filtering technique to compute the B-spline coefficients and gradients of the images effectively. We then implement the B-spline convolution for developing a non-rigid image registration method. The proposed method is in some sense "isoparametric", in that all the computation is done within the B-splines framework. In particular, updating the images by using B-spline composition promotes a smooth transformation map between the images. We show the possible medical applications of our method by applying it to the registration of brain images. Secondly, we develop a self-contained volumetric parametrization method based on the B-splines boundary representation. We aim to convert given voxel-based data to a matching C1 representation with hierarchical cubic splines. The concept of the osculating circle is employed to enhance the geometric approximation, which is done by a single template and linear transformations (scaling, translations, and rotations) without the need to solve an optimization problem. Moreover, we use Laplacian smoothing and refinement techniques to avoid irregular meshes and to improve mesh quality. We show with several examples that the method is capable of handling complex 2D and 3D configurations. In particular, we parametrize the 3D Stanford bunny, which contains irregular shapes and voids. Finally, we propose the B{\'e}zier ordinates approach and the splines approach for C1 coupling. In the first approach, the new basis functions are defined in terms of the B{\'e}zier Bernstein polynomials. For the second approach, the new basis is defined as a linear combination of C0 basis functions. The methods are not limited to planar or bilinear mappings. They allow the modeling of solutions to fourth-order partial differential equations (PDEs) on complex geometric domains, provided that the given patches are G1 continuous. Both methods have their advantages. In particular, the B{\'e}zier approach offers more degrees of freedom, while the spline approach is more computationally efficient. In addition, we propose partial degree elevation to overcome the C1-locking issue caused by the over-constraining of the solution space.
We demonstrate the potential of the resulting C1 basis functions for applications in IGA involving fourth-order PDEs, such as those appearing in Kirchhoff-Love shell models, Cahn-Hilliard phase field applications, and biharmonic problems.}, subject = {Modellierung}, language = {en} } @phdthesis{Tan, author = {Tan, Fengjie}, title = {Shape Optimization Design of Arch Type Dams under Uncertainties}, doi = {10.25643/bauhaus-universitaet.3960}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190819-39608}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Due to an increased need for hydro-electricity, water storage, and flood protection, it is assumed that a series of new dams will be built throughout the world. Compared with existing design methodologies for arch-type dams, model-based shape optimization can effectively reduce construction costs and leverage the properties of construction materials. To apply the means of shape optimization, suitable variables need to be chosen to formulate the objective function, which here is the volume of the arch dam. In order to increase the consistency with practical conditions, a great number of geometrical and behavioral constraints are included in the mathematical model. An optimization method, namely the Genetic Algorithm, is adopted, which allows a global search. Traditional optimization techniques are realized based on a deterministic approach, which means that the material properties and loading conditions are assumed to be fixed values. As a result, the real-world structures that are optimized by these approaches suffer from uncertainties that one needs to be aware of. Hence, in any optimization process for arch dams, it is necessary to find a methodology that is capable of considering the influences of uncertainties and generating a solution which is robust enough against them. The focus of this thesis is the formulation and the numerical method for the optimization of the arch dam under uncertainties. The two main models, the probabilistic and the non-probabilistic model, are introduced and discussed. Classic procedures of probabilistic approaches under uncertainties, such as RDO (robust design optimization) and RBDO (reliability-based design optimization), are in general computationally expensive and rely on estimates of the system's response variance and failure probabilities. Instead, the robust optimization (RO) method, which is based on the non-probabilistic model, does not follow a full probabilistic approach but works with pre-defined confidence levels. This leads to a bi-level optimization program where the volume of the dam is optimized under the worst combination of the uncertain parameters. By this, robust and reliable designs are obtained, and the result is independent of any assumptions on the stochastic properties of the random variables in the model. The optimization of an arch-type dam is realized here by a robust optimization method under load uncertainty, where hydraulic and thermal loads are considered. The load uncertainty is modeled as an ellipsoidal expression. Compared with a traditional deterministic optimization (DO) method, which is only concerned with the minimum objective value and offers a solution candidate close to limit states, the RO method provides a robust solution against uncertainties. All the above-mentioned methods are applied to the optimization of the arch dam and compared with the optimal design obtained by DO methods.
The results are compared and analyzed to discuss the advantages and drawbacks of each method. In order to reduce the computational cost, a ranking strategy and an approximation model are further employed for a preliminary screening. By means of these, the robust design can generate an improved arch dam structure which ensures both safety and serviceability during its lifetime.}, subject = {Wasserbau}, language = {en} }