@phdthesis{Yousefi, author = {Yousefi, Hassan}, title = {Discontinuous propagating fronts: linear and nonlinear systems}, doi = {10.25643/bauhaus-universitaet.4717}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220922-47178}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {356}, abstract = {The aim of this study is to control the spurious oscillations that develop around discontinuous solutions of linear and non-linear wave equations, i.e., hyperbolic partial differential equations (PDEs). The equations include both first-order and second-order (wave) hyperbolic systems. In these systems, even smooth initial conditions or smoothly varying source (load) terms can lead to discontinuous propagating solutions (fronts). For first-order hyperbolic PDEs, the concept of central high-resolution schemes is integrated with multiresolution-based adaptation to properly capture both discontinuous propagating fronts and the effects of fine-scale responses on those of larger scales in a multiscale manner. This integration leads to using central high-resolution schemes on non-uniform grids; such simulations are unstable, however, since the central schemes were originally developed to work properly on uniform cells/grids. Hence, the main concern is the stable cooperation of central schemes and multiresolution-based cell adapters. Regarding central schemes, the considered approaches are: 1) second-order central and central-upwind schemes; 2) third-order central schemes; 3) third- and fourth-order central weighted non-oscillatory schemes (central-WENO or CWENO); 4) piecewise parabolic methods (PPMs) obtained with two different local stencils. For these methods, the corresponding (nonlinear) stability conditions are studied and modified as well. Based on these stability conditions, several limiters are modified or developed as follows: 1) several second-order limiters with the total variation diminishing (TVD) feature; 2) second-order uniformly high-order accurate non-oscillatory (UNO) limiters; 3) two third-order nonlinear scaling limiters; 4) two new limiters for PPMs. Numerical results show that adaptive solvers lead to cost-effective computations (e.g., in some 1-D problems, the number of adapted grid points remains below 200 during the simulation, whereas achieving the same accuracy on a uniform grid requires 2049 points). Also, in some cases, fine-scale responses are confirmed to have considerable effects on larger scales. In the numerical simulation of nonlinear first-order hyperbolic systems, the two main concerns are convergence and uniqueness. The former is important due to the development of spurious oscillations, numerical dispersion, and numerical dissipation. Convergence of a numerical solution does not guarantee that it is the physical one (the uniqueness issue); indeed, a nonlinear system can converge to several numerical solutions, all of which are mathematically valid. In this work, convergence and uniqueness are studied directly on non-uniform grids/cells via the concepts of the local numerical truncation error and the numerical entropy production, respectively. Both of these concepts are also used for cell/grid adaptation, and their performance is compared with that of the multiresolution-based method. Several 1-D and 2-D numerical examples are examined to confirm the efficiency of the adaptive solver. The examples involve problems with convex and non-convex fluxes.
In the latter case, owing to the development of complex waves, properly capturing the physical solutions requires more attention. For this purpose, method adaptation (in parallel with cell/grid adaptation) seems essential. This new type of adaptation is also performed in the framework of multiresolution analysis. Regarding second-order hyperbolic PDEs (mechanical waves), the regularization concept is used to cure artificial (numerical) oscillation effects, especially for high-gradient or discontinuous solutions; the oscillations are removed by regularization acting as a post-processor. Simulations are performed directly on the second-order form of the wave equations. It should be mentioned that it is possible to rewrite second-order wave equations as systems of first-order equations and then simulate the new system with high-resolution schemes; however, this approach increases the number of variables (especially for 3-D problems). The numerical discretization is performed by compact finite difference (FD) formulations with desired features, e.g., methods with spectral-like resolution or optimized-error properties. These FD methods are developed to handle high-frequency waves (such as waves near earthquake sources). The performance of several regularization approaches is studied, both theoretically and numerically, and finally a suitable regularization approach controlling the Gibbs phenomenon is recommended. At the end, numerical results are provided to confirm the efficiency of numerical solvers enhanced by the regularization concept. In this part, shock-like responses due to local and abrupt changes of physical properties, as well as stress wave propagation in stochastic-like domains, are studied.}, subject = {Partielle Differentialgleichung}, language = {en} } @phdthesis{Stein, author = {Stein, Peter}, title = {Procedurally generated models for Isogeometric Analysis}, publisher = {Universit{\"a}tsverlag}, address = {Weimar}, isbn = {978-3-86068-488-7}, doi = {10.25643/bauhaus-universitaet.1848}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20130212-18483}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {82}, abstract = {Increasingly powerful hardware and software allow for the numerical simulation of complex physical phenomena with high levels of detail. In light of this development, the definition of numerical models for the Finite Element Method (FEM) has become the bottleneck of the simulation process. Characteristic features of model generation are large manual effort and a decoupling of the geometric and numerical models. In the highly probable case of design revisions, all steps of model preprocessing and mesh generation have to be repeated. This includes the idealization and approximation of a geometric model as well as the definition of boundary conditions and model parameters. Design variants leading to more resource-efficient structures might hence be disregarded due to limited budgets and constrained time frames. A potential solution to the above problem is given by the concept of Isogeometric Analysis (IGA). The core idea of this method is to directly employ a geometric model for numerical simulations, which makes it possible to circumvent model transformations and the accompanying data losses. The basis for this method are geometric models described in terms of Non-uniform rational B-Splines (NURBS). This class of piecewise continuous rational polynomial functions is ubiquitous in computer graphics and Computer-Aided Design (CAD).
It allows the description of a wide range of geometries using a compact mathematical representation. The shape of an object thereby results from the interpolation of a set of control points by means of the NURBS functions, allowing efficient representations of curves, surfaces, and solid bodies alike. Existing software applications, however, only support the modeling and manipulation of the former two. The description of three-dimensional solid bodies consequently requires significant manual effort, thus essentially forbidding the setup of complex models. This thesis proposes a procedural approach for the generation of volumetric NURBS models. That is, a model is described not in terms of its data structures but as a sequence of modeling operations applied to a simple initial shape. In a sense, this describes the "evolution" of the geometric model under the sequence of operations. In order to adapt this concept to NURBS geometries, only a compact set of commands is necessary, which, in turn, can be adapted from existing algorithms. A model can then be treated in terms of interpretable model parameters. This leads to an abstraction from its data structures, and model variants can be set up by varying the governing parameters. The proposed concept complements existing template modeling approaches: templates can not only be defined in terms of modeling commands but can also serve as input geometry for said operations. Such templates, arranged in a nested hierarchy, provide an elegant model representation. They offer adaptivity on each tier of the model hierarchy and allow complex models to be created from only a few model parameters. This is demonstrated for volumetric fluid domains used in the simulation of vertical-axis wind turbines. Starting from a template representation of airfoil cross-sections, the complete "negative space" around the rotor blades can be described by a small set of model parameters, and model variants can be set up in a fraction of a second. NURBS models offer high geometric flexibility, allowing a given shape to be represented in different ways. Different model instances can exhibit varying suitability for numerical analyses. For their assessment, Finite Element mesh quality metrics are considered. The considered metrics are based on purely geometric criteria and allow the identification of model degenerations commonly used to achieve certain geometric features. They can be used to decide upon model adaptations and provide a measure of their efficacy. Unfortunately, they do not reveal a relation between mesh distortion and the ill-conditioning of the equation systems resulting from the numerical model.}, subject = {NURBS}, language = {en} } @phdthesis{Schrader, author = {Schrader, Kai}, title = {Hybrid 3D simulation methods for the damage analysis of multiphase composites}, doi = {10.25643/bauhaus-universitaet.2059}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20131021-20595}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {174}, abstract = {Modern digital material approaches for the visualization and simulation of heterogeneous materials allow the investigation of the behavior of complex multiphase materials with their physically nonlinear material response at various scales. However, these computational techniques require extensive hardware resources with respect to computing power and main memory to numerically solve large-scale discretized models in 3D.
Due to a very high number of degrees of freedom, which may quickly increase into the two-digit million range, the limited hardware resources have to be utilized in the most efficient way to enable execution of the numerical algorithms in minimal computation time. Hence, in the field of computational mechanics, various methods and algorithms can lead to an optimized runtime behavior of nonlinear simulation models; several such approaches are proposed and investigated in this thesis. Today, the numerical simulation of damage effects in heterogeneous materials is performed by the adaptation of multiscale methods. A consistent modeling in three-dimensional space with an appropriate discretization resolution on each scale (based on a hierarchical or concurrent multiscale model), however, still poses computational challenges with respect to the convergence behavior, the scale transition, and the solver performance of the weakly coupled problems. The computational efficiency and the distribution among the available hardware resources (often based on a parallel hardware architecture) can be significantly improved. In the past years, high-performance computing (HPC) and graphics processing unit (GPU) based computation techniques were established for the investigation of scientific objectives. Their application results in the modification of existing and the development of new computational methods for the numerical implementation, which makes it possible to take advantage of massively clustered computer hardware resources. In the field of numerical simulation in material science, e.g. within the investigation of damage effects in multiphase composites, the suitability of such models is often restricted by the number of degrees of freedom (d.o.f.s) of the three-dimensional spatial discretization. This proves difficult for the type of implementation used for the nonlinear simulation procedure and, at the same time, greatly influences the memory demand and computation time. In this thesis, a hybrid discretization technique has been developed for the three-dimensional discretization of a three-phase material which respects the numerical efficiency of nonlinear (damage) simulations of these materials. The increase in computational efficiency is enabled by the improved scalability of the numerical algorithms. Consequently, substructuring methods for partitioning the hybrid mesh were implemented, tested, and adapted to the HPC computing framework, using several hundred CPU (central processing unit) nodes for building the finite element assembly. A memory-efficient iterative and parallelized equation solver, combined with a special preconditioning technique for solving the underlying equation system, was modified and adapted to enable combined CPU- and GPU-based computations. Hence, the author recommends applying the substructuring method to hybrid meshes, which respects the different material phases and their mechanical behavior and enables splitting the structure into elastic and inelastic parts. The consideration of the nonlinear material behavior, specified for the corresponding phase, is thereby limited to the inelastic domains only, which decreases the computing time for the nonlinear procedure. Due to the high numerical effort of such simulations, an alternative approach for the nonlinear finite element analysis, based on sequential linear analysis, was implemented with respect to scalable HPC.
The incremental-iterative procedure of the nonlinear step in finite element analysis (FEA) was then replaced by a sequence of linear FE analyses whenever damage occurred in critical regions, an approach known in the literature as the saw-tooth approach. As a result, qualitative (smeared) crack initiation in 3D multiphase specimens has been simulated efficiently.}, subject = {high-performance computing}, language = {en} } @phdthesis{Salavati, author = {Salavati, Mohammad}, title = {Multi-Scale Modeling of Mechanical and Electrochemical Properties of 1D and 2D Nanomaterials, Application in Battery Energy Storage Systems}, doi = {10.25643/bauhaus-universitaet.4183}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200623-41830}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {166}, abstract = {Material properties play a critical role in the manufacturing of durable products. Estimating precise characteristics at different scales requires complex and expensive experimental measurements. Potentially, computational methods can provide a platform for determining the fundamental properties before the final experiment. Multi-scale computational modeling covers various time and length scales, including the nano, micro, meso, and macro scales. These scales can be modeled separately or in correlation with coarser scales. Depending on the scales of interest, the right selection of multi-scale methods leads to reliable results at an affordable computational cost. The present dissertation deals with problems at various length and time scales using computational methods, including density functional theory (DFT), molecular mechanics (MM), molecular dynamics (MD), and finite element (FE) methods. Physical and chemical interactions at lower scales determine the coarser-scale properties. Modeling the interaction of particles and exploring fundamental properties are significant challenges of computational science. Fine-scale modeling requires considerable computational effort due to the large number of interacting atoms/particles. To deal with this problem and to treat a fine-scale (nano) problem as a coarse-scale (macro) one, we extended an atomistic-continuum framework in which the discrete atomic models are solved as continuum problems using the computationally efficient FE method. The MM or force-field method approximates a solution on the atomic scale based on a set of assumptions: atoms and bonds are modeled as harmonic oscillators, i.e., as a system of masses and springs, and the negative gradient of the potential energy equals the force on each atom. In this way, each bond's total potential energy, including bonded and non-bonded contributions, is simulated as an equivalent structural strain energy. Finally, the chemical nature of the atomic bond is modeled as a piezoelectric beam element that is solved by the FE method. Exploring novel materials with unique properties is in demand for various industrial applications. During the last decade, many two-dimensional (2D) materials have been synthesized and have shown outstanding properties. Investigating probable defects arising during the formation/fabrication process and studying their influence on strength under severe service conditions are critical tasks in exploring performance prospects. We studied various defects, including nano-cracks, notches, and point vacancy (Stone-Wales) defects, employing MD analysis. Classical MD has been used to simulate a considerable number of molecules at the micro- and meso-scales.
Pristine and defective nanosheet structures were considered under uniaxial tensile loading at various temperatures using the open-source LAMMPS code. The results were visualized with the open-source software OVITO and VMD. Quantum-based first-principles calculations are conducted at the electronic scale and are known as the most accurate ab initio methods. However, they are computationally too expensive to apply to large systems. We used density functional theory (DFT) to estimate the mechanical and electrochemical response of the 2D materials. The many-body Schr{\"o}dinger equation describes the motion and interactions of the solid-state particles. A solid is described as a system of positive nuclei and negative electrons, all interacting electromagnetically, where the wave function describes the quantum state of the set of particles. However, dealing with the 3N spatial coordinates of the electrons and nuclei and the N spin coordinates of the electrons makes the governing equation unsolvable for all but a few interacting atoms. Some assumptions and theories, such as the Born-Oppenheimer approximation, the Hartree-Fock mean-field theory, and the Hohenberg-Kohn theorems, are needed to treat this equation. First, the Born-Oppenheimer approximation reduces it to the electronic coordinates only. Then Kohn and Sham, building on the Hartree-Fock and Hohenberg-Kohn theories, assumed an equivalent fictitious system of non-interacting electrons, expressed as a functional of the electron density, whose ground-state energy equals that of the interacting electron system. Exchange-correlation energy functionals are responsible for satisfying the equivalence between both systems. The exact form of the exchange-correlation functional is not known; however, there are widely used methods for deriving functionals, such as the local density approximation (LDA), the generalized gradient approximation (GGA), and hybrid functionals (e.g., B3LYP). In our study, DFT was performed using the VASP code within the GGA/PBE approximation, and visualization/post-processing of the results was realized via the open-source software VESTA. Extensive DFT calculations are conducted to assess the prospects of 2D nanomaterials as anode/cathode electrode materials for batteries. The performance of metal-ion batteries strongly depends on the design of novel electrode materials. Two-dimensional (2D) materials have attracted remarkable interest for use as electrodes in battery cells due to their excellent properties. Desirable battery energy storage systems (BESS) must offer high energy density, safe operation, and efficient production costs. Batteries are used in electronic devices and provide a solution to environmental issues by storing the intermittent energy generated from renewable wind or solar power plants. Therefore, exploring optimal electrode materials can improve storage capacity and charging/discharging rates, leading to the design of advanced batteries. Our results at multiple scales highlight not only the efficiency of the proposed and employed methods but also the promising prospects of recently synthesized nanomaterials and their application as anode materials. First, a novel approach was developed for modeling a 1D nanotube as a continuum piezoelectric beam element; the results converged and matched closely with those from experiments and other, more complex models. Then the mechanical properties of nanosheets were estimated, and the failure-mechanism results provide a useful guide for prospective applications.
Our results give a comprehensive and useful picture of the mechanical properties of nanosheets with and without defects. Finally, the mechanical and electrochemical properties of several 2D nanomaterials are explored for the first time; their performance as anode materials indicates high potential for manufacturing super-stretchable and ultrahigh-capacity battery energy storage systems (BESS), exhibiting better performance than the available commercial anode materials.}, subject = {Batterie}, language = {en} } @phdthesis{Nikulla, author = {Nikulla, Susanne}, title = {Quality assessment of kinematical models by means of global and goal-oriented error estimation techniques}, publisher = {Verlag der Bauhaus-Universit{\"a}t Weimar}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.1616}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20120419-16161}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {117}, abstract = {Methods for model quality assessment aim to find the most appropriate model with respect to accuracy and computational effort for the structural system under investigation. Model error estimation techniques can be applied for this purpose when kinematical models are investigated. These are counted among the class of white-box models, which means that the model hierarchy, and therewith the best model, is known. This thesis gives an overview of discretisation error estimators. Deduced from these, methods for model error estimation are presented. Their general goal is to predict the inaccuracies that are introduced by using the simpler model, without knowing the solution of the more complex model. This information can be used to steer an adaptive process. Techniques for linear and non-linear problems as well as for global and goal-oriented errors are introduced. The estimation of the error in local quantities is realised by solving a dual problem, which serves as a weight for the primal error. So far, such techniques have mainly been applied in material modelling and for dimensional adaptivity. Within the scope of this thesis, available model error estimators are adapted for application to kinematical models. Their applicability is tested regarding the question of whether a geometrically non-linear calculation is necessary or not. The analysis is limited to non-linear estimators due to the structure of the underlying differential equations. These methods often involve simplifications, e.g. linearisations. It is investigated to what extent such assumptions lead to meaningful results when applied to kinematical models.}, subject = {Model quality, Model error estimation, Kinematical model, Geometric non-linearity, Finite Element method}, language = {en} } @phdthesis{NguyenThanh, author = {Nguyen-Thanh, Nhon}, title = {Isogeometric analysis based on rational splines over hierarchical T-mesh and alpha finite element method for structural analysis}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.2078}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20131125-20781}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {196}, abstract = {This thesis presents two new methods in finite elements and isogeometric analysis for structural analysis. The first method proposes an alternative alpha finite element method using triangular elements.
In this method, the piecewise constant strain field of linear triangular finite element models is enhanced by additional strain terms with an adjustable parameter α, which results in an effectively softer stiffness formulation compared to the linear triangular element. In order to avoid transverse shear locking in the analysis of Reissner-Mindlin plates, the alpha finite element method is coupled with a discrete shear gap technique for triangular elements, significantly improving the accuracy of the standard triangular finite elements. The basic idea behind this element formulation is to approximate displacements and rotations as in the standard finite element method, but to construct the bending, geometrical, and shear strains using node-based smoothing domains. Several numerical examples are presented and show that the alpha FEM is in good agreement with several other methods from the literature. As the second method, isogeometric analysis based on rational splines over hierarchical T-meshes (RHT-splines) is proposed. The RHT-splines are a generalization of Non-Uniform Rational B-splines (NURBS) over hierarchical T-meshes, i.e., piecewise bicubic polynomials over a hierarchical T-mesh. The RHT-spline basis functions not only inherit all the properties of NURBS, such as non-negativity, local support, and partition of unity, but, more importantly, also offer the capability of joining geometric objects without gaps, preserve higher-order continuity everywhere, and allow local refinement and adaptivity. In order to drive the adaptive refinement, an efficient recovery-based error estimator is employed. For this purpose, an imaginary surface is defined, constructed from the RHT-spline basis functions, which is used for the approximation and interpolation functions as well as for the construction of the recovered stress components. Numerical investigations prove that the proposed method is capable of obtaining results with higher accuracy and convergence rates than NURBS-based results.}, subject = {Isogeometric analysis}, language = {en} } @phdthesis{Kessler2018, author = {Keßler, Andrea}, title = {Matrix-free voxel-based finite element method for materials with heterogeneous microstructures}, doi = {10.25643/bauhaus-universitaet.3844}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190116-38448}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {113}, year = {2018}, abstract = {Modern image detection techniques such as micro computer tomography (μCT), magnetic resonance imaging (MRI) and scanning electron microscopy (SEM) provide us with high-resolution images of the microstructure of materials in a non-invasive and convenient way. They form the basis for the geometrical models of high-resolution analysis, so-called image-based analysis. However, especially in 3D, discretizations of these models easily reach the size of 100 million degrees of freedom and require extensive hardware resources in terms of main memory and computing power to solve the numerical model. Consequently, the focus of this work is to combine and adapt numerical solution methods to reduce the memory demand first and then the computation time, and thereby enable execution of image-based analyses on modern desktop computers. Hence, the numerical model is a straightforward grid discretization of the voxel-based (pixels with a third dimension) geometry, which omits boundary detection algorithms and allows reduced storage of the finite element data structure and a matrix-free solution algorithm.
This in turn reduces the effort of almost all applied grid-based solution techniques and results in memory-efficient and numerically stable algorithms for the microstructural models. Two variants of the matrix-free algorithm are presented. The efficient iterative solution method of conjugate gradients is used with preconditioners that can be applied matrix-free, such as the Jacobi and the especially suited multigrid method. The jagged material boundaries of the voxel-based mesh are smoothed through embedded boundary elements, which contain different material information at the integration points and are integrated sub-cell-wise, though without additional boundary detection. The efficiency of the matrix-free methods is retained.}, subject = {Dissertation}, language = {en} } @phdthesis{Jia, author = {Jia, Yue}, title = {Methods based on B-splines for model representation, numerical analysis and image registration}, doi = {10.25643/bauhaus-universitaet.2484}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20151210-24849}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {200}, abstract = {The thesis consists of inter-connected parts for modeling and analysis using newly developed isogeometric methods. The main parts are reproducing kernel triangular B-splines, extended isogeometric analysis for solving weakly discontinuous problems, collocation methods using superconvergent points, and the B-spline basis in image registration applications. Each topic is oriented towards the application of isogeometric analysis basis functions to ease the process of integrating the modeling and analysis phases of simulation. First, we develop a reproducing kernel triangular B-spline-based FEM for solving PDEs. We review the triangular B-splines and their properties. By definition, the triangular basis function is very flexible in modeling complicated domains. However, instabilities result when it is applied in analysis. We modify the triangular B-spline by a reproducing kernel technique, calculating a correction term for the triangular kernel function from the chosen surrounding basis. The improved triangular basis is capable of obtaining results with higher accuracy and almost optimal convergence rates. Second, we propose an extended isogeometric analysis for dealing with weakly discontinuous problems such as material interfaces. The original IGA is combined with XFEM-like enrichments, which are continuous functions themselves but have discontinuous derivatives. Consequently, the resulting solution space can approximate solutions with weak discontinuities. The method is also applied to curved material interfaces, where the inverse mapping and curved triangular elements are considered. Third, we develop an IGA collocation method using superconvergent points. Collocation methods are efficient because no numerical integration is needed. In particular, when a higher-order polynomial basis is applied, the method has a lower computational cost than Galerkin methods. However, the positions of the collocation points are crucial for the accuracy of the method, as they affect the convergence rate significantly. The proposed IGA collocation method uses superconvergent points instead of the traditional Greville abscissae. The numerical results show that the proposed method can achieve better accuracy and optimal convergence rates, while the traditional IGA collocation has optimal convergence only for even polynomial degrees. Lastly, we propose a novel dynamic multilevel technique for handling image registration.
It is an application of B-spline functions in image processing. The procedure considered aims to align a target image to a reference image by a spatial transformation. The method starts with an energy function which is the same as in FEM-based image registration. However, we simplify the solution procedure by working on the energy function directly. We dynamically solve for the control points, which are the coefficients of the B-spline basis functions. The new approach is simpler and faster. Moreover, it is enhanced by a multilevel technique in order to prevent instabilities. The numerical tests consist of two artificial images and four real biomedical MRI brain and CT heart images, and they show that our registration method is accurate, fast, and efficient, especially for large-deformation problems.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Hommel, author = {Hommel, Angela}, title = {Diskret holomorphe Funktionen und deren Bedeutung bei der L{\"o}sung von Differenzengleichungen}, doi = {10.25643/bauhaus-universitaet.3784}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180827-37846}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {On the basis of discrete Cauchy-Riemann operators, discrete holomorphic functions are defined and studied in detail. Building on this, the solution of difference equations by means of the discrete holomorphic functions is described.}, subject = {Differenzengleichung}, language = {de} } @misc{Hamzah, type = {Master Thesis}, author = {Hamzah, Abdulrazzak}, title = {L{\"o}sung von Randwertaufgaben der Bruchmechanik mit Hilfe einer approximationsbasierten Kopplung zwischen der Finite-Elemente-Methode und Methoden der komplexen Analysis}, doi = {10.25643/bauhaus-universitaet.4093}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200211-40936}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The main goal of the present work was to realize a continuous coupling between the analytical and the numerical solution of boundary value problems with singularities. With the interpolation-based coupling method, global C0 continuity can be achieved. For this purpose, a special finite element (coupling element) is used, which ensures the continuity of the solution with the analytical element as well as with the standard CST elements. Although the interpolation-based coupling method is applicable for an arbitrary number of nodes on the interface ΓAD, the study of the interpolation matrix and numerical simulations showed that it is ill-conditioned. To overcome the problem of numerical instabilities, an approximation-based coupling method was developed and investigated. The stability of this method was then assessed by examining the Gram matrix of the employed basis system on the two intervals [-π,π] and [-2π,2π]. The Gram matrix on the interval [-2π,2π] exhibited a more favorable condition number as a function of the number of coupling nodes on the interface. To rule out the associated numerical instabilities, the basis system is orthogonalized on both intervals by means of the Gram-Schmidt orthogonalization procedure. On the interval [-2π,2π], the orthogonal basis system can be written in explicit formulas.
The method of consistent sampling, which is frequently used in communications engineering, was employed to realize the approximation-based coupling. A limitation of this method is that the number of sampling basis functions must equal the number of reconstruction basis functions. As a consequence, the introduced basis system (with 2n basis functions) can only be used with n basis functions. To solve this problem, an alternative basis system (variant 2) was presented. The use of this basis system, however, requires a transformation matrix M, and when the basis system is orthogonalized on the interval [-π,π], the derivation of this matrix can be complicated and laborious. The shape functions were subsequently derived for both variants and plotted (for n = 5), and it was shown that these functions fulfill the requirements on shape functions and can therefore be used for the FE approximation. By means of numerical simulations carried out with variant 1 (with orthogonalization on the interval [-2π,2π]), the fundamental questions (for example, the continuity of the deformations on the interface ΓAD and the stresses in the analytical domain) were verified.}, subject = {Mathematik}, language = {de} } @phdthesis{Hamdia, author = {Hamdia, Khader}, title = {On the fracture toughness of polymeric nanocomposites: Comprehensive stochastic and numerical studies}, doi = {10.25643/bauhaus-universitaet.3765}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180712-37652}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Polymeric nanocomposites (PNCs) are considered for numerous nanotechnology applications, such as nano-biotechnology, nano-systems, nanoelectronics, and nano-structured materials. Commonly, they are formed by a polymer (epoxy) matrix reinforced with a nanosized filler. The addition of rigid nanofillers to the epoxy matrix has offered great improvements in fracture toughness without sacrificing other important thermo-mechanical properties. The physics of fracture in PNCs is rather complicated and is influenced by different parameters. Uncertainty in the predicted output is expected as a result of the stochastic variance of the factors affecting the fracture mechanism. Consequently, evaluating the improved fracture toughness of PNCs is a challenging problem. An artificial neural network (ANN) and an adaptive neuro-fuzzy inference system (ANFIS) have been employed to predict the fracture energy of polymer/particle nanocomposites. The ANN and ANFIS models were constructed, trained, and tested based on a collection of 115 experimental datasets gathered from the literature. The performance evaluation indices of the developed ANN and ANFIS showed relatively small errors, with high coefficients of determination (R2) and low root mean square error and mean absolute percentage error. Within the framework of uncertainty quantification of PNCs, a sensitivity analysis (SA) has been conducted to examine the influence of uncertain input parameters on the fracture toughness of polymer/clay nanocomposites. The phase-field approach is employed to predict the macroscopic properties of the composite considering six uncertain input parameters. The efficiency, robustness, and repeatability of five different SA methods are compared and evaluated comprehensively.
The Bayesian method is applied to develop a methodology for evaluating the performance of different analytical models used to predict the fracture toughness of polymer/particle nanocomposites. The developed method considers the model and parameter uncertainties based on different reference data (experimental measurements) gathered from the literature. Three analytical models differing in theory and assumptions were examined. The coefficients of variation of the model predictions with respect to the measurements are calculated using the approximated optimal parameter sets. Then, the model selection probability is obtained with respect to the different reference data. Stochastic finite element modeling is implemented to predict the fracture toughness of polymer/particle nanocomposites. For this purpose, a 2D finite element model containing an epoxy matrix and rigid nanoparticles surrounded by an interphase zone is generated. The crack propagation is simulated by the cohesive segments method and phantom nodes. Considering the uncertainties in the input parameters, a polynomial chaos expansion (PCE) surrogate model is constructed, followed by a sensitivity analysis.}, subject = {Bruch}, language = {en} } @phdthesis{Habtemariam, author = {Habtemariam, Abinet Kifle}, title = {Generalized Beam Theory for the analysis of thin-walled circular pipe members}, doi = {10.25643/bauhaus-universitaet.4572}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220127-45723}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {188}, abstract = {The detailed structural analysis of thin-walled circular pipe members often requires the use of shell- or solid-based finite element methods. Although these methods provide a very good approximation of the deformations, they require a high degree of discretization, which causes high computational costs. On the other hand, the analysis of thin-walled circular pipe members based on classical beam theories is easy to implement and needs much less computation time; however, these theories are limited in their ability to approximate the deformations, as they cannot consider the deformation of the cross-section. This dissertation focuses on the study of the Generalized Beam Theory (GBT), which is both accurate and efficient in analyzing thin-walled members. This theory is based on a separation of variables in which the displacement field is expressed as a combination of predetermined deformation modes related to the cross-section and unknown amplitude functions defined on the beam's longitudinal axis. Although GBT was initially developed for long straight members, through the consideration of complementary deformation modes, which amend the null transverse and shear membrane strain assumptions of classical GBT, problems involving short members, pipe bends, and geometrical nonlinearity can also be analyzed using GBT. In this dissertation, the GBT formulation for the analysis of these problems is developed, and the application and capabilities of the method are illustrated using several numerical examples. Furthermore, the displacement and stress field results of these examples are verified using equivalent refined shell-based finite element models. The developed static and dynamic GBT formulations for curved thin-walled circular pipes are based on the linear kinematic description of curved shell theory.
In these formulations, the complex problem in pipe bends due to the strong coupling of longitudinal bending, warping, and cross-sectional ovalization is handled precisely through the derivation of the coupling tensors between the considered GBT deformation modes. Similarly, the geometrically nonlinear GBT analysis is formulated for thin-walled circular pipes based on the nonlinear membrane kinematic equations. Here, the initial linear as well as the quadratic stress and displacement tangent stiffness matrices are built using the third- and fourth-order GBT deformation mode coupling tensors. Longitudinally, the coupled GBT element stiffness and mass matrices are formulated using a beam-based finite element approach. Furthermore, the formulated GBT elements are tested for shear and membrane locking problems, and the limitations of the formulations regarding the membrane locking problem are discussed.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Chan, author = {Chan, Chiu Ling}, title = {Smooth representation of thin shells and volume structures for isogeometric analysis}, doi = {10.25643/bauhaus-universitaet.4208}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200812-42083}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {162}, abstract = {The purpose of this study is to develop self-contained methods for obtaining smooth meshes which are compatible with isogeometric analysis (IGA). The study contains three main parts. We start by developing a better understanding of shapes and splines through the study of an image-related problem. Then we proceed towards obtaining smooth volumetric meshes of given voxel-based images. Finally, we treat the smoothness issue on multi-patch domains with C1 coupling. The following are the highlights of each part. First, we present a B-spline convolution method for the boundary representation of voxel-based images. We adopt a filtering technique to compute the B-spline coefficients and gradients of the images effectively. We then use the B-spline convolution to develop a non-rigid image registration method. The proposed method is in some sense "isoparametric", in that all the computation is done within the B-spline framework. In particular, updating the images using B-spline composition promotes a smooth transformation map between the images. We show possible medical applications of our method by applying it to the registration of brain images. Secondly, we develop a self-contained volumetric parametrization method based on the B-spline boundary representation. We aim to convert given voxel-based data to a matching C1 representation with hierarchical cubic splines. The concept of the osculating circle is employed to enhance the geometric approximation; this is done with a single template and linear transformations (scaling, translations, and rotations) without the need to solve an optimization problem. Moreover, we use Laplacian smoothing and refinement techniques to avoid irregular meshes and to improve mesh quality. We show with several examples that the method is capable of handling complex 2D and 3D configurations. In particular, we parametrize the 3D Stanford bunny, which contains irregular shapes and voids. Finally, we propose a B{\'e}zier ordinates approach and a splines approach for C1 coupling. In the first approach, the new basis functions are defined in terms of the B{\'e}zier Bernstein polynomials.
For the second approach, the new basis is defined as a linear combination of C0 basis functions. The methods are not limited to planar or bilinear mappings. They allow the modeling of solutions to fourth-order partial differential equations (PDEs) on complex geometric domains, provided that the given patches are G1 continuous. Both methods have their advantages: in particular, the B{\'e}zier approach offers more degrees of freedom, while the spline approach is more computationally efficient. In addition, we propose partial degree elevation to overcome the C1-locking issue caused by over-constraining the solution space. We demonstrate the potential of the resulting C1 basis functions for applications in IGA involving fourth-order PDEs, such as those appearing in Kirchhoff-Love shell models, Cahn-Hilliard phase-field applications, and biharmonic problems.}, subject = {Modellierung}, language = {en} } @phdthesis{Bianco, author = {Bianco, Marcelo Jos{\'e}}, title = {Coupling between Shell and Generalized Beam Theory (GBT) elements}, doi = {10.25643/bauhaus-universitaet.4391}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210315-43914}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {265}, abstract = {In the last decades, the Finite Element Method has become the main method for static and dynamic analysis in engineering practice. For current problems, this method provides a faster, more flexible solution than the analytic approach. Prognoses of complex engineering problems that used to be almost impossible to solve are now feasible. Although the finite element method is a robust tool, it leads to new questions about engineering solutions. These new problems can be divided into two major groups: the first group concerns computer performance; the second is related to understanding the digital solution. Simultaneously with the development of the finite element method for numerical solutions, a theory between beam theory and shell theory was developed: the Generalized Beam Theory (GBT). This theory offers not only a systematic and analytically clear presentation of complicated structural problems, but also a compact and elegant calculation approach that can improve computer performance. Regrettably, GBT was not internationally known, since most publications on this theory were written in German, especially in the early years. Only in recent years has GBT gradually become a fertile research topic, with developments from linear to non-linear analysis. Another reason for the limited use of GBT is the isolated application of the theory. Although recent research applies the finite element method to solve GBT problems numerically, the coupling between GBT finite elements and elements of other theories (shell, solid, etc.) has not been the subject of previous research. Thus, the main goal of this dissertation is the coupling between GBT and shell/membrane elements. Consequently, one achieves the benefits of both sides: the versatility of shell elements combined with the high performance of GBT elements. Based on the assumptions of GBT, this dissertation shows how the separation of variables leads to two calculation domains for a beam structure: a cross-section modal analysis and the longitudinal amplification axis. Therefore, there is the possibility of applying the finite element method not only in the cross-section analysis, but also of developing an exact GBT finite element in the longitudinal direction.
For the cross-section analysis, this dissertation presents the solution of the quadratic eigenvalue problem with an original separation between plate and membrane mechanisms. Subsequently, one obtains a clearer representation of the deformation modes, as well as a reduced quadratic eigenvalue problem. Concerning the longitudinal direction, this dissertation develops novel exact elements based on hyperbolic and trigonometric shape functions. Although these functions do not have trivial expressions, they provide a recursive procedure with periodic derivatives that systematises the development of the stiffness matrices. Moreover, these shape functions enable a single-element discretisation of the beam structure and ensure a smooth stress field. From these developments, this dissertation achieves the formulation of its primary objective: the connection of GBT and shell elements in a mixed model. Based on the displacement field, it is possible to define the coupling equations applied in the master-slave method. Therefore, one can model the structural connections and joints with finite shell elements and the structural beams and columns with GBT finite elements. As a side effect, the coupling equations limit the displacement field of the shell elements under the assumptions of GBT, in particular in the neighbourhood of the coupling cross-section. Although these side effects are almost unnoticeable in linear analysis, they lead to cumulative errors in non-linear analysis. Therefore, this thesis finishes with the evaluation of the mixed GBT-shell models in non-linear analysis.}, subject = {Biegetheorie}, language = {en} } @phdthesis{AlYasiri2017, author = {Al-Yasiri, Zainab Riyadh Shaker}, title = {Function Theoretic Methods for the Analytical and Numerical Solution of Some Non-linear Boundary Value Problems with Singularities}, doi = {10.25643/bauhaus-universitaet.3898}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190506-38987}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {164}, year = {2017}, abstract = {The p-Laplace equation is a nonlinear generalization of the well-known Laplace equation. It is often used as a model problem for special types of nonlinearities, and it can therefore be seen as a bridge between very general nonlinear equations and the linear Laplace equation. It appears in many problems, for instance in the theory of non-Newtonian fluids and fluid dynamics, in rockfill dam problems, as well as in special problems of image restoration and image processing. The aim of this thesis is to solve the p-Laplace equation for 1 < p < 2, as well as for 2 < p < 3, and to find strong solutions in the framework of Clifford analysis. The idea is to apply a hypercomplex integral operator and special function-theoretic methods to transform the p-Laplace equation into a p-Dirac equation. We consider boundary value problems for the p-Laplace equation and transfer them to boundary value problems for a p-Dirac equation. These equations are solved iteratively by applying Banach's fixed-point principle. Applying operator-theoretic methods for the p-Dirac equation, the existence and uniqueness of solutions in certain Sobolev spaces is proved. In addition, using a finite difference approach on a uniform lattice in the plane, the fundamental solutions of the Cauchy-Riemann operator and its adjoint are calculated based on the fundamental solution of the Laplacian.
Besides, we define generalized discrete Teodorescu transform operators, which are right-inverse to the discrete Cauchy-Riemann operator and its adjoint in the plane. Furthermore, a new formula for generalized discrete boundary operators (analogues of the Cauchy integral operator) is considered. Based on these operators, a new version of the discrete Borel-Pompeiu formula is formulated and proved. This is the basis for an operator calculus that is applied to the numerical solution of the p-Dirac equation. Finally, numerical results are presented, showing the advantages and problems of this approach.}, subject = {Finite-Differenzen-Methode}, language = {en} }