@phdthesis{Alkam, author = {Alkam, Feras}, title = {Vibration-based Monitoring of Concrete Catenary Poles using Bayesian Inference}, volume = {2021}, publisher = {Bauhaus-Universit{\"a}tsverlag}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4433}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210526-44338}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {This work presents a robust status monitoring approach for detecting damage in cantilever structures based on logistic functions. Also, a stochastic damage identification approach based on changes of eigenfrequencies is proposed. The proposed algorithms are verified using catenary poles of electrified railways track. The proposed damage features overcome the limitation of frequency-based damage identification methods available in the literature, which are valid to detect damage in structures to Level 1 only. Changes in eigenfrequencies of cantilever structures are enough to identify possible local damage at Level 3, i.e., to cover damage detection, localization, and quantification. The proposed algorithms identified the damage with relatively small errors, even at a high noise level.}, subject = {Parameteridentifikation}, language = {en} } @article{IşıkBueyueksaracLeventEkincietal., author = {I{\c{s}}{\i}k, Ercan and B{\"u}y{\"u}ksara{\c{c}}, Ayd{\i}n and Levent Ekinci, Yunus and Ayd{\i}n, Mehmet Cihan and Harirchian, Ehsan}, title = {The Effect of Site-Specific Design Spectrum on Earthquake-Building Parameters: A Case Study from the Marmara Region (NW Turkey)}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7247}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207247}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42758}, pages = {23}, abstract = {The Marmara Region (NW Turkey) has experienced significant earthquakes (M > 7.0) to date. 
A destructive earthquake is also expected in the region. To determine the effect of the specific design spectrum, eleven provinces located in the region were chosen according to the Turkey Earthquake Building Code updated in 2019. Additionally, the differences between the previous and updated regulations of the country were investigated. Peak Ground Acceleration (PGA) and Peak Ground Velocity (PGV) were obtained for each province by using earthquake ground motion levels with 2\%, 10\%, 50\%, and 68\% probability of exceedance in 50-year periods. The PGA values in the region range from 0.16 to 0.7 g for earthquakes with a return period of 475 years. For each province, a sample of a reinforced-concrete building having two different numbers of stories with the same ground and structural characteristics was chosen. Static adaptive pushover analyses were performed for the sample reinforced-concrete building using each province's design spectrum. The variations in the earthquake and structural parameters were investigated according to different geographical locations. It was determined that the site-specific design spectrum significantly influences target displacements for performance-based assessments of buildings due to seismicity characteristics of the studied geographic location.}, subject = {Erdbeben}, language = {en} } @phdthesis{Vu, author = {Vu, Bac Nam}, title = {Stochastic uncertainty quantification for multiscale modeling of polymeric nanocomposites}, doi = {10.25643/bauhaus-universitaet.2555}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160322-25551}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {265}, abstract = {Nanostructured materials are extensively applied in many fields of material science for new industrial applications, particularly in the automotive, aerospace industry due to their exceptional physical and mechanical properties. Experimental testing of nanomaterials is expensive, timeconsuming,challenging and sometimes unfeasible. 
Therefore, computational simulations have been employed as an alternative method
Sensitivity and uncertainty analysis are of great help in quantifying
The advantage of the surrogate models is the high computational efficiency and robust as they can be constructed from a limited amount of available data. - Global sensitivity analysis (SA) methods, such as variance-based methods for models with independent and dependent input parameters, Fourier-based techniques for performing variance-based methods and partial derivatives, elementary effects in the context of local SA, are used to quantify the effects of input parameters and their interactions on the mechanical properties of the PNCs. A bootstrap technique is used to assess the robustness of the global SA methods with respect to their performance. In addition, the probability distribution of mechanical properties are determined by using the probability plot method. The upper and lower bounds of the predicted Young's modulus according to 95 \% prediction intervals were provided. The above-mentioned methods study on the behaviour of intact materials. Novel numerical methods such as a node-based smoothed extended finite element method (NS-XFEM) and an edge-based smoothed phantom node method (ES-Phantom node) were developed for fracture problems. These methods can be used to account for crack at macro-scale for future works. The predicted mechanical properties were validated and verified. They show good agreement with previous experimental and simulations results.}, subject = {Polymere}, language = {en} } @phdthesis{Liu, author = {Liu, Bokai}, title = {Stochastic multiscale modeling of polymeric nanocomposites using Data-driven techniques}, doi = {10.25643/bauhaus-universitaet.4637}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220503-46379}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, abstract = {In recent years, lightweight materials, such as polymer composite materials (PNCs) have been studied and developed due to their excellent physical and chemical properties. 
Structures composed of these composite materials are widely used in aerospace engineering structures, automotive components, and electrical devices. The excellent and outstanding mechanical, thermal, and electrical properties of Carbon nanotube (CNT) make it an ideal filler to strengthen polymer materials' comparable properties. The heat transfer of composite materials has very promising engineering applications in many fields, especially in electronic devices and energy storage equipment. It is essential in high-energy density systems since electronic components need heat dissipation functionality. Or in other words, in electronic devices the generated heat should ideally be dissipated by light and small heat sinks. Polymeric composites consist of fillers embedded in a polymer matrix, the first ones will significantly affect the overall (macroscopic) performance of the material. There are many common carbon-based fillers such as single-walled carbon nanotubes (SWCNT), multi-walled carbon nanotubes (MWCNT), carbon nanobuds (CNB), fullerene, and graphene. Additives inside the matrix have become a popular subject for researchers. Some extraordinary characters, such as high-performance load, lightweight design, excellent chemical resistance, easy processing, and heat transfer, make the design of polymeric nanotube composites (PNCs) flexible. Due to the reinforcing effects with different fillers on composite materials, it has a higher degree of freedom and can be designed for the structure according to specific applications' needs. As already stated, our research focus will be on SWCNT enhanced PNCs. Since experiments are timeconsuming, sometimes expensive and cannot shed light into phenomena taking place for instance at the interfaces/interphases of composites, they are often complemented through theoretical and computational analysis. 
While most studies are based on deterministic approaches, there is a comparatively lower number of stochastic methods accounting for uncertainties in the input parameters. In deterministic models, the output of the model is fully determined by the parameter values and the initial conditions. However, uncertainties in the input parameters such as aspect ratio, volume fraction, thermal properties of fiber and matrix need to be taken into account for reliable predictions. In this research, a stochastic multiscale method is provided to study the influence of numerous uncertain input parameters on the thermal conductivity of the composite. Therefore, a hierarchical multi-scale method based on computational homogenization is presented in to predict the macroscopic thermal conductivity based on the fine-scale structure. In order to study the inner mechanism, we use the finite element method and employ surrogate models to conduct a Global Sensitivity Analysis (GSA). The SA is performed in order to quantify the influence of the conductivity of the fiber, matrix, Kapitza resistance, volume fraction and aspect ratio on the macroscopic conductivity. Therefore, we compute first-order and total-effect sensitivity indices with different surrogate models. As stochastic multiscale models are computational expensive, surrogate approaches are commonly exploited. With the emergence of high performance computing and artificial intelligence, machine learning has become a popular modeling tool for numerous applications. Machine learning (ML) is commonly used in regression and maps data through specific rules with algorithms to build input and output models. They are particularly useful for nonlinear input-output relationships when sufficient data is available. ML has also been used in the design of new materials and multiscale analysis. For instance, Artificial neural networks and integrated learning seem to be ideally for such a task. 
They can theoretically simulate any non-linear relationship through the connection of neurons. Mapping relationships are employed to carry out data-driven simulations of inputs and outputs in stochastic modeling. This research aims to develop a stochastic multi-scale computational models of PNCs in heat transfer. Multi-scale stochastic modeling with uncertainty analysis and machine learning methods consist of the following components: -Uncertainty Analysis. A surrogate based global sensitivity analysis is coupled with a hierarchical multi-scale method employing computational homogenization. The effect of the conductivity of the fibers and the matrix, the Kapitza resistance, volume fraction and aspect ratio on the 'macroscopic' conductivity of the composite is systematically studied. All selected surrogate models yield consistently the conclusions that the most influential input parameters are the aspect ratio followed by the volume fraction. The Kapitza Resistance has no significant effect on the thermal conductivity of the PNCs. The most accurate surrogate model in terms of the R2 value is the moving least square (MLS). -Hybrid Machine Learning Algorithms. A combination of artificial neural network (ANN) and particle swarm optimization (PSO) is applied to estimate the relationship between variable input and output parameters. The ANN is used for modeling the composite while PSO improves the prediction performance through an optimized global minimum search. The thermal conductivity of the fibers and the matrix, the kapitza resistance, volume fraction and aspect ratio are selected as input parameters. The output is the macroscopic (homogenized) thermal conductivity of the composite. The results show that the PSO significantly improves the predictive ability of this hybrid intelligent algorithm, which outperforms traditional neural networks. -Stochastic Integrated Machine Learning. 
A stochastic integrated machine learning based multiscale approach for the prediction of the macroscopic thermal conductivity in PNCs is developed. Seven types of machine learning models are exploited in this research, namely Multivariate Adaptive Regression Splines (MARS), Support Vector Machine (SVM), Regression Tree (RT), Bagging Tree (Bag), Random Forest (RF), Gradient Boosting Machine (GBM) and Cubist. They are used as components of stochastic modeling to construct the relationship between the variable of the inputs' uncertainty and the macroscopic thermal conductivity of PNCs. Particle Swarm Optimization (PSO) is used for hyper-parameter tuning to find the global optimal values leading to a significant reduction in the computational cost. The advantages and disadvantages of various methods are also analyzed in terms of computing time and model complexity to finally give a recommendation for the applicability of different models.}, subject = {Polymere}, language = {en} } @misc{Zafar, type = {Master Thesis}, author = {Zafar, Usman}, title = {Probabilistic Reliability Analysis of Wind Turbines}, doi = {10.25643/bauhaus-universitaet.3977}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20240507-39773}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {Renewable energy use is on the rise and these alternative resources of energy can help combat with the climate change. Around 80\% of the world's electricity comes from coal and petroleum however, the renewables are the fastest growing source of energy in the world. Solar, wind, hydro, geothermal and biogas are the most common forms of renewable energy. Among them, wind energy is emerging as a reliable and large-scaled source of power production. The recent research and confidence in the performance has led to the construction of more and bigger wind turbines around the world. As wind turbines are getting bigger, a concern regarding their safety is also in discussion. 
Wind turbines are expensive machinery to construct and the enormous capital investment is one of the main reasons, why many countries are unable to adopt to the wind energy. Generally, a reliable wind turbine will result in better performance and assist in minimizing the cost of operation. If a wind turbine fails, it's a loss of investment and can be harmful for the surrounding habitat. This thesis aims towards estimating the reliability of an offshore wind turbine. A model of Jacket type offshore wind turbine is prepared by using finite element software package ABAQUS and is compared with the structural failure criteria of the wind turbine tower. UQLab, which is a general uncertainty quantification framework developed at ETH Z{\"u}rich, is used for the reliability analysis. Several probabilistic methods are included in the framework of UQLab, which include Monte Carlo, First Order Reliability Analysis and Adaptive Kriging Monte Carlo simulation. This reliability study is performed only for the structural failure of the wind turbine but it can be extended to many other forms of failures e.g. reliability for power production, or reliability for different component failures etc. It's a useful tool that can be utilized to estimate the reliability of future wind turbines, that could result in more safer and better performance of wind turbines.}, subject = {Windturbine}, language = {en} } @unpublished{AbbasKavrakovMorgenthaletal., author = {Abbas, Tajammal and Kavrakov, Igor and Morgenthal, Guido and Lahmer, Tom}, title = {Prediction of aeroelastic response of bridge decks using artificial neural networks}, doi = {10.25643/bauhaus-universitaet.4097}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40974}, abstract = {The assessment of wind-induced vibrations is considered vital for the design of long-span bridges. 
The aim of this research is to develop a methodological framework for robust and efficient prediction strategies for complex aerodynamic phenomena using hybrid models that employ numerical analyses as well as meta-models. Here, an approach to predict motion-induced aerodynamic forces is developed using artificial neural network (ANN). The ANN is implemented in the classical formulation and trained with a comprehensive dataset which is obtained from computational fluid dynamics forced vibration simulations. The input to the ANN is the response time histories of a bridge section, whereas the output is the motion-induced forces. The developed ANN has been tested for training and test data of different cross section geometries which provide promising predictions. The prediction is also performed for an ambient response input with multiple frequencies. Moreover, the trained ANN for aerodynamic forcing is coupled with the structural model to perform fully-coupled fluid--structure interaction analysis to determine the aeroelastic instability limit. The sensitivity of the ANN parameters to the model prediction quality and the efficiency has also been highlighted. The proposed methodology has wide application in the analysis and design of long-span bridges.}, subject = {Aerodynamik}, language = {en} } @unpublished{KhosraviSheikhKhozaniCooper, author = {Khosravi, Khabat and Sheikh Khozani, Zohreh and Cooper, James R.}, title = {Predicting stable gravel-bed river hydraulic geometry: A test of novel, advanced, hybrid data mining algorithms}, volume = {2021}, doi = {10.25643/bauhaus-universitaet.4499}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211004-44998}, abstract = {Accurate prediction of stable alluvial hydraulic geometry, in which erosion and sedimentation are in equilibrium, is one of the most difficult but critical topics in the field of river engineering. 
Data mining algorithms have been gaining more attention in this field due to their high performance and flexibility. However, an understanding of the potential for these algorithms to provide fast, cheap, and accurate predictions of hydraulic geometry is lacking. This study provides the first quantification of this potential. Using at-a-station field data, predictions of flow depth, water-surface width and longitudinal water surface slope are made using three standalone data mining techniques -, Instance-based Learning (IBK), KStar, Locally Weighted Learning (LWL) - along with four types of novel hybrid algorithms in which the standalone models are trained with Vote, Attribute Selected Classifier (ASC), Regression by Discretization (RBD), and Cross-validation Parameter Selection (CVPS) algorithms (Vote-IBK, Vote-Kstar, Vote-LWL, ASC-IBK, ASC-Kstar, ASC-LWL, RBD-IBK, RBD-Kstar, RBD-LWL, CVPSIBK, CVPS-Kstar, CVPS-LWL). Through a comparison of their predictive performance and a sensitivity analysis of the driving variables, the results reveal: (1) Shield stress was the most effective parameter in the prediction of all geometry dimensions; (2) hybrid models had a higher prediction power than standalone data mining models, empirical equations and traditional machine learning algorithms; (3) Vote-Kstar model had the highest performance in predicting depth and width, and ASC-Kstar in estimating slope, each providing very good prediction performance. Through these algorithms, the hydraulic geometry of any river can potentially be predicted accurately and with ease using just a few, readily available flow and channel parameters. 
Thus, the results reveal that these models have great potential for use in stable channel design in data poor catchments, especially in developing nations where technical modelling skills and understanding of the hydraulic and sediment processes occurring in the river system may be lacking.}, subject = {Maschinelles Lernen}, language = {en} } @unpublished{RadmardRahmaniKoenke, author = {Radmard Rahmani, Hamid and K{\"o}nke, Carsten}, title = {Passive Control of Tall Buildings Using Distributed Multiple Tuned Mass Dampers}, doi = {10.25643/bauhaus-universitaet.3859}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190311-38597}, pages = {43}, abstract = {The vibration control of the tall building during earthquake excitations is a challenging task due to their complex seismic behavior. This paper investigates the optimum placement and properties of the Tuned Mass Dampers (TMDs) in tall buildings, which are employed to control the vibrations during earthquakes. An algorithm was developed to spend a limited mass either in a single TMD or in multiple TMDs and distribute them optimally over the height of the building. The Non-dominated Sorting Genetic Algorithm (NSGA - II) method was improved by adding multi-variant genetic operators and utilized to simultaneously study the optimum design parameters of the TMDs and the optimum placement. The results showed that under earthquake excitations with noticeable amplitude in higher modes, distributing TMDs over the height of the building is more effective in mitigating the vibrations compared to the use of a single TMD system. From the optimization, it was observed that the locations of the TMDs were related to the stories corresponding to the maximum modal displacements in the lower modes and the stories corresponding to the maximum modal displacements in the modes which were highly activated by the earthquake excitations. 
It was also noted that the frequency content of the earthquake has significant influence on the optimum location of the TMDs.}, subject = {Schwingungsd{\"a}mpfer}, language = {en} } @misc{Habtemariam, type = {Master Thesis}, author = {Habtemariam, Abinet Kifle}, title = {Numerical Demolition Analysis of a Slender Guyed Antenna Mast}, doi = {10.25643/bauhaus-universitaet.4460}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210723-44609}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {75}, abstract = {The main purpose of the thesis is to ensure the safe demolition of old guyed antenna masts that are located in different parts of Germany. The major problem in demolition of this masts is the falling down of the masts in unexpected direction because of buckling problem. The objective of this thesis is development of a numerical models using finite element method (FEM) and assuring a controlled collapse by coming up with different time setups for the detonation of explosives which are responsible for cutting down the cables. The result of this thesis will avoid unexpected outcomes during the demolition processes and prevent risk of collapsing of the mast over near by structures.}, subject = {Abbruch}, language = {en} } @phdthesis{Schwedler, author = {Schwedler, Michael}, title = {Integrated structural analysis using isogeometric finite element methods}, doi = {10.25643/bauhaus-universitaet.2737}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170130-27372}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {209}, abstract = {The gradual digitization in the architecture, engineering, and construction industry over the past fifty years led to an extremely heterogeneous software environment, which today is embodied by the multitude of different digital tools and proprietary data formats used by the many specialists contributing to the design process in a construction project. 
Though these projects become increasingly complex, the demands on financial efficiency and the completion within a tight schedule grow at the same time. The digital collaboration of project partners has been identified as one key issue in successfully dealing with these challenges. Yet currently, the numerous software applications and their respective individual views on the design process severely impede that collaboration. An approach to establish a unified basis for the digital collaboration, regardless of the existing software heterogeneity, is a comprehensive digital building model contributed to by all projects partners. This type of data management known as building information modeling (BIM) has many benefits, yet its adoption is associated with many difficulties and thus, proceeds only slowly. One aspect in the field of conflicting requirements on such a digital model is the cooperation of architects and structural engineers. Traditionally, these two disciplines use different abstractions of reality for their models that in consequence lead to incompatible digital representations thereof. The onset of isogeometric analysis (IGA) promised to ease the discrepancy in design and analysis model representations. Yet, that initial focus quickly shifted towards using these methods as a more powerful basis for numerical simulations. Furthermore, the isogeometric representation alone is not capable of solving the model abstraction problem. It is thus the intention of this work to contribute to an improved digital collaboration of architects and engineers by exploring an integrated analysis approach on the basis of an unified digital model and solid geometry expressed by splines. In the course of this work, an analysis framework is developed that utilizes such models to automatically conduct numerical simulations commonly required in construction projects. 
In essence, this allows to retrieve structural analysis results from BIM models in a fast and simple manner, thereby facilitating rapid design iterations and profound design feedback. The BIM implementation Industry Foundation Classes (IFC) is reviewed with regard to its capabilities of representing the unified model. The current IFC schema strongly supports the use of redundant model data, a major pitfall in digital collaboration. Additionally, it does not allow to describe the geometry by volumetric splines. As the pursued approach builds upon a unique model for both, architectural and structural design, and furthermore requires solid geometry, necessary schema modifications are suggested. Structural entities are modeled by volumetric NURBS patches, each of which constitutes an individual subdomain that, with regard to the analysis, is incompatible with the remaining full model. The resulting consequences for numerical simulation are elaborated in this work. The individual subdomains have to be weakly coupled, for which the mortar method is used. Different approaches to discretize the interface traction fields are implemented and their respective impact on the analysis results is evaluated. All necessary coupling conditions are automatically derived from the related geometry model. The weak coupling procedure leads to a linear system of equations in saddle point form, which, owed to the volumetric modeling, is large in size and, the associated coefficient matrix has, due to the use of higher degree basis functions, a high bandwidth. The peculiarities of the system require adapted solution methods that generally cause higher numerical costs than the standard procedures for symmetric, positive-definite systems do. Different methods to solve the specific system are investigated and an efficient parallel algorithm is finally proposed. 
When the structural analysis model is derived from the unified model in the BIM data, it does in general initially not meet the requirements on the discretization that are necessary to obtain sufficiently accurate analysis results. The consequently necessary patch refinements must be controlled automatically to allowfor an entirely automatic analysis procedure. For that purpose, an empirical refinement scheme based on the geometrical and possibly mechanical properties of the specific entities is proposed. The level of refinement may be selectively manipulated by the structural engineer in charge. Furthermore, a Zienkiewicz-Zhu type error estimator is adapted for the use with isogeometric analysis results. It is shown that also this estimator can be used to steer an adaptive refinement procedure.}, subject = {Finite-Elemente-Methode}, language = {en} }