@article{HarirchianIsik, author = {Harirchian, Ehsan and Isik, Ercan}, title = {A Comparative Probabilistic Seismic Hazard Analysis for Eastern Turkey (Bitlis) Based on Updated Hazard Map and Its Effect on Regular RC Structures}, series = {Buildings}, volume = {2022}, journal = {Buildings}, number = {Volume 12, issue 10, article 1573}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/buildings12101573}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221028-47283}, pages = {1 -- 19}, abstract = {Determining the earthquake hazard of any settlement is one of the primary studies for reducing earthquake damage. Therefore, earthquake hazard maps used for this purpose must be renewed over time. The Turkey Earthquake Hazard Map has been used instead of the Turkey Earthquake Zones Map since 2019. A probabilistic seismic hazard analysis was performed using these last two maps and different attenuation relationships for Bitlis Province (Eastern Turkey), which is located in the Lake Van Basin and has a high seismic risk. The earthquake parameters were determined by considering all districts and neighborhoods in the province. Probabilistic seismic hazard analyses were carried out for these settlements using seismic sources and four different attenuation relationships. The obtained values are compared with the design spectrum stated in the last two earthquake maps. Significant differences exist between the design spectra obtained for the different exceedance probabilities. In this study, adaptive pushover analyses of sample reinforced concrete buildings were performed using the design ground motion level. Structural analyses were carried out using three different design spectra, as given in the last two seismic design codes and the mean spectrum obtained from attenuation relationships. Different design spectra significantly change the target displacements predicted for the performance levels of the buildings.}, subject = {Erdbeben}, language = {en} } @article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for residence and usage of urban infrastructure has increased, thereby elevating the risk to human lives from natural calamities. The occupancy demand has rapidly increased the construction rate, whereas inadequately designed structures remain more vulnerable. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. Structural collapse causes economic loss as well as setbacks for human lives. The application of different theoretical methods to analyze structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performance is necessary for future developments. This process is known as Rapid Visual Screening (RVS). This technique has been developed to identify, inventory, and screen structures that are potentially hazardous. 
Sometimes, poor construction quality means that some of the required parameters are not available; in such cases, the RVS process becomes tedious. Hence, to tackle such situations, multiple-criteria decision-making (MCDM) methods open a new gateway for seismic vulnerability assessment. The different parameters required by RVS can be incorporated into MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper aims to bridge the gap between RVS and MCDM. Furthermore, to define the correlation between these techniques, the methodologies from the Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes have been implemented. The effects of seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} } @article{BeerFirmenichRichter2004, author = {Beer, Daniel G. and Firmenich, Berthold and Richter, Torsten}, title = {A Concept for CAD Systems with Persistent Versioned Data Models}, doi = {10.25643/bauhaus-universitaet.204}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-2046}, year = {2004}, abstract = {The synchronous distributed processing of common source code in the software development process is supported by well-proven methods. The planning process has similarities with the software development process. However, there are no consistent and similarly successful methods for applications in construction projects. A new approach is proposed in this contribution.}, subject = {Produktmodell}, language = {en} } @article{ZhuangHuangLiangetal., author = {Zhuang, Xiaoying and Huang, Runqiu and Liang, Chao and Rabczuk, Timon}, title = {A coupled thermo-hydro-mechanical model of jointed hard rock for compressed air energy storage}, series = {Mathematical Problems in Engineering}, journal = {Mathematical Problems in Engineering}, doi = {10.1155/2014/179169}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170428-31726}, abstract = {Renewable energy resources such as wind and solar are intermittent, which causes instability when they are connected to the electricity utility grid. Compressed air energy storage (CAES) provides an economically and technically viable solution to this problem by utilizing subsurface rock caverns to store the electricity generated by renewable energy in the form of compressed air. Though CAES has been used for over three decades, it has been restricted to salt rock or aquifers for air-tightness reasons. In this paper, the technical feasibility of utilizing hard rock for CAES is investigated using coupled thermo-hydro-mechanical (THM) modelling of nonisothermal gas flow. Governing equations are derived from the rules of energy balance, mass balance, and static equilibrium. Cyclic volumetric mass source and heat source models are applied to simulate the gas injection and production. Evaluation is carried out for intact rock and rock with a discrete crack, respectively. 
In both cases, the heat and pressure losses using air mass control and supplementary air injection are compared.}, subject = {Energiespeicherung}, language = {en} } @article{AlaladeReichertKoehnetal., author = {Alalade, Muyiwa and Reichert, Ina and K{\"o}hn, Daniel and Wuttke, Frank and Lahmer, Tom}, title = {A Cyclic Multi-Stage Implementation of the Full-Waveform Inversion for the Identification of Anomalies in Dams}, series = {Infrastructures}, volume = {2022}, journal = {Infrastructures}, number = {Volume 7, issue 12, article 161}, editor = {Qu, Chunxu and Gao, Chunxu and Zhang, Rui and Jia, Ziguang and Li, Jiaxiang}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/infrastructures7120161}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221201-48396}, pages = {19}, abstract = {For the safe and efficient operation of dams, frequent monitoring and maintenance are required. These are usually expensive, time-consuming, and cumbersome. To alleviate these issues, we propose applying a wave-based scheme for the location and quantification of damages in dams. To obtain high-resolution "interpretable" images of the damaged regions, we drew inspiration from non-linear full-multigrid methods for inverse problems and applied a new cyclic multi-stage full-waveform inversion (FWI) scheme. Our approach is less susceptible to the stability issues faced by the standard FWI scheme when dealing with ill-posed problems. In this paper, we first selected an optimal acquisition setup and then applied synthetic data to demonstrate the capability of our approach in identifying a series of anomalies in dams by a mixture of reflection and transmission tomography. The results had sufficient robustness, showing the prospects of application in the field of non-destructive testing of dams.}, subject = {Damm}, language = {en} } @article{MaQin2004, author = {Ma, Zhiliang and Qin, Liang}, title = {A Framework of Management Information System for Construction Projects}, doi = {10.25643/bauhaus-universitaet.212}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-2126}, year = {2004}, abstract = {A comprehensive framework of a management information system for construction projects in China has been established through an extensive literature survey and field investigation. It utilizes the potential information technologies and covers the practical management patterns as well as the major aspects of construction project management. It can be used to guide and evaluate the design of information management systems for construction projects in order to make the system applicable to a wide variety of construction projects and able to survive changes in project management.}, subject = {Mehragentensystem}, language = {en} } @article{VoelskeGollubHagenetal., author = {V{\"o}lske, Michael and Gollub, Tim and Hagen, Matthias and Stein, Benno}, title = {A keyquery-based classification system for CORE}, series = {D-Lib Magazine}, journal = {D-Lib Magazine}, doi = {10.1045/november14-voelske}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170426-31662}, abstract = {We apply keyquery-based taxonomy composition to compute a classification system for the CORE dataset, a shared crawl of about 850,000 scientific papers. 
Keyquery-based taxonomy composition can be understood as a two-phase hierarchical document clustering technique that utilizes search queries as cluster labels: In a first phase, the document collection is indexed by a reference search engine, and the documents are tagged with the search queries for which they are relevant, their so-called keyqueries. In a second phase, a hierarchical clustering is formed from the keyqueries within an iterative process. We use the explicit topic model ESA as the document retrieval model in order to index the CORE dataset in the reference search engine. Under the ESA retrieval model, documents are represented as vectors of similarities to Wikipedia articles, a methodology proven to be advantageous for text categorization tasks. Our paper presents the generated taxonomy and reports on quantitative properties such as document coverage and processing requirements.}, subject = {Massendaten}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, using the advancements in computational science and numerical modeling shall equip humanity to predict its severity, understand the outcomes, and prepare for post-disaster management. Many buildings amidst the developed metropolitan areas are aged and still in service. These buildings were also designed before establishing national seismic codes or without the introduction of construction regulations. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the existing structure's performance. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through performing structural analysis, as it might be unrealistic because of the rigorous computations, long duration, and substantial expenditure. Therefore, it calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers. 
Based on the study and the results, the ML model using SVM classifies the given input data into the corresponding classes and performs the hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, socio-economic factors, land use, and physical context. These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. In parallel, layers corresponding to the sub-criteria were made in ArcGIS 10.3, and then, via a weighted overlay (map algebra), a locating plan was created. In the next step, seven hypothetical tall buildings (20 stories), in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the streets and open spaces throughout the city. These processes have been modeled by MATLAB software, and the final fuzzy visibility plan was created by ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city, and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @article{ElRayesHyari2004, author = {El-Rayes, Khaled and Hyari, Khalied}, title = {A Multi-objective Model for Optimizing Construction Planning of Repetitive Infrastructure Projects}, doi = {10.25643/bauhaus-universitaet.213}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-2135}, year = {2004}, abstract = {This paper presents the development of a model for optimizing resource utilization in repetitive infrastructure projects. The model provides the capability of simultaneously minimizing both project duration and work interruptions for construction crews. The model provides, in a single run, a set of nondominated solutions that represent the tradeoff between these two objectives. The model incorporates a multiobjective genetic algorithm and a scheduling algorithm. The model initially generates a randomly selected set of solutions that evolves to a near-optimal set of tradeoff solutions in subsequent generations. Each solution represents a unique scheduling solution that is associated with a certain project duration and a number of interruption days for the utilized construction crews. 
As such, the model provides project planners with alternative schedules along with their expected duration and resource utilization efficiency.}, subject = {Mehragentensystem}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it to big data comes with computational challenges. Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a computational cost so large that the method is no longer applicable on a single computing machine. One of the techniques proposed to make classification methods applicable on large datasets is pruning. LC-KNN is an improved KNN method which first clusters the data into smaller partitions using the K-means clustering method and then applies KNN for each new sample on the partition whose center is nearest. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. In this paper, an approach is proposed to improve the pruning phase of the LC-KNN method by taking these factors into account. The proposed approach helps to choose a more appropriate cluster of data in which to look for the neighbors, thus increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of adsorbed styrene acrylate copolymer (SA) particles on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, visible spectrophotometry (VIS) and thermo-gravimetry coupled with mass spectrometry (TG-MS). Considering the advantages and disadvantages of each method, including the respectively required sample preparation, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. 
There is a tendency for significantly lower amounts of adsorbed polymers to be identified using TG-MS compared to the values determined with the depletion method. Spectrophotometrically generated values were lying in between these extremes. This tendency was found for three of the four cement pastes examined and originates in sample preparation and methodological limitations. The main influencing factor is the falsification of the polymer concentration in the liquid phase during centrifugation. Interactions at the interface between sediment and supernatant are the cause. The newly developed method, using TG-MS for the quantification of SA particles, proved to be suitable for dealing with these revealed issues. Here, instead of the fluid phase, the sediment is examined with regard to the polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} } @article{IbanezKraus, author = {Ibanez, Stalin and Kraus, Matthias}, title = {A Numerical Approach for Plastic Cross-Sectional Analyses of Steel Members}, series = {ce/papers}, volume = {2021}, journal = {ce/papers}, number = {Volume 4, issue 2-4}, publisher = {Ernst \& Sohn, a Wiley brand}, address = {Berlin}, doi = {10.1002/cepa.1527}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220112-45622}, pages = {2098 -- 2106}, abstract = {Global structural analyses in civil engineering are usually performed considering linear-elastic material behavior. However, for steel structures, a certain degree of plasticization depending on the member classification may be considered. Corresponding plastic analyses taking material nonlinearities into account are effectively realized using numerical methods. Frequently applied finite elements of two- and three-dimensional models evaluate the plasticity at defined nodes using a yield surface, i.e. by a yield condition, hardening rule, and flow rule. Corresponding calculations involve a large numerical and time-consuming effort, and they do not rely on the theoretical background of beam theory, to which the regulations of standards mainly correspond. For that reason, methods using beam elements (one-dimensional) combined with cross-sectional analyses are commonly applied for steel members in terms of plastic zone theories. In these approaches, plasticization is in general assessed by means of axial stress only. In this paper, a more precise numerical representation of the combined stress states, i.e. axial and shear stresses, is presented, and the results of the proposed approach are validated and discussed.}, subject = {Stahlkonstruktion}, language = {en} } @article{VuBacNguyenXuanChenetal., author = {Vu-Bac, N. and Nguyen-Xuan, Hung and Chen, Lei and Lee, C.K. and Zi, Goangseup and Zhuang, Xiaoying and Liu, G.R. and Rabczuk, Timon}, title = {A phantom-node method with edge-based strain smoothing for linear elastic fracture mechanics}, series = {Journal of Applied Mathematics}, journal = {Journal of Applied Mathematics}, doi = {10.1155/2013/978026}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170426-31676}, abstract = {This paper presents a novel numerical procedure based on the combination of an edge-based smoothed finite element method (ES-FEM) with a phantom-node method for 2D linear elastic fracture mechanics. In the standard phantom-node method, the cracks are formulated by adding phantom nodes, and the cracked element is replaced by two new superimposed elements. 
This approach is quite simple to implement in existing explicit finite element programs. The shape functions associated with discontinuous elements are similar to those of the standard finite elements, which simplifies implementation in existing codes. The phantom-node method allows modeling discontinuities at an arbitrary location in the mesh. The ES-FEM model has a close-to-exact stiffness that is much softer than that of lower-order finite element methods (FEM). Taking advantage of both the ES-FEM and the phantom-node method, we introduce an edge-based strain smoothing technique for the phantom-node method. Numerical results show that the proposed method achieves high accuracy compared with the extended finite element method (XFEM) and other reference solutions.}, subject = {Finite-Elemente-Methode}, language = {en} } @article{FoorgberMueller1997, author = {Foorgber, U. and M{\"u}ller, Christian}, title = {A Planning Process Model for Computer Supported Cooperative Work in Building Construction}, doi = {10.25643/bauhaus-universitaet.492}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-4922}, year = {1997}, abstract = {The increasing complexity of today's buildings requires a high level of integration in the planning process. Common planning strategies, where individual project partners cooperate mainly to exchange results, are not suitable for jointly developing project goals and objectives. Integrated planning, a more holistic approach to dealing with complex problems, is based on a high degree of communication among team members and leads to goal-oriented cooperation. Current approaches in the research area of Computer Supported Cooperative Work (CSCW) poorly meet the requirements of planning. A planning process model, based on the principles of integrated planning, will be introduced, aimed at providing the background for the implementation of a CSCW platform.}, subject = {Bauwesen}, language = {en} } @article{MironovPahl2004, author = {Mironov, Vadim and Pahl, Peter Jan}, title = {A Prismatic Finite Element for Accurate Arch Dam Analysis}, doi = {10.25643/bauhaus-universitaet.246}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-2467}, year = {2004}, abstract = {The displacements and stresses in arch dams and their abutments are frequently determined with 20-node brick elements. The elements are distorted near the contact plane between the wall and the abutment. A cantilever beam testbed has been developed to investigate the consequences of this distortion. It is shown that the deterioration of the accuracy in the computed stresses is significant. A compatible 18-node wedge element with linear stress variation is developed as an alternative to the brick element. The shape of this element type is readily adapted to the shape of the contact plane. It is shown that the accuracy of the computed stresses in the vicinity of the contact plane is improved significantly by the use of wedge elements.}, subject = {Finite-Elemente-Methode}, language = {en} } @article{Rebolj1997, author = {Rebolj, D.}, title = {A Product Model of a Road}, doi = {10.25643/bauhaus-universitaet.458}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20111215-4584}, year = {1997}, abstract = {Many errors and delays frequently appear when data is exchanged between particular tasks in the lifecycle of the road. Inter-task connections are therefore of great importance for the quality of the final product. 
The article describes a product model of a road which is the kernel of an integrated information system intended to support all important stages of the road lifecycle: design, evaluation (through different analysis procedures), construction, and maintenance. Since particular tasks are often executed at different places and in different companies, the interconnections are supported by a special metafile which contains all specific data of the product model. The concept of the integrated system is object- and component-oriented. Additionally, existing conventional program packages are included to support some common tasks (methods). A conventional relational database system as well as an open spatial database system with the relevant GIS functionality are included to support the data structures of the model.}, subject = {Straße}, language = {en} } @article{Lutolli, author = {Lutolli, Blerim}, title = {A Review of Domed Cities and Architecture: Past, Present and Future}, series = {Future cities and environment}, volume = {2022}, journal = {Future cities and environment}, number = {Volume 8, issue 1}, publisher = {Ubiquity Press Limited}, address = {London}, doi = {10.5334/fce.154}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20221103-47335}, pages = {1 -- 9}, abstract = {The goal of architecture is changing in response to the expanding role of cities, rapid urbanization, and transformation under changing economic, environmental, social, and demographic factors. As cities grew in the early modern era, overcrowding, urbanization, and pollution led reformers to consider the future shape of cities. One of the most critical topics in contemporary architecture is the subject of future concepts of living. In most cases, domed cities, as a future concept of living, are rarely considered, and they are used chiefly as "utopian" visions in the discourse on future ways of living. This paper reviews domed cities to deepen the understanding of the idea in practice and of its architectural approach. The main aim of this paper is to provide a broad overview of domed cities in the face of pollution, one of the main concerns in many European cities. As a result, the significance of the reviews of the existing projects is focused on their conceptual quality. This review will pave the way for further studies in terms of future developments in the realm of domed cities. In this paper, the city of Celje, one of the most polluted cities in Slovenia, is taken as a case study for considering the incorporation of the Dome concept, due to the lack of accessible literature on the topic. This review's primary contribution is to allow architects to explore a broad spectrum of innovation by comparing today's achievable statuses against the possibilities generated by domed cities. As a result of this study, the concept of living under the Dome remains to be developed in theory and practice. 
The current challenging climatic situation will accelerate the evolution of these concepts, resulting in the formation of new typologies, which are a requirement for humanity.}, subject = {Architektur}, language = {en} } @article{AlkamLahmer, author = {Alkam, Feras and Lahmer, Tom}, title = {A robust method of the status monitoring of catenary poles installed along high-speed electrified train tracks}, series = {Results in Engineering}, volume = {2021}, journal = {Results in Engineering}, number = {volume 12, article 100289}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.rineng.2021.100289}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211011-45212}, pages = {1 -- 8}, abstract = {Electric trains are considered one of the most eco-friendly and safest means of transportation. Catenary poles are used worldwide to support overhead power lines for electric trains. The performance of the catenary poles has an extensive influence on the integrity of the train systems and, consequently, the connected human services. It has become essential nowadays to develop SHM systems that provide the instantaneous status of catenary poles in service, making the decision-making processes to keep or repair the damaged poles more feasible. This study develops a data-driven, model-free approach for status monitoring of cantilever structures, focusing on pre-stressed, spun-cast ultrahigh-strength concrete catenary poles installed along high-speed train tracks. The proposed approach evaluates multiple damage features in a unified damage index, which leads to straightforward interpretation and comparison of the output. Besides, it distinguishes between multiple damage scenarios of the poles, either those caused by material degradation of the concrete or by cracks that can propagate during the life span of the given structure. Moreover, using a logistic function to classify the integrity of the structure avoids the expensive learning step of existing damage detection approaches, namely, using modern machine and deep learning methods. The findings of this study look very promising when applied to other types of cantilever structures, such as the poles that support power transmission lines, antenna masts, chimneys, and wind turbines.}, subject = {Fahrleitung}, language = {en} } @article{TalebiZiSilanietal., author = {Talebi, Hossein and Zi, Goangseup and Silani, Mohammad and Samaniego, Esteban and Rabczuk, Timon}, title = {A simple circular cell method for multilevel finite element analysis}, series = {Journal of Applied Mathematics}, journal = {Journal of Applied Mathematics}, doi = {10.1155/2012/526846}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170426-31639}, abstract = {A simple multiscale analysis framework for heterogeneous solids based on a computational homogenization technique is presented. The macroscopic strain is linked kinematically to the boundary displacement of a circular or spherical representative volume which contains the microscopic information of the material. The macroscopic stress is obtained from the energy principle between the macroscopic scale and the microscopic scale. This new method is applied to several standard examples to show the accuracy and consistency of the proposed method.}, subject = {Finite-Elemente-Methode}, language = {en} }