@incollection{Manning, author = {Manning, Erin}, title = {10 Propositionen f{\"u}r eine radikale P{\"a}dagogik, oder: Wie den Wert neu denken?}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin und Weimar}, doi = {10.25643/bauhaus-universitaet.4266}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42660}, pages = {121 -- 138}, abstract = {Radikale P{\"a}dagogik richtet ihre Aufmerksamkeit sowohl auf die allt{\"a}glichen Techniken p{\"a}dagogischer Praxis - Techniken zur Aktivierung eines Begegnungsraumes, Techniken, sich um die Arbeit und einander zu k{\"u}mmern, Techniken des kultur{\"u}bergreifenden Zuh{\"o}rens, Techniken, sich dem Mehr-als zuzuwenden - als auch auf Techniken zum »{\"U}berschreiten der Schwelle«. Das {\"U}berschreiten der Schwelle h{\"a}ngt mit der Art und Weise der Anpassung (accommodation) zusammen, die es erm{\"o}glicht, das Lernen in all seinen Erscheinungsformen wertzusch{\"a}tzen.}, subject = {P{\"a}dagogik}, language = {de} } @phdthesis{Kunert, author = {Kunert, Andr{\'e}}, title = {3D Interaction Techniques in Multi-User Virtual Reality : towards scalable templates and implementation patterns for cooperative interfaces}, doi = {10.25643/bauhaus-universitaet.4296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201204-42962}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Multi-user projection systems provide a coherent 3D interaction space for multiple co-located users that facilitates mutual awareness, full-body interaction, and the coordination of activities. The users perceive the shared scene from their respective viewpoints and can directly interact with the 3D content. 
This thesis reports on novel interaction patterns for collaborative 3D interaction for local and distributed user groups based on such multi-user projection environments. A particular focus of our developments lies in the provision of multiple independent interaction territories in our workspaces and their tight integration into collaborative workflows. The motivation for such multi-focus workspaces is grounded in research on social cooperation patterns, specifically in the requirement for supporting phases of loose and tight collaboration and the emergence of dedicated working territories for private usage and public exchange. We realized independent interaction territories in the form of handheld virtual viewing windows and multiple co-located hardware displays in a joint workspace. They provide independent views of a shared virtual environment and serve as access points for the exploration and manipulation of the 3D content. Their tight integration into our workspace supports fluent transitions between individual work and joint user engagement. The different affordances of various displays in an exemplary workspace consisting of a large 3D wall, a 3D tabletop, and handheld virtual viewing windows, promote different usage scenarios, for instance for views from an egocentric perspective, miniature scene representations, close-up views, or storage and transfer areas. This work shows that this versatile workspace can make the cooperation of multiple people in joint tasks more effective, e.g. by parallelizing activities, distributing subtasks, and providing mutual support. In order to create, manage, and share virtual viewing windows, this thesis presents the interaction technique of Photoportals, a tangible interface based on the metaphor of digital photography. They serve as configurable viewing territories and enable the individual examination of scene details as well as the immediate sharing of the prepared views. 
Photoportals are specifically designed to complement other interface facets and provide extended functionality for scene navigation, object manipulation, and for the creation of temporal recordings of activities in the virtual scene. A further objective of this work is the realization of a coherent interaction space for direct 3D input across the independent interaction territories in multi-display setups. This requires the simultaneous consideration of user input in several potential interaction windows as well as configurable disambiguation schemes for the implicit selection of distinct interaction contexts. We generalized the required implementation structures into a high-level software pattern and demonstrated its versatility by means of various multi-context 3D interaction tools. Additionally, this work tackles specific problems related to group navigation in multiuser projection systems. Joint navigation of a collocated group of users can lead to unintentional collisions when passing narrow scene sections. In this context, we suggest various solutions that prevent individual collisions during group navigation and discuss their effect on the perceived integrity of the travel group and the 3D scene. For collaboration scenarios involving distributed user groups, we furthermore explored different configurations for joint and individual travel. Last but not least, this thesis provides detailed information and implementation templates for the realization of the proposed interaction techniques and collaborative workspaces in scenegraph-based VR systems. 
These contributions to the abstraction of specific interaction patterns, such as group navigation and multi-window interaction, facilitate their reuse in other virtual reality systems and their adaptation to further collaborative scenarios.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in three forms of paddy, brown, and white are analyzed through pre-processing and segmentation of using MATLAB. Ninety-two specificities, including 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of data was evaluated, and the possibility of observing a significant difference between all specificities of cultivars was studied using variance analysis. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. 
To reduce data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separations was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayered perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for identifying and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for residence and usage of urban infrastructure has been increased, thereby resulting in the elevation of risk levels of human lives over natural calamities. The occupancy demand has rapidly increased the construction rate, whereas the inadequate design of structures prone to more vulnerability. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. The structural collapse causes an economic loss as well as setbacks for human lives. 
An application of different theoretical methods to analyze the structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performances is necessary for future developments. The process, as mentioned earlier, is known as Rapid Visual Screening (RVS). This technique has been generated to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality does not provide some of the required parameters; in this case, the RVS process turns into a tedious scenario. Hence, to tackle such a situation, multiple-criteria decision-making (MCDM) methods for the seismic vulnerability assessment opens a new gateway. The different parameters required by RVS can be taken in MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper has aimed to bridge the gap between RVS and MCDM. Furthermore, to define the correlation between these techniques, implementation of the methodologies from Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes has been done. 
The effects of seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, using the advancements in computational science and numerical modeling shall equip humanity to predict its severity, understand the outcomes, and equip for post-disaster management. Many buildings exist amidst the developed metropolitan areas, which are senile and still in service. These buildings were also designed before establishing national seismic codes or without the introduction of construction regulations. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the existing structure's performance. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through performing structural analysis, as it might be unrealistic because of the rigorous computations, long period, and substantial expenditure. 
Therefore, it calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers. Based on the study and the results, the ML model using SVM classifies the given input data into the belonging classes and accomplishes the performance on hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, social-economic, land-use, and physical context. 
These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. On the other hand, layers corresponding to sub-criteria were made in ArcGIS 10.3 simultaneously, then via a weighted overlay (map algebra), a locating plan was created. In the next step seven hypothetical tall buildings (20 stories), in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the street and open spaces throughout the city. These processes have been modeled by MATLAB software, and the final fuzzy visibility plan was created by ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it on big data comes with computational challenges. 
Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a large computational cost so that it is no longer applicable by a single computing machine. One of the proposed techniques to make classification methods applicable on large datasets is pruning. LC-KNN is an improved KNN method which first clusters the data into some smaller partitions using the K-means clustering method; and then applies the KNN for each new sample on the partition which its center is the nearest one. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. In this paper, an approach has been proposed to improve the pruning phase of the LC-KNN method by taking into account these factors. The proposed approach helps to choose a more appropriate cluster of data for looking for the neighbors, thus, increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. 
The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of adsorbed styrene acrylate copolymer (SA) particles on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, the visible spectrophotometry (VIS) and the thermo-gravimetry coupled with mass spectrometry (TG-MS). Considering the advantages and disadvantages of each method, including the respectively required sample preparation, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. There is a tendency that significantly lower amounts of adsorbed polymers were identified using TG-MS compared to values determined with the depletion method. Spectrophotometrically generated values were lying in between these extremes. This tendency was found for three of the four cement pastes examined and is originated in sample preparation and methodical limitations. The main influencing factor is the falsification of the polymer concentration in the liquid phase during centrifugation. Interactions in the interface between sediment and supernatant are the cause. 
The newly developed method, using TG-MS for the quantification of SA particles, proved to be suitable for dealing with these revealed issues. Here, instead of the fluid phase, the sediment is examined with regard to the polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} } @phdthesis{Winkel, author = {Winkel, Benjamin}, title = {A three-dimensional model of skeletal muscle for physiological, pathological and experimental mechanical simulations}, doi = {10.25643/bauhaus-universitaet.4300}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201211-43002}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In recent decades, a multitude of concepts and models were developed to understand, assess and predict muscular mechanics in the context of physiological and pathological events. Most of these models are highly specialized and designed to selectively address fields in, e.g., medicine, sports science, forensics, product design or CGI; their data are often not transferable to other ranges of application. A single universal model, which covers the details of biochemical and neural processes, as well as the development of internal and external force and motion patterns and appearance could not be practical with regard to the diversity of the questions to be investigated and the task to find answers efficiently. With reasonable limitations though, a generalized approach is feasible. The objective of the work at hand was to develop a model for muscle simulation which covers the phenomenological aspects, and thus is universally applicable in domains where up until now specialized models were utilized. This includes investigations on active and passive motion, structural interaction of muscles within the body and with external elements, for example in crash scenarios, but also research topics like the verification of in vivo experiments and parameter identification. 
For this purpose, elements for the simulation of incompressible deformations were studied, adapted and implemented into the finite element code SLang. Various anisotropic, visco-elastic muscle models were developed or enhanced. The applicability was demonstrated on the base of several examples, and a general base for the implementation of further material models was developed and elaborated.}, subject = {Biomechanik}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {The classical Internet of things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to the collision in simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection and control solutions to congestion management in the network which are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using the Fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent the collision. 
In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of linking in the pathway from the pre-congested node. The main goals of this study are to balance energy consumption in network nodes, reducing the rate of lost packets and increasing quality of service in routing. Simulation results proved the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable in improving routing parameters as compared to recent algorithms.}, subject = {Internet der dinge}, language = {en} } @phdthesis{Oucif, author = {Oucif, Chahmi}, title = {Analytical Modeling of Self-Healing and Super Healing in Cementitious Materials}, doi = {10.25643/bauhaus-universitaet.4229}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200831-42296}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {208}, abstract = {Self-healing materials have recently become more popular due to their capability to autonomously and autogenously repair the damage in cementitious materials. The concept of self-healing gives the damaged material the ability to recover its stiffness. This gives a difference in comparing with a material that is not subjected to healing. Once this material is damaged, it cannot sustain loading due to the stiffness degradation. Numerical modeling of self-healing materials is still in its infancy. Multiple experimental researches were conducted in literature to describe the behavior of self-healing of cementitious materials. However, few numerical investigations were undertaken. The thesis presents an analytical framework of self-healing and super healing materials based on continuum damage-healing mechanics. Through this framework, we aim to describe the recovery and strengthening of material stiffness and strength. A simple damage healing law is proposed and applied on concrete material. The proposed damage-healing law is based on a new time-dependent healing variable. 
The damage-healing model is applied on isotropic concrete material at the macroscale under tensile load. Both autonomous and autogenous self-healing mechanisms are simulated under different loading conditions. These two mechanisms are denoted in the present work by coupled and uncoupled self-healing mechanisms, respectively. We assume in the coupled self-healing that the healing occurs at the same time with damage evolution, while we assume in the uncoupled self-healing that the healing occurs when the material is deformed and subjected to a rest period (damage is constant). In order to describe both coupled and uncoupled healing mechanisms, a one-dimensional element is subjected to different types of loading history. In the same context, derivation of nonlinear self-healing theory is given, and comparison of linear and nonlinear damage-healing models is carried out using both coupled and uncoupled self-healing mechanisms. The nonlinear healing theory includes generalized nonlinear and quadratic healing models. The healing efficiency is studied by varying the values of the healing rest period and the parameter describing the material characteristics. In addition, theoretical formulation of different self-healing variables is presented for both isotropic and anisotropic materials. The healing variables are defined based on the recovery in elastic modulus, shear modulus, Poisson's ratio, and bulk modulus. The evolution of the healing variable calculated based on cross-section as function of the healing variable calculated based on elastic stiffness is presented in both hypotheses of elastic strain equivalence and elastic energy equivalence. The components of the fourth-rank healing tensor are also obtained in the case of isotropic elasticity, plane stress and plane strain. Recent research revealed that self-healing presents a crucial solution also for the strengthening of the materials. This new concept has been termed ``Super Healing''. 
Once the stiffness of the material is recovered, further healing can result as a strengthening material. In the present thesis, new theory of super healing materials is defined in isotropic and anisotropic cases using sound mathematical and mechanical principles which are applied in linear and nonlinear super healing theories. Additionally, the link of the proposed theory with the theory of undamageable materials is outlined. In order to describe the super healing efficiency in linear and nonlinear theories, the ratio of effective stress to nominal stress is calculated as function of the super healing variable. In addition, the hypotheses of elastic strain and elastic energy equivalence are applied. In the same context, new super healing matrix in plane strain is proposed based on continuum damage-healing mechanics. In the present work, we also focus on numerical modeling of impact behavior of reinforced concrete slabs using the commercial finite element package Abaqus/Explicit. Plain and reinforced concrete slabs of unconfined compressive strength 41 MPa are simulated under impact of ogive-nosed hard projectile. The constitutive material modeling of the concrete and steel reinforcement bars is performed using the Johnson-Holmquist-2 damage and the Johnson-Cook plasticity material models, respectively. Damage diameters and residual velocities obtained by the numerical model are compared with the experimental results and effect of steel reinforcement and projectile diameter is studied.}, subject = {Schaden}, language = {en} } @phdthesis{Jentzsch, author = {Jentzsch, Sina}, title = {Appell der Dinge. K{\"u}nstlerische Zuwendung zu den Dingen in den 1960er-Jahren}, doi = {10.25643/bauhaus-universitaet.4112}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200323-41129}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {261}, abstract = {Diese Dissertation besch{\"a}ftigt sich mit Kunstwerken, die das allt{\"a}gliche Ding in den Blick nehmen. 
N{\"a}hrboden dieser Kunstform sind die soziokulturellen Entwicklungen des 20. Jahrhunderts, mit denen wesentliche Ver{\"a}nderungen hinsichtlich des Verh{\"a}ltnisses von Mensch und Ding einhergingen. Daraus resultierte eine allgemeine k{\"u}nstlerische Zuwendung zu den Dingen und eine einzigartige Kulmination aus verschiedenartigen Auseinandersetzungen mit ihnen als kunstf{\"a}hige Gegenst{\"a}nde, {\"u}ber die sich die neue Dingwelt erschlossen wurde und deren Kunstwerke einen Spiegel dieser Entwicklungen darstellen. Die Dissertation stellt ebenfalls die Dinge selbst in den Fokus. Vier Aspekte von Dingen (Materialit{\"a}t, Funktionalit{\"a}t, Repr{\"a}sentationalit{\"a}t und Relationalit{\"a}t) werden gesondert ins Auge gefasst und in den theoretischen Diskurs des 20. Jahrhunderts eingeordnet, um sie als Teil der gelebten Realit{\"a}t besser zu verstehen, von der sich der {\"a}sthetische Blick nicht trennen l{\"a}sst. Anhand der k{\"u}nstlerischen Positionen von Robert Rauschenberg, Christo und Jeanne-Claude, Daniel Spoerri und Arman sowie Claes Oldenburg werden die verschiedenen Aspekte der Dinge n{\"a}her betrachtet und analysiert, wie diese speziell in den Kunstwerken thematisiert werden und welche Relevanz sie f{\"u}r deren Rezeptionserfahrung haben. Die Korrelation dieser beiden Ebenen - die Dinge als konstitutiver Bestandteil im sozialen Raum und die Dinge als Elemente in Kunstwerken -, die im Fokus der vorliegenden Untersuchung steht, erm{\"o}glicht es, die k{\"u}nstlerische Zuwendung zu den Dingen in den 1960er-Jahren neu einzuordnen. 
Dar{\"u}ber hinaus wird dadurch ein differenziertes Bild von der Kunst dieser Zeit sowie den Dingen in der Kunst im Allgemeinen gezeichnet.}, subject = {Ding}, language = {de} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to hit the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners with the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis on each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost. This exhibits the need for a fast, reliable, and rapid method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. 
The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building's data consists of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} } @phdthesis{RadmardRahmani, author = {Radmard Rahmani, Hamid}, title = {Artificial Intelligence Approach for Seismic Control of Structures}, doi = {10.25643/bauhaus-universitaet.4135}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200417-41359}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In the first part of this research, the utilization of tuned mass dampers in the vibration control of tall buildings during earthquake excitations is studied. The main issues such as optimizing the parameters of the dampers and studying the effects of frequency content of the target earthquakes are addressed. The non-dominated sorting genetic algorithm method is improved by upgrading genetic operators, and is utilized to develop a framework for determining the optimum placement and parameters of dampers in tall buildings. A case study is presented in which the optimal placement and properties of dampers are determined for a model of a tall building under different earthquake excitations through computer simulations. In the second part, a novel framework for the brain learning-based intelligent seismic control of smart structures is developed. In this approach, a deep neural network learns how to improve structural responses during earthquake excitations using feedback control. Reinforcement learning method is improved and utilized to develop a framework for training the deep neural network as an intelligent controller. The efficiency of the developed framework is examined through two case studies including a single-degree-of-freedom system and a high-rise building under different earthquake excitation records. 
The results show that the controller gradually develops an optimum control policy to reduce the vibrations of a structure under an earthquake excitation through a cyclical process of actions and observations. It is shown that the controller efficiently improves the structural responses under new earthquake excitations for which it was not trained. Moreover, it is shown that the controller has a stable performance under uncertainties.}, subject = {Erdbeben}, language = {en} } @phdthesis{Mueller, author = {M{\"u}ller, Jan Philip}, title = {Audiovision und Synchronisation. Sehen, H{\"o}ren und Gleichzeitigkeit in Anordnungen vom Observatorium {\"u}ber psychologische Experimente bis zum Tonfilm im 19. und 20. Jahrhundert}, doi = {10.25643/bauhaus-universitaet.4290}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201123-42906}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {516}, abstract = {In dieser Untersuchung wird eine Geschichte von Problemen der Gleichzeitigkeit zwischen Sehen und H{\"o}ren, beziehungsweise der Synchronit{\"a}t von Bild und Ton, bis zur Entstehung des Tonfilms rekonstruiert. Dabei werden Linien gezogen zwischen diskursiven Konfigurationen und medialen Anordnungen, in denen das Verh{\"a}ltnis von Sehen und H{\"o}ren oder Bild und Ton als zeitliches erscheint - in denen Sehen und H{\"o}ren, Bild und Ton zwischen Mannigfaltigkeit und Einheit verschmelzen, auseinanderfallen, interagieren, redundant oder spezifisch werden, einander erg{\"a}nzen, dominieren, verfehlen, verdr{\"a}ngen, aufspalten… Tonfilm ist in der Kinogeschichte eben nicht nur eine Erg{\"a}nzung. Vielmehr {\"a}hnelt er dem Auftritt eines Gespensts, das das Wissen und die Techniken der Trennung der Sinne schon l{\"a}nger, vielleicht schon immer begleitet hatte. Das Auftreten des Tonfilms ist auch {\"u}berhaupt fr{\"u}her Anlass eines weitreichenden Diskurses dar{\"u}ber, was Audiovision eigentlich sein k{\"o}nnte und sollte. 
Noch allgemeiner k{\"o}nnte auch davon gesprochen werden, dass Tonfilm eins der ersten großen Projekte der Konvergenz technischer Medien ist, die heute - besonders angesichts des Computers - als entscheidender Aspekt von Mediengeschichte erscheint. Die Linien der Probleme von Gleichzeitigkeit/Ungleichzeitigkeit an den Schnittstellen von Wissen, Technik und {\"A}sthetik werden insbesondere durch drei Felder hindurch nachgezeichnet: 1) Die Geschichte von Intermodalit{\"a}t in Bezug auf die Frage nach Gleichzeitigkeit und Ungleichzeitigkeit als Problem und Gegenstand von Wissenschaft seit dem 19. Jahrhundert, vornehmlich in zwei Gebieten: Als Fehlerquelle im astronomischen Observatorium bei der Messung, Feststellung und Vereinheitlichung von Raum und Zeit, die auf individuelle Abweichungen intermodaler Wahrnehmung verweist und als Problem der „pers{\"o}nlichen Gleichung" weit {\"u}ber die Astronomie hinaus Karriere macht. Als heiße Zone wahrnehmungspsychologischer Experimente und ihrer Apparate seit der Mitte des 19. Jahrhunderts, die mit dem Konzept der „Komplikation" Fragen nach einer Synthese der Sinneswahrnehmungen und damit letztlich nach der Selbstgegenwart des Menschen stellt. 2) Eine Technikgeschichte des Problems auditive und visuelle Zeitmedien - wie Phonograph und Film - zu koppeln, zu synchronisieren. Darin eskalieren zwei zeitkritische Relationen: Einerseits zwischen diskreter, intermittierender Bewegung des Films und stetiger, kontinuierlicher Bewegung des Phonographen, andererseits in Bezug darauf, an welcher Stelle - wo und wann - audiovisuelle Gegenwart des Kinos entsteht; oder auch verfehlt wird. 3) Eine Geschichte von Filmtheorie und -{\"a}sthetik, in der sich mit der Durchsetzung des Tonfilms um 1930 die Frage stellt, was dieses neue Medium sei und was damit zu tun. 
Diese Verhandlungen spannen sich zwischen dem formulierten Ziel einer spezifischen Illusion oder Pr{\"a}senz von Tonfilm durch Synchronit{\"a}t auf der einen Seite und der sich aus dem Verdacht des Betrugs durch Synchronit{\"a}t ergebenden Forderung nach „Asynchronismus" als kritischer Methode auf der anderen Seite auf. Ausgehend von der These, dass im 19. Jahrhundert die Sinne aufgeteilt werden, wird in diesen Anordnungen an irgendeiner Stelle Heterogenes gleichzeitig passieren. An welcher Stelle? Und was bedeuten diese (Un-)Gleichzeitigkeiten? Was dabei - sehr allgemein gesprochen - auf dem Spiel steht, sind M{\"o}glichkeiten einer audiovisuell geteilten - getrennten oder gemeinsamen - Welt und Gegenwart.}, subject = {Tonfilm}, language = {de} } @article{BecherVoelkerRodehorstetal., author = {Becher, Lia and V{\"o}lker, Conrad and Rodehorst, Volker and Kuhne, Michael}, title = {Background-oriented schlieren technique for two-dimensional visualization of convective indoor air flows}, series = {Optics and Lasers in Engineering}, volume = {2020}, journal = {Optics and Lasers in Engineering}, number = {Volume 134, article 106282}, doi = {10.1016/j.optlaseng.2020.106282}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220810-46972}, pages = {9}, abstract = {This article focuses on further developments of the background-oriented schlieren (BOS) technique to visualize convective indoor air flow, which is usually defined by very small density gradients. Since the light rays deflect when passing through fluids with different densities, BOS can detect the resulting refractive index gradients as integration along a line of sight. In this paper, the BOS technique is used to yield a two-dimensional visualization of small density gradients. The novelty of the described method is the implementation of a highly sensitive BOS setup to visualize the ascending thermal plume from a heated thermal manikin with temperature differences of minimum 1 K. 
To guarantee steady boundary conditions, the thermal manikin was seated in a climate laboratory. For the experimental investigations, a high-resolution DSLR camera was used capturing a large field of view with sufficient detail accuracy. Several parameters such as various backgrounds, focal lengths, room air temperatures, and distances between the object of investigation, camera, and structured background were tested to find the most suitable parameters to visualize convective indoor air flow. Besides these measurements, this paper presents the analyzing method using cross-correlation algorithms and finally the results of visualizing the convective indoor air flow with BOS. The highly sensitive BOS setup presented in this article complements the commonly used invasive methods that highly influence weak air flows.}, subject = {Raumklima}, language = {en} } @book{BieberBuskeElertetal., author = {Bieber, Constanze and Buske, Johann and Elert, Robert and G{\"o}bel, Hannah and Gripp, David and Hempel, Anne-Mareike and Hummitzsch, Ruben and Kamigashima Kohmann, Laelia and Klocke, Johanna and Mann, Michael and Mitzenheim, Robert and Oehler, Louis and Pfeffer, Edna and Pfeiffer, Julia and Pieper, Kai and Schwarz, Philipp and Zeyse, Samuel}, title = {Barf{\"u}ßerkirche Erfurt: Weiterbauen an der Ruine}, editor = {Angermann, Kirsten and Engelmann, Iris and Horn, Karsten}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4203}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200727-42037}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {119}, abstract = {Die Ruine der Barf{\"u}ßerkirche in Erfurt stellt eine der letzten Erinnerungen an die Zerst{\"o}rungen der Stadt im Zweiten Weltkrieg dar. Sie wird bis heute tempor{\"a}r und saisonal kulturell genutzt. 
Im Rahmen eines Studienprojektes im Sommersemester 2019 wurden an der Bauhaus-Universit{\"a}t Weimar, betreut durch die Professur Denkmalpflege und Baugeschichte und unterst{\"u}tzt vom Initiativkreis Barf{\"u}ßerkirche, Nutzungskonzepte f{\"u}r ein Museum f{\"u}r Mittelalterkunst und f{\"u}r einen Tagungsort untersucht. Der vorliegende Band dokumentiert die 14 studentischen Entw{\"u}rfe, die f{\"u}r ein Weiterbauen an der Barf{\"u}ßerkirche entstanden sind.}, subject = {Architektur}, language = {de} } @phdthesis{Gretzki, author = {Gretzki, Allan}, title = {BundeskunstHall of Fame - Realisierungsprozess eines Graffiti Ausstellungsprojekts im musealen Kontext}, doi = {10.25643/bauhaus-universitaet.4215}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200818-42158}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {228}, abstract = {Realisierungsprozess eines Graffiti Ausstellungsprojekts im musealen Kontext}, subject = {Graffito}, language = {de} } @phdthesis{Truemer, author = {Tr{\"u}mer, Andr{\'e}}, title = {Calcinierte Tone als Puzzolane der Zukunft - Von den Rohstoffen bis zur Wirkung im Beton}, isbn = {978-3-00-065011-6}, doi = {10.25643/bauhaus-universitaet.4096}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200214-40968}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {222}, abstract = {Vor dem Hintergrund einer stetig wachsenden Nachfrage an Beton wie auch ambitionierter Reduktionsziele beim in der Zementproduktion anfallenden CO2 gelten calcinierte Tone als derzeit aussichtsreichste technische Neuerung im Bereich nachhaltiger Bindemittelkonzepte. Unter Ausnutzung ihrer Puzzolanit{\"a}t soll ein erheblicher Teil der Klinkerkomponente im Zement ersetzt werden, wobei der zu ihrer Aktivierung notwendige Energiebedarf vergleichsweise niedrig ist. Wesentliche Vorteile der Tone sind ihre weltweit nahezu unbegrenzte Verf{\"u}gbarkeit sowie der {\"a}ußerst geringe rohstoffbedingte CO2-Ausstoß w{\"a}hrend der Calcinierung. 
Schwierigkeiten auf dem Weg der Umsetzung bestehen allerdings in der Vielseitigkeit des Systems, welches durch eine hohe Variet{\"a}t der Rohtone und des daraus folgenden thermischen Verhaltens gekennzeichnet ist. Entsprechend schwierig ist die {\"U}bertragbarkeit von Erfahrungen mit bereits etablierten calcinierten Tonen wie dem Metakaolin, der sich durch hohe Reinheit, einen aufwendigen Aufbereitungsprozess und eine entsprechend hohe Reaktivit{\"a}t auszeichnet. Ziel der Arbeit ist es daher, den bereits erlangten Kenntnisstand auf andere, wirtschaftlich relevante Tone zu erweitern und deren Eignung f{\"u}r die Anwendung im Beton herauszuarbeiten. In einem mehrstufigen Arbeitsprogramm wurde untersucht, inwieweit großtechnisch nutzbare Tone aktivierbar sind und welche Eigenschaften sich daraus f{\"u}r Zement und Beton ergeben. Die dabei festgestellte Reihenfolge Kaolinit > Montmorillonit > Illit beschreibt sowohl die Reaktivit{\"a}t der Brennprodukte als auch umgekehrt die H{\"o}he der optimalen Calciniertemperatur. Auch wandelt sich der Charakter der entstandenen Metaphasen in dieser Abfolge von r{\"o}ntgenamorph und hochreaktiv zu glasig und reaktionstr{\"a}ge. Trotz dieser Einordnung konnte selbst mit dem Illit eine mit Steinkohlenflugasche vergleichbare Puzzolanit{\"a}t festgestellt werden. Dies best{\"a}tigte sich anschließend in Parameterversuchen, bei denen die Einfl{\"u}sse von Rohstoffqualit{\"a}t, Calcinierung, Aufbereitung und Zement hinsichtlich der Reaktivit{\"a}tsausbeute bewertet wurden. Die Bandbreite der erzielbaren Qualit{\"a}ten ist dabei immens und gipfelt nicht zuletzt in stark unterschiedlichen Wirkungen auf die Festbetoneigenschaften. Hier machte sich vor allem die f{\"u}r Puzzolane typische Porenverfeinerung bemerkbar, sodass viele von Transportvorg{\"a}ngen abh{\"a}ngige Schadmechanismen unterdr{\"u}ckt wurden. Andere Schadexpositionen wie der Frostangriff ließen sich durch Zusatzmaßnahmen wie dem Eintrag von Luftporen beherrschen. 
Zu bem{\"a}ngeln sind vor allem die schlechte Verarbeitbarkeit kaolinitischer Metatone wie auch die f{\"u}r Puzzolane stark ausgepr{\"a}gte Carbonatisierungsneigung. Wesentliches Ergebnis der Arbeit ist, dass auch Tone, die bisher als geringwertig bez{\"u}glich des Aktivierungspotentials galten, nutzbare puzzolanische Eigenschaften entwickeln k{\"o}nnen. So kann selbst ein stark verunreinigter Illit-Ton die Qualit{\"a}t von Flugasche erreichen. Mit steigendem Tonmineralgehalt sowie bei Pr{\"a}senz thermisch instabilerer Tonminerale wie Montmorillonit und Kaolinit erweitert sich das Spektrum nutzbarer Puzzolanit{\"a}ten bis hin zur hochreaktiven Metakaolin-Qualit{\"a}t. Damit lassen sich gute bis sehr gute Betoneigenschaften erzielen, sodass die Leistungsf{\"a}higkeit etablierter Kompositmaterialien erreicht wird. Somit sind die Voraussetzungen f{\"u}r eine umfangreiche Nutzung der erheblichen Tonmengen im Zement und Beton gegeben. Entsprechend k{\"o}nnen Tone einen effektiven Beitrag zu einer gesteigerten Nachhaltigkeit in der Baustoffproduktion weltweit leisten.}, subject = {Beton}, language = {de} } @incollection{Bee, author = {Bee, Julia}, title = {Collagen, Montagen, Anordnen, Umordnen - Wie mit Bildern experimentieren}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin und Weimar}, doi = {10.25643/bauhaus-universitaet.4250}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42504}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {29 -- 49}, abstract = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch Das spekulative Handbuch bietet vielf{\"a}ltige Techniken f{\"u}r ein radikales Lernen und Vermitteln. Es umfasst konkrete Anleitungen, Erfahrungen und theoretische {\"U}berlegungen. 
Die Texte beteiligen sich an der Konzeption einer Vermittlung, die das gemeinsame Experimentieren (wieder) einf{\"u}hrt. Im Seminarraum, in Workshops, auf Festivals, in Fluren, Parks und der Stadt finden Lernen und Verlernen statt. Texte und Anleitungen u. a. zu: Filmessays, Collagen, Bank{\"u}berf{\"a}llen, der Universit{\"a}t der Toten, wildem Schreiben, konzeptuellem speed Dating, neurodiversem Lernen, Format-Denken, dem Theater der Sorge, dem Schreiblabor, dem K{\"o}rperstreik.}, subject = {Montage}, language = {de} }