@incollection{Manning, author = {Manning, Erin}, title = {10 Propositionen f{\"u}r eine radikale P{\"a}dagogik, oder: Wie den Wert neu denken?}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin und Weimar}, doi = {10.25643/bauhaus-universitaet.4266}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42660}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {121 -- 138}, abstract = {Radikale P{\"a}dagogik richtet ihre Aufmerksamkeit sowohl auf die allt{\"a}glichen Techniken p{\"a}dagogischer Praxis - Techniken zur Aktivierung eines Begegnungsraumes, Techniken, sich um die Arbeit und einander zu k{\"u}mmern, Techniken des kultur{\"u}bergreifenden Zuh{\"o}rens, Techniken, sich dem Mehr-als zuzuwenden - als auch auf Techniken zum »{\"U}berschreiten der Schwelle«. Das {\"U}berschreiten der Schwelle h{\"a}ngt mit der Art und Weise der Anpassung (accommodation) zusammen, die es erm{\"o}glicht, das Lernen in all seinen Erscheinungsformen wertzusch{\"a}tzen.}, subject = {P{\"a}dagogik}, language = {de} } @phdthesis{Kunert, author = {Kunert, Andr{\'e}}, title = {3D Interaction Techniques in Multi-User Virtual Reality : towards scalable templates and implementation patterns for cooperative interfaces}, doi = {10.25643/bauhaus-universitaet.4296}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201204-42962}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {147}, abstract = {Multi-user projection systems provide a coherent 3D interaction space for multiple co-located users that facilitates mutual awareness, full-body interaction, and the coordination of activities. The users perceive the shared scene from their respective viewpoints and can directly interact with the 3D content. This thesis reports on novel interaction patterns for collaborative 3D interaction for local and distributed user groups based on such multi-user projection environments. A particular focus of our developments lies in the provision of multiple independent interaction territories in our workspaces and their tight integration into collaborative workflows. The motivation for such multi-focus workspaces is grounded in research on social cooperation patterns, specifically in the requirement for supporting phases of loose and tight collaboration and the emergence of dedicated working territories for private usage and public exchange. We realized independent interaction territories in the form of handheld virtual viewing windows and multiple co-located hardware displays in a joint workspace. They provide independent views of a shared virtual environment and serve as access points for the exploration and manipulation of the 3D content. Their tight integration into our workspace supports fluent transitions between individual work and joint user engagement. The different affordances of various displays in an exemplary workspace consisting of a large 3D wall, a 3D tabletop, and handheld virtual viewing windows, promote different usage scenarios, for instance for views from an egocentric perspective, miniature scene representations, close-up views, or storage and transfer areas. This work shows that this versatile workspace can make the cooperation of multiple people in joint tasks more effective, e.g. by parallelizing activities, distributing subtasks, and providing mutual support.
In order to create, manage, and share virtual viewing windows, this thesis presents the interaction technique of Photoportals, a tangible interface based on the metaphor of digital photography. They serve as configurable viewing territories and enable the individual examination of scene details as well as the immediate sharing of the prepared views. Photoportals are specifically designed to complement other interface facets and provide extended functionality for scene navigation, object manipulation, and for the creation of temporal recordings of activities in the virtual scene. A further objective of this work is the realization of a coherent interaction space for direct 3D input across the independent interaction territories in multi-display setups. This requires the simultaneous consideration of user input in several potential interaction windows as well as configurable disambiguation schemes for the implicit selection of distinct interaction contexts. We generalized the required implementation structures into a high-level software pattern and demonstrated its versatility by means of various multi-context 3D interaction tools. Additionally, this work tackles specific problems related to group navigation in multi-user projection systems. Joint navigation of a co-located group of users can lead to unintentional collisions when passing narrow scene sections. In this context, we suggest various solutions that prevent individual collisions during group navigation and discuss their effect on the perceived integrity of the travel group and the 3D scene. For collaboration scenarios involving distributed user groups, we furthermore explored different configurations for joint and individual travel. Last but not least, this thesis provides detailed information and implementation templates for the realization of the proposed interaction techniques and collaborative workspaces in scenegraph-based VR systems. These contributions to the abstraction of specific interaction patterns, such as group navigation and multi-window interaction, facilitate their reuse in other virtual reality systems and their adaptation to further collaborative scenarios.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in three forms of paddy, brown, and white are analyzed through pre-processing and segmentation using MATLAB.
Ninety-two specificities, including 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of data was evaluated, and the possibility of observing a significant difference between all specificities of cultivars was studied using variance analysis. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separations was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayered perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for identifying and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} } @article{HarirchianJadhavMohammadetal., author = {Harirchian, Ehsan and Jadhav, Kirti and Mohammad, Kifaytullah and Aghakouchaki Hosseini, Seyed Ehsan and Lahmer, Tom}, title = {A Comparative Study of MCDM Methods Integrated with Rapid Visual Seismic Vulnerability Assessment of Existing RC Structures}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 18, article 6411}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10186411}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200918-42360}, pages = {24}, abstract = {Recently, the demand for residence and usage of urban infrastructure has been increased, thereby resulting in the elevation of risk levels of human lives over natural calamities. The occupancy demand has rapidly increased the construction rate, whereas the inadequate design of structures prone to more vulnerability. Buildings constructed before the development of seismic codes have an additional susceptibility to earthquake vibrations. The structural collapse causes an economic loss as well as setbacks for human lives. An application of different theoretical methods to analyze the structural behavior is expensive and time-consuming. Therefore, introducing a rapid vulnerability assessment method to check structural performances is necessary for future developments. The process, as mentioned earlier, is known as Rapid Visual Screening (RVS). This technique has been generated to identify, inventory, and screen structures that are potentially hazardous. Sometimes, poor construction quality does not provide some of the required parameters; in this case, the RVS process turns into a tedious scenario. Hence, to tackle such a situation, multiple-criteria decision-making (MCDM) methods for the seismic vulnerability assessment opens a new gateway. The different parameters required by RVS can be taken in MCDM. MCDM evaluates multiple conflicting criteria in decision making in several fields. This paper has aimed to bridge the gap between RVS and MCDM. 
Furthermore, to define the correlation between these techniques, implementation of the methodologies from Indian, Turkish, and Federal Emergency Management Agency (FEMA) codes has been done. The effects of seismic vulnerability of structures have been observed and compared.}, subject = {Erdbebensicherheit}, language = {en} } @article{HarirchianKumariJadhavetal., author = {Harirchian, Ehsan and Kumari, Vandana and Jadhav, Kirti and Raj Das, Rohan and Rasulzade, Shahla and Lahmer, Tom}, title = {A Machine Learning Framework for Assessing Seismic Hazard Safety of Reinforced Concrete Buildings}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 20, article 7153}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10207153}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201022-42744}, pages = {18}, abstract = {Although averting a seismic disturbance and its physical, social, and economic disruption is practically impossible, using the advancements in computational science and numerical modeling shall equip humanity to predict its severity, understand the outcomes, and equip for post-disaster management. Many buildings exist amidst the developed metropolitan areas, which are senile and still in service. These buildings were also designed before establishing national seismic codes or without the introduction of construction regulations. In that case, risk reduction is significant for developing alternatives and designing suitable models to enhance the existing structure's performance. Such models will be able to classify risks and casualties related to possible earthquakes through emergency preparation. Thus, it is crucial to recognize structures that are susceptible to earthquake vibrations and need to be prioritized for retrofitting. However, each building's behavior under seismic actions cannot be studied through performing structural analysis, as it might be unrealistic because of the rigorous computations, long period, and substantial expenditure. Therefore, it calls for a simple, reliable, and accurate process known as Rapid Visual Screening (RVS), which serves as a primary screening platform, including an optimum number of seismic parameters and predetermined performance damage conditions for structures. In this study, the damage classification technique was studied, and the efficacy of the Machine Learning (ML) method in damage prediction via a Support Vector Machine (SVM) model was explored. The ML model is trained and tested separately on damage data from four different earthquakes, namely Ecuador, Haiti, Nepal, and South Korea. Each dataset consists of varying numbers of input data and eight performance modifiers. 
Based on the study and the results, the ML model using SVM classifies the given input data into the belonging classes and accomplishes the performance on hazard safety evaluation of buildings.}, subject = {Erdbeben}, language = {en} } @article{KarimimoshaverHajivalieiShokrietal., author = {Karimimoshaver, Mehrdad and Hajivaliei, Hatameh and Shokri, Manouchehr and Khalesro, Shakila and Aram, Farshid and Shamshirband, Shahaboddin}, title = {A Model for Locating Tall Buildings through a Visual Analysis Approach}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, issue 17, article 6072}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10176072}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43350}, pages = {1 -- 25}, abstract = {Tall buildings have become an integral part of cities despite all their pros and cons. Some current tall buildings have several problems because of their unsuitable location; the problems include increasing density, imposing traffic on urban thoroughfares, blocking view corridors, etc. Some of these buildings have destroyed desirable views of the city. In this research, different criteria have been chosen, such as environment, access, social-economic, land-use, and physical context. These criteria and sub-criteria are prioritized and weighted by the analytic network process (ANP) based on experts' opinions, using Super Decisions V2.8 software. On the other hand, layers corresponding to sub-criteria were made in ArcGIS 10.3 simultaneously, then via a weighted overlay (map algebra), a locating plan was created. In the next step seven hypothetical tall buildings (20 stories), in the best part of the locating plan, were considered to evaluate how much of these hypothetical buildings would be visible (fuzzy visibility) from the street and open spaces throughout the city. These processes have been modeled by MATLAB software, and the final fuzzy visibility plan was created by ArcGIS. Fuzzy visibility results can help city managers and planners to choose which location is suitable for a tall building and how much visibility may be appropriate. The proposed model can locate tall buildings based on technical and visual criteria in the future development of the city and it can be widely used in any city as long as the criteria and weights are localized.}, subject = {Geb{\"a}ude}, language = {en} } @article{SaadatfarKhosraviHassannatajJoloudarietal., author = {Saadatfar, Hamid and Khosravi, Samiyeh and Hassannataj Joloudari, Javad and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {A New K-Nearest Neighbors Classifier for Big Data Based on Efficient Data Pruning}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 2, article 286}, publisher = {MDPI}, doi = {10.3390/math8020286}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40996}, pages = {12}, abstract = {The K-nearest neighbors (KNN) machine learning algorithm is a well-known non-parametric classification method. However, like other traditional data mining methods, applying it on big data comes with computational challenges. Indeed, KNN determines the class of a new sample based on the class of its nearest neighbors; however, identifying the neighbors in a large amount of data imposes a large computational cost so that it is no longer applicable by a single computing machine. One of the proposed techniques to make classification methods applicable on large datasets is pruning.
LC-KNN is an improved KNN method which first clusters the data into some smaller partitions using the K-means clustering method; and then applies the KNN for each new sample on the partition whose center is the nearest one. However, because the clusters have different shapes and densities, selection of the appropriate cluster is a challenge. In this paper, an approach has been proposed to improve the pruning phase of the LC-KNN method by taking into account these factors. The proposed approach helps to choose a more appropriate cluster of data for looking for the neighbors, thus, increasing the classification accuracy. The performance of the proposed approach is evaluated on different real datasets. The experimental results show the effectiveness of the proposed approach and its higher classification accuracy and lower time cost in comparison to other recent relevant methods.}, subject = {Maschinelles Lernen}, language = {en} } @article{SchirmerOsburg, author = {Schirmer, Ulrike and Osburg, Andrea}, title = {A new method for the quantification of adsorbed styrene acrylate copolymer particles on cementitious surfaces: a critical comparative study}, series = {SN Applied Sciences}, volume = {2020}, journal = {SN Applied Sciences}, number = {Volume 2, article 2061}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42452-020-03825-5}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44729}, pages = {1 -- 11}, abstract = {The amount of adsorbed styrene acrylate copolymer (SA) particles on cementitious surfaces at the early stage of hydration was quantitatively determined using three different methodological approaches: the depletion method, the visible spectrophotometry (VIS) and the thermo-gravimetry coupled with mass spectrometry (TG-MS). Considering the advantages and disadvantages of each method, including the respectively required sample preparation, the results for four polymer-modified cement pastes, varying in polymer content and cement fineness, were evaluated. To some extent, significant discrepancies in the adsorption degrees were observed. There is a tendency that significantly lower amounts of adsorbed polymers were identified using TG-MS compared to values determined with the depletion method. Spectrophotometrically generated values were lying in between these extremes. This tendency was found for three of the four cement pastes examined and originates in sample preparation and methodical limitations. The main influencing factor is the falsification of the polymer concentration in the liquid phase during centrifugation. Interactions in the interface between sediment and supernatant are the cause. The newly developed method, using TG-MS for the quantification of SA particles, proved to be suitable for dealing with these revealed issues. Here, instead of the fluid phase, the sediment is examined with regard to the polymer content, on which the influence of centrifugation is considerably lower.}, subject = {Zement}, language = {en} } @phdthesis{Winkel, author = {Winkel, Benjamin}, title = {A three-dimensional model of skeletal muscle for physiological, pathological and experimental mechanical simulations}, doi = {10.25643/bauhaus-universitaet.4300}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201211-43002}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In recent decades, a multitude of concepts and models were developed to understand, assess and predict muscular mechanics in the context of physiological and pathological events.
Most of these models are highly specialized and designed to selectively address fields in, e.g., medicine, sports science, forensics, product design or CGI; their data are often not transferable to other ranges of application. A single universal model, which covers the details of biochemical and neural processes, as well as the development of internal and external force and motion patterns and appearance could not be practical with regard to the diversity of the questions to be investigated and the task to find answers efficiently. With reasonable limitations though, a generalized approach is feasible. The objective of the work at hand was to develop a model for muscle simulation which covers the phenomenological aspects, and thus is universally applicable in domains where up until now specialized models were utilized. This includes investigations on active and passive motion, structural interaction of muscles within the body and with external elements, for example in crash scenarios, but also research topics like the verification of in vivo experiments and parameter identification. For this purpose, elements for the simulation of incompressible deformations were studied, adapted and implemented into the finite element code SLang. Various anisotropic, visco-elastic muscle models were developed or enhanced. The applicability was demonstrated on the base of several examples, and a general base for the implementation of further material models was developed and elaborated.}, subject = {Biomechanik}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {The classical Internet of things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations in shared transfer media, many nodes in the network are prone to the collision in simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic, which are not subjected to external noise from adjacent frequencies. There are preventive, detection and control solutions to congestion management in the network which are all the focus of this study. In the congestion prevention phase, the proposed method chooses the next step of the path using the Fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent the collision. In the congestion control phase, the back-pressure method was used based on the quality of the queue to decrease the probability of linking in the pathway from the pre-congested node. The main goals of this study are to balance energy consumption in network nodes, reducing the rate of lost packets and increasing quality of service in routing. 
Simulation results proved the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable in improving routing parameters as compared to recent algorithms.}, subject = {Internet der Dinge}, language = {en} } @phdthesis{Oucif, author = {Oucif, Chahmi}, title = {Analytical Modeling of Self-Healing and Super Healing in Cementitious Materials}, doi = {10.25643/bauhaus-universitaet.4229}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200831-42296}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {208}, abstract = {Self-healing materials have recently become more popular due to their capability to autonomously and autogenously repair the damage in cementitious materials. The concept of self-healing gives the damaged material the ability to recover its stiffness. This gives a difference in comparing with a material that is not subjected to healing. Once this material is damaged, it cannot sustain loading due to the stiffness degradation. Numerical modeling of self-healing materials is still in its infancy. Multiple experimental researches were conducted in literature to describe the behavior of self-healing of cementitious materials. However, few numerical investigations were undertaken. The thesis presents an analytical framework of self-healing and super healing materials based on continuum damage-healing mechanics. Through this framework, we aim to describe the recovery and strengthening of material stiffness and strength. A simple damage healing law is proposed and applied on concrete material. The proposed damage-healing law is based on a new time-dependent healing variable. The damage-healing model is applied on isotropic concrete material at the macroscale under tensile load. Both autonomous and autogenous self-healing mechanisms are simulated under different loading conditions. These two mechanisms are denoted in the present work by coupled and uncoupled self-healing mechanisms, respectively. We assume in the coupled self-healing that the healing occurs at the same time with damage evolution, while we assume in the uncoupled self-healing that the healing occurs when the material is deformed and subjected to a rest period (damage is constant). In order to describe both coupled and uncoupled healing mechanisms, a one-dimensional element is subjected to different types of loading history. In the same context, derivation of nonlinear self-healing theory is given, and comparison of linear and nonlinear damage-healing models is carried out using both coupled and uncoupled self-healing mechanisms. The nonlinear healing theory includes generalized nonlinear and quadratic healing models. The healing efficiency is studied by varying the values of the healing rest period and the parameter describing the material characteristics. In addition, theoretical formulation of different self-healing variables is presented for both isotropic and anisotropic materials. The healing variables are defined based on the recovery in elastic modulus, shear modulus, Poisson's ratio, and bulk modulus. The evolution of the healing variable calculated based on cross-section as a function of the healing variable calculated based on elastic stiffness is presented in both hypotheses of elastic strain equivalence and elastic energy equivalence. The components of the fourth-rank healing tensor are also obtained in the case of isotropic elasticity, plane stress and plane strain. Recent research revealed that self-healing presents a crucial solution also for the strengthening of the materials.
This new concept has been termed ``Super Healing''. Once the stiffness of the material is recovered, further healing can result as a strengthening material. In the present thesis, a new theory of super healing materials is defined in isotropic and anisotropic cases using sound mathematical and mechanical principles which are applied in linear and nonlinear super healing theories. Additionally, the link of the proposed theory with the theory of undamageable materials is outlined. In order to describe the super healing efficiency in linear and nonlinear theories, the ratio of effective stress to nominal stress is calculated as a function of the super healing variable. In addition, the hypotheses of elastic strain and elastic energy equivalence are applied. In the same context, a new super healing matrix in plane strain is proposed based on continuum damage-healing mechanics. In the present work, we also focus on numerical modeling of impact behavior of reinforced concrete slabs using the commercial finite element package Abaqus/Explicit. Plain and reinforced concrete slabs of unconfined compressive strength 41 MPa are simulated under impact of ogive-nosed hard projectile. The constitutive material modeling of the concrete and steel reinforcement bars is performed using the Johnson-Holmquist-2 damage and the Johnson-Cook plasticity material models, respectively. Damage diameters and residual velocities obtained by the numerical model are compared with the experimental results and effect of steel reinforcement and projectile diameter is studied.}, subject = {Schaden}, language = {en} } @phdthesis{Jentzsch, author = {Jentzsch, Sina}, title = {Appell der Dinge. K{\"u}nstlerische Zuwendung zu den Dingen in den 1960er-Jahren}, doi = {10.25643/bauhaus-universitaet.4112}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200323-41129}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {261}, abstract = {Diese Dissertation besch{\"a}ftigt sich mit Kunstwerken, die das allt{\"a}gliche Ding in den Blick nehmen. N{\"a}hrboden dieser Kunstform sind die soziokulturellen Entwicklungen des 20. Jahrhunderts, mit denen wesentliche Ver{\"a}nderungen hinsichtlich des Verh{\"a}ltnisses von Mensch und Ding einhergingen. Daraus resultierte eine allgemeine k{\"u}nstlerische Zuwendung zu den Dingen und eine einzigartige Kulmination aus verschiedenartigen Auseinandersetzungen mit ihnen als kunstf{\"a}hige Gegenst{\"a}nde, {\"u}ber die sich die neue Dingwelt erschlossen wurde und deren Kunstwerke einen Spiegel dieser Entwicklungen darstellen. Die Dissertation stellt ebenfalls die Dinge selbst in den Fokus. Vier Aspekte von Dingen (Materialit{\"a}t, Funktionalit{\"a}t, Repr{\"a}sentationalit{\"a}t und Relationalit{\"a}t) werden gesondert ins Auge gefasst und in den theoretischen Diskurs des 20. Jahrhunderts eingeordnet, um sie als Teil der gelebten Realit{\"a}t besser zu verstehen, von der sich der {\"a}sthetische Blick nicht trennen l{\"a}sst. Anhand der k{\"u}nstlerischen Positionen von Robert Rauschenberg, Christo und Jeanne-Claude, Daniel Spoerri und Arman sowie Claes Oldenburg werden die verschiedenen Aspekte der Dinge n{\"a}her betrachtet und analysiert, wie diese speziell in den Kunstwerken thematisiert werden und welche Relevanz sie f{\"u}r deren Rezeptionserfahrung haben.
Die Korrelation dieser beiden Ebenen - die Dinge als konstitutiver Bestandteil im sozialen Raum und die Dinge als Elemente in Kunstwerken -, die im Fokus der vorliegenden Untersuchung steht, erm{\"o}glicht es, die k{\"u}nstlerische Zuwendung zu den Dingen in den 1960er-Jahren neu einzuordnen. Dar{\"u}ber hinaus wird dadurch ein differenziertes Bild von der Kunst dieser Zeit sowie den Dingen in der Kunst im Allgemeinen gezeichnet.}, subject = {Ding}, language = {de} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to hit the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses of future earthquakes are highly consequential for emergency planners with the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis on each building to study the earthquake response is impractical due to complex calculations, long computational time, and exorbitant cost. This exhibits the need for a fast, reliable, and rapid method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the building's data consists of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} } @phdthesis{RadmardRahmani, author = {Radmard Rahmani, Hamid}, title = {Artificial Intelligence Approach for Seismic Control of Structures}, doi = {10.25643/bauhaus-universitaet.4135}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200417-41359}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {In the first part of this research, the utilization of tuned mass dampers in the vibration control of tall buildings during earthquake excitations is studied. The main issues such as optimizing the parameters of the dampers and studying the effects of frequency content of the target earthquakes are addressed. The non-dominated sorting genetic algorithm method is improved by upgrading generic operators, and is utilized to develop a framework for determining the optimum placement and parameters of dampers in tall buildings. A case study is presented in which the optimal placement and properties of dampers are determined for a model of a tall building under different earthquake excitations through computer simulations. In the second part, a novel framework for the brain learning-based intelligent seismic control of smart structures is developed.
In this approach, a deep neural network learns how to improve structural responses during earthquake excitations using feedback control. The reinforcement learning method is improved and utilized to develop a framework for training the deep neural network as an intelligent controller. The efficiency of the developed framework is examined through two case studies including a single-degree-of-freedom system and a high-rise building under different earthquake excitation records. The results show that the controller gradually develops an optimum control policy to reduce the vibrations of a structure under an earthquake excitation through a cyclical process of actions and observations. It is shown that the controller efficiently improves the structural responses under new earthquake excitations for which it was not trained. Moreover, it is shown that the controller has a stable performance under uncertainties.}, subject = {Erdbeben}, language = {en} } @phdthesis{Mueller, author = {M{\"u}ller, Jan Philip}, title = {Audiovision und Synchronisation. Sehen, H{\"o}ren und Gleichzeitigkeit in Anordnungen vom Observatorium {\"u}ber psychologische Experimente bis zum Tonfilm im 19. und 20. Jahrhundert}, doi = {10.25643/bauhaus-universitaet.4290}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201123-42906}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {516}, abstract = {In dieser Untersuchung wird eine Geschichte von Problemen der Gleichzeitigkeit zwischen Sehen und H{\"o}ren, beziehungsweise der Synchronit{\"a}t von Bild und Ton, bis zur Entstehung des Tonfilms rekonstruiert. Dabei werden Linien gezogen zwischen diskursiven Konfigurationen und medialen Anordnungen, in denen das Verh{\"a}ltnis von Sehen und H{\"o}ren oder Bild und Ton als zeitliches erscheint - in denen Sehen und H{\"o}ren, Bild und Ton zwischen Mannigfaltigkeit und Einheit verschmelzen, auseinanderfallen, interagieren, redundant oder spezifisch werden, einander erg{\"a}nzen, dominieren, verfehlen, verdr{\"a}ngen, aufspalten… Tonfilm ist in der Kinogeschichte eben nicht nur eine Erg{\"a}nzung. Vielmehr {\"a}hnelt er dem Auftritt eines Gespensts, das das Wissen und die Techniken der Trennung der Sinne schon l{\"a}nger, vielleicht schon immer begleitet hatte. Das Auftreten des Tonfilms ist auch {\"u}berhaupt fr{\"u}her Anlass eines weitreichenden Diskurses dar{\"u}ber, was Audiovision eigentlich sein k{\"o}nnte und sollte. Noch allgemeiner k{\"o}nnte auch davon gesprochen werden, dass Tonfilm eins der ersten großen Projekte der Konvergenz technischer Medien ist, die heute - besonders angesichts des Computers - als entscheidender Aspekt von Mediengeschichte erscheint. Die Linien der Probleme von Gleichzeitigkeit/Ungleichzeitigkeit an den Schnittstellen von Wissen, Technik und {\"A}sthetik werden insbesondere durch drei Felder hindurch nachgezeichnet: 1) Die Geschichte von Intermodalit{\"a}t in Bezug auf die Frage nach Gleichzeitigkeit und Ungleichzeitigkeit als Problem und Gegenstand von Wissenschaft seit dem 19. Jahrhundert, vornehmlich in zwei Gebieten: Als Fehlerquelle im astronomischen Observatorium bei der Messung, Feststellung und Vereinheitlichung von Raum und Zeit, die auf individuelle Abweichungen intermodaler Wahrnehmung verweist und als Problem der „pers{\"o}nlichen Gleichung" weit {\"u}ber die Astronomie hinaus Karriere macht. Als heiße Zone wahrnehmungspsychologischer Experimente und ihrer Apparate seit der Mitte des 19.
Jahrhunderts, die mit dem Konzept der „Komplikation" Fragen nach einer Synthese der Sinneswahrnehmungen und damit letztlich nach der Selbstgegenwart des Menschen stellt. 2) Eine Technikgeschichte des Problems auditive und visuelle Zeitmedien - wie Phonograph und Film - zu koppeln, zu synchronisieren. Darin eskalieren zwei zeitkritische Relationen: Einerseits zwischen diskreter, intermittierender Bewegung des Films und stetiger, kontinuierlicher Bewegung des Phonographen, andererseits in Bezug darauf, an welcher Stelle - wo und wann - audiovisuelle Gegenwart des Kinos entsteht; oder auch verfehlt wird. 3) Eine Geschichte von Filmtheorie und -{\"a}sthetik, in der sich mit der Durchsetzung des Tonfilms um 1930 die Frage stellt, was dieses neue Medium sei und was damit zu tun. Diese Verhandlungen spannen sich zwischen dem formulierten Ziel einer spezifischen Illusion oder Pr{\"a}senz von Tonfilm durch Synchronit{\"a}t auf der einen Seite und der sich aus dem Verdacht des Betrugs durch Synchronit{\"a}t ergebenden Forderung nach „Asynchronismus" als kritischer Methode auf der anderen Seite auf. Ausgehend von der These, dass im 19. Jahrhundert die Sinne aufgeteilt werden, dann wird in diesen Anordnungen an irgendeiner Stelle Heterogenes gleichzeitig passieren. An welcher Stelle? Und was bedeuten diese (Un-)Gleichzeitigkeiten? Was dabei - sehr allgemein gesprochen - auf dem Spiel steht, sind M{\"o}glichkeiten einer audiovisuell geteilten - getrennten oder gemeinsamen - Welt und Gegenwart.}, subject = {Tonfilm}, language = {de} } @article{BecherVoelkerRodehorstetal., author = {Becher, Lia and V{\"o}lker, Conrad and Rodehorst, Volker and Kuhne, Michael}, title = {Background-oriented schlieren technique for two-dimensional visualization of convective indoor air flows}, series = {Optics and Lasers in Engineering}, volume = {2020}, journal = {Optics and Lasers in Engineering}, number = {Volume 134, article 106282}, doi = {10.1016/j.optlaseng.2020.106282}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220810-46972}, pages = {9}, abstract = {This article focuses on further developments of the background-oriented schlieren (BOS) technique to visualize convective indoor air flow, which is usually defined by very small density gradients. Since the light rays deflect when passing through fluids with different densities, BOS can detect the resulting refractive index gradients as integration along a line of sight. In this paper, the BOS technique is used to yield a two-dimensional visualization of small density gradients. The novelty of the described method is the implementation of a highly sensitive BOS setup to visualize the ascending thermal plume from a heated thermal manikin with temperature differences of minimum 1 K. To guarantee steady boundary conditions, the thermal manikin was seated in a climate laboratory. For the experimental investigations, a high-resolution DSLR camera was used capturing a large field of view with sufficient detail accuracy. Several parameters such as various backgrounds, focal lengths, room air temperatures, and distances between the object of investigation, camera, and structured background were tested to find the most suitable parameters to visualize convective indoor air flow. Besides these measurements, this paper presents the analyzing method using cross-correlation algorithms and finally the results of visualizing the convective indoor air flow with BOS.
The highly sensitive BOS setup presented in this article complements the commonly used invasive methods that highly influence weak air flows.}, subject = {Raumklima}, language = {en} } @book{BieberBuskeElertetal., author = {Bieber, Constanze and Buske, Johann and Elert, Robert and G{\"o}bel, Hannah and Gripp, David and Hempel, Anne-Mareike and Hummitzsch, Ruben and Kamigashima Kohmann, Laelia and Klocke, Johanna and Mann, Michael and Mitzenheim, Robert and Oehler, Louis and Pfeffer, Edna and Pfeiffer, Julia and Pieper, Kai and Schwarz, Philipp and Zeyse, Samuel}, title = {Barf{\"u}ßerkirche Erfurt: Weiterbauen an der Ruine}, editor = {Angermann, Kirsten and Engelmann, Iris and Horn, Karsten}, address = {Weimar}, doi = {10.25643/bauhaus-universitaet.4203}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200727-42037}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {119}, abstract = {Die Ruine der Barf{\"u}ßerkirche in Erfurt stellt eine der letzten Erinnerungen an die Zerst{\"o}rungen der Stadt im Zweiten Weltkrieg dar. Sie wird bis heute tempor{\"a}r und saisonal kulturell genutzt. Im Rahmen eines Studienprojektes im Sommersemester 2019 wurden an der Bauhaus-Universit{\"a}t Weimar, betreut durch die Professur Denkmalpflege und Baugeschichte und unterst{\"u}tzt vom Initiativkreis Barf{\"u}ßerkirche, Nutzungskonzepte f{\"u}r ein Museum f{\"u}r Mittelalterkunst und f{\"u}r einen Tagungsort untersucht. Der vorliegende Band dokumentiert die 14 studentischen Entw{\"u}rfe, die f{\"u}r ein Weiterbauen an der Barf{\"u}ßerkirche entstanden sind.}, subject = {Architektur}, language = {de} } @phdthesis{Gretzki, author = {Gretzki, Allan}, title = {BundeskunstHall of Fame - Realisierungsprozess eines Graffiti Ausstellungsprojekts im musealen Kontext}, doi = {10.25643/bauhaus-universitaet.4215}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200818-42158}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {228}, abstract = {Realisierungsprozess eines Graffiti Ausstellungsprojekts im musealen Kontext}, subject = {Graffito}, language = {de} } @phdthesis{Truemer, author = {Tr{\"u}mer, Andr{\'e}}, title = {Calcinierte Tone als Puzzolane der Zukunft - Von den Rohstoffen bis zur Wirkung im Beton}, isbn = {978-3-00-065011-6}, doi = {10.25643/bauhaus-universitaet.4096}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200214-40968}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {222}, abstract = {Vor dem Hintergrund einer stetig wachsenden Nachfrage an Beton wie auch ambitionierter Reduktionsziele beim in der Zementproduktion anfallenden CO2 gelten calcinierte Tone als derzeit aussichtsreichste technische Neuerung im Bereich nachhaltiger Bindemittelkonzepte. Unter Ausnutzung ihrer Puzzolanit{\"a}t soll ein erheblicher Teil der Klinkerkomponente im Zement ersetzt werden, wobei der zu ihrer Aktivierung notwendige Energiebedarf vergleichsweise niedrig ist. Wesentliche Vorteile der Tone sind ihre weltweit nahezu unbegrenzte Verf{\"u}gbarkeit sowie der {\"a}ußerst geringe rohstoffbedingte CO2-Ausstoß w{\"a}hrend der Calcinierung. Schwierigkeiten auf dem Weg der Umsetzung bestehen allerdings in der Vielseitigkeit des Systems, welches durch eine hohe Variet{\"a}t der Rohtone und des daraus folgenden thermischen Verhaltens gekennzeichnet ist. Entsprechend schwierig ist die {\"U}bertragbarkeit von Erfahrungen mit bereits etablierten calcinierten Tonen wie dem Metakaolin, der sich durch hohe Reinheit, einen aufwendigen Aufbereitungsprozess und eine entsprechend hohe Reaktivit{\"a}t auszeichnet.
Ziel der Arbeit ist es daher, den bereits erlangten Kenntnisstand auf andere, wirtschaftlich relevante Tone zu erweitern und deren Eignung f{\"u}r die Anwendung im Beton herauszuarbeiten. In einem mehrstufigen Arbeitsprogramm wurde untersucht, inwieweit großtechnisch nutzbare Tone aktivierbar sind und welche Eigenschaften sich daraus f{\"u}r Zement und Beton ergeben. Die dabei festgestellte Reihenfolge Kaolinit > Montmorillonit > Illit beschreibt sowohl die Reaktivit{\"a}t der Brennprodukte als auch umgekehrt die H{\"o}he der optimalen Calciniertemperatur. Auch wandelt sich der Charakter der entstandenen Metaphasen in dieser Abfolge von r{\"o}ntgenamorph und hochreaktiv zu glasig und reaktionstr{\"a}ge. Trotz dieser Einordnung konnte selbst mit dem Illit eine mit Steinkohlenflugasche vergleichbare Puzzolanit{\"a}t festgestellt werden. Dies best{\"a}tigte sich anschließend in Parameterversuchen, bei denen die Einfl{\"u}sse von Rohstoffqualit{\"a}t, Calcinierung, Aufbereitung und Zement hinsichtlich der Reaktivit{\"a}tsausbeute bewertet wurden. Die Bandbreite der erzielbaren Qualit{\"a}ten ist dabei immens und gipfelt nicht zuletzt in stark unterschiedlichen Wirkungen auf die Festbetoneigenschaften. Hier machte sich vor allem die f{\"u}r Puzzolane typische Porenverfeinerung bemerkbar, sodass viele von Transportvorg{\"a}ngen abh{\"a}ngige Schadmechanismen unterdr{\"u}ckt wurden. Andere Schadexpositionen wie der Frostangriff ließen sich durch Zusatzmaßnahmen wie den Eintrag von Luftporen beherrschen. Zu bem{\"a}ngeln sind vor allem die schlechte Verarbeitbarkeit kaolinitischer Metatone wie auch die f{\"u}r Puzzolane stark ausgepr{\"a}gte Carbonatisierungsneigung. Wesentliches Ergebnis der Arbeit ist, dass auch Tone, die bisher als geringwertig bez{\"u}glich des Aktivierungspotentials galten, nutzbare puzzolanische Eigenschaften entwickeln k{\"o}nnen. So kann selbst ein stark verunreinigter Illit-Ton die Qualit{\"a}t von Flugasche erreichen. Mit steigendem Tonmineralgehalt sowie bei Pr{\"a}senz thermisch instabilerer Tonminerale wie Montmorillonit und Kaolinit erweitert sich das Spektrum nutzbarer Puzzolanit{\"a}ten bis hin zur hochreaktiven Metakaolin-Qualit{\"a}t. Damit lassen sich gute bis sehr gute Betoneigenschaften erzielen, sodass die Leistungsf{\"a}higkeit etablierter Kompositmaterialien erreicht wird. Somit sind die Voraussetzungen f{\"u}r eine umfangreiche Nutzung der erheblichen Tonmengen im Zement und Beton gegeben. Entsprechend k{\"o}nnen Tone einen effektiven Beitrag zu einer gesteigerten Nachhaltigkeit in der Baustoffproduktion weltweit leisten.}, subject = {Beton}, language = {de} } @incollection{Bee, author = {Bee, Julia}, title = {Collagen, Montagen, Anordnen, Umordnen - Wie mit Bildern experimentieren}, series = {Experimente lernen, Techniken tauschen, ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen, ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin/Weimar}, doi = {10.25643/bauhaus-universitaet.4250}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42504}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {29 -- 49}, abstract = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch Das spekulative Handbuch bietet vielf{\"a}ltige Techniken f{\"u}r ein radikales Lernen und Vermitteln. Es umfasst konkrete Anleitungen, Erfahrungen und theoretische {\"U}berlegungen. Die Texte beteiligen sich an der Konzeption einer Vermittlung, die das gemeinsame Experimentieren (wieder) einf{\"u}hrt.
Im Seminarraum, in Workshops, auf Festivals, in Fluren, Parks und der Stadt finden Lernen und Verlernen statt. Texte und Anleitungen u. a. zu: Filmessays, Collagen, Bank{\"u}berf{\"a}llen, der Universit{\"a}t der Toten, wildem Schreiben, konzeptuellem speed Dating, neurodiversem Lernen, Format-Denken, dem Theater der Sorge, dem Schreiblabor, dem K{\"o}rperstreik.}, subject = {Montage}, language = {de} } @article{ReichertOlneyLahmer, author = {Reichert, Ina and Olney, Peter and Lahmer, Tom}, title = {Combined approach for optimal sensor placement and experimental verification in the context of tower-like structures}, series = {Journal of Civil Structural Health Monitoring}, volume = {2021}, journal = {Journal of Civil Structural Health Monitoring}, number = {volume 11}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s13349-020-00448-7}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44701}, pages = {223 -- 234}, abstract = {When it comes to monitoring of huge structures, main issues are limited time, high costs and how to deal with the big amount of data. In order to reduce and manage them, respectively, methods from the field of optimal design of experiments are useful and supportive. Having optimal experimental designs at hand before conducting any measurements is leading to a highly informative measurement concept, where the sensor positions are optimized according to minimal errors in the structures' models. For the reduction of computational time a combined approach using Fisher Information Matrix and mean-squared error in a two-step procedure is proposed under the consideration of different error types. The error descriptions contain random/aleatoric and systematic/epistemic portions. Applying this combined approach on a finite element model using artificial acceleration time measurement data with artificially added errors leads to the optimized sensor positions. These findings are compared to results from laboratory experiments on the modeled structure, which is a tower-like structure represented by a hollow pipe as the cantilever beam. Conclusively, the combined approach is leading to a sound experimental design that leads to a good estimate of the structure's behavior and model parameters without the need of preliminary measurements for model updating.}, subject = {Strukturmechanik}, language = {en} } @article{Haefner, author = {H{\"a}fner, Lukas}, title = {Common Ground. Kommentar zu Lisa Vollmer und Boris Michel „Wohnen in der Klimakrise. Die Wohnungsfrage als {\"o}kologische Frage"}, series = {sub\urban. zeitschrift f{\"u}r kritische stadtforschung}, volume = {2020}, journal = {sub\urban. zeitschrift f{\"u}r kritische stadtforschung}, number = {Band 8, Heft 1/2}, publisher = {Sub\urban e.V.}, address = {Leipzig}, doi = {10.36900/suburban.v8i1/2.565}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200507-41655}, pages = {177 -- 182}, abstract = {Die im Jahr 2020 in Deutschland praktizierte Siedlungs- und Wohnungspolitik erh{\"a}lt in Anbetracht ihrer Auswirkungen auf die soziale und {\"o}kologische Lage einen bitteren Beigeschmack. Arm und Reich driften weiter auseinander und einer zielgerichteten {\"o}kologischen Transformation der Art und Weise, wie Stadtentwicklung und Wohnungspolitik gestaltet werden, stehen noch immer historisch und systemisch bedingte Pfadabh{\"a}ngigkeiten im Weg.
Diese werden nur durch eine integrierte Betrachtung sozialer und {\"o}konomischer Aspekte sichtbar und deuten auf eine der urspr{\"u}nglichen Fragen linker Gesellschaftsforschung hin: Die Auseinandersetzung mit dem Verh{\"a}ltnis von Eigentum und Gerechtigkeit. Im Ergebnis stehen drei wesentliche Befunde: Der Diskurs zum Schutz des Klimas und der Biodiversit{\"a}t ber{\"u}hrt direkt die Parameter Dichte, Nutzungsmischung und Fl{\"a}cheninanspruchnahme; zweitens steigt letztere relativ mit erh{\"o}htem, individuell verf{\"u}gbaren Kapital und insbesondere im selbstgenutzten Eigentum gegen{\"u}ber Mietwohnungen; und drittens w{\"a}chst der Eigentumsanteil mit fortschreitender Finanzialisierung des Wohnungsmarktes, sodass das Risiko sozialer und {\"o}kologischer Krisen sich versch{\"a}rft.}, subject = {Umweltgerechtigkeit}, language = {de} } @article{BandJanizadehChandraPaletal., author = {Band, Shahab S. and Janizadeh, Saeid and Chandra Pal, Subodh and Chowdhuri, Indrajit and Siabi, Zhaleh and Norouzi, Akbar and Melesse, Assefa M. and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Comparative Analysis of Artificial Intelligence Models for Accurate Estimation of Groundwater Nitrate Concentration}, series = {Sensors}, volume = {2020}, journal = {Sensors}, number = {Volume 20, issue 20, article 5763}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/s20205763}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43364}, pages = {1 -- 23}, abstract = {Prediction of the groundwater nitrate concentration is of utmost importance for pollution control and water resource management. This research aims to model the spatial groundwater nitrate concentration in the Marvdasht watershed, Iran, based on several artificial intelligence methods of support vector machine (SVM), Cubist, random forest (RF), and Bayesian artificial neural network (Bayesian-ANN) machine learning models. For this purpose, 11 independent variables affecting groundwater nitrate changes include elevation, slope, plan curvature, profile curvature, rainfall, piezometric depth, distance from the river, distance from residential, Sodium (Na), Potassium (K), and topographic wetness index (TWI) in the study area were prepared. Nitrate levels were also measured in 67 wells and used as a dependent variable for modeling. Data were divided into two categories of training (70\%) and testing (30\%) for modeling. The evaluation criteria coefficient of determination (R2), mean absolute error (MAE), root mean square error (RMSE), and Nash-Sutcliffe efficiency (NSE) were used to evaluate the performance of the models used. The results of modeling the susceptibility of groundwater nitrate concentration showed that the RF (R2 = 0.89, RMSE = 4.24, NSE = 0.87) model is better than the other Cubist (R2 = 0.87, RMSE = 5.18, NSE = 0.81), SVM (R2 = 0.74, RMSE = 6.07, NSE = 0.74), Bayesian-ANN (R2 = 0.79, RMSE = 5.91, NSE = 0.75) models. The results of groundwater nitrate concentration zoning in the study area showed that the northern parts of the case study have the highest amount of nitrate, which is higher in these agricultural areas than in other areas.
The most important causes of nitrate pollution in these areas are agricultural activities, the use of groundwater to irrigate crops, and wells close to agricultural areas, which have led to the indiscriminate use of chemical fertilizers; these fertilizers are washed out by irrigation or rainwater, penetrate the groundwater, and pollute the aquifer.}, subject = {Grundwasser}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using machine learning models of multilayer perceptron (MLP) algorithm and support vector machine (SVM) in hybrid form with the Firefly optimization algorithm, i.e. MLP-FFA and SVM-FFA. In the current study, measured ST and meteorological parameters of Tabriz and Ahar weather stations in a period of 2013-2015 are used for training and testing of the studied models with one and two days as a delay. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams were utilized for that purpose. Obtained results showed that, in a case of one day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with MLP, SVM, and SVM-FFA models. However, for two days delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST 20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the prescribed models, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @article{KavrakovKareemMorgenthal, author = {Kavrakov, Igor and Kareem, Ahsan and Morgenthal, Guido}, title = {Comparison Metrics for Time-histories: Application to Bridge Aerodynamics}, doi = {10.25643/bauhaus-universitaet.4186}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200625-41863}, pages = {28}, abstract = {Wind effects can be critical for the design of lifelines such as long-span bridges. The existence of a significant number of aerodynamic force models, used to assess the performance of bridges, poses an important question regarding their comparison and validation. This study utilizes a unified set of metrics for a quantitative comparison of time-histories in bridge aerodynamics with a host of characteristics. Accordingly, nine comparison metrics are included to quantify the discrepancies in local and global signal features such as phase, time-varying frequency and magnitude content, probability density, nonstationarity and nonlinearity.
Among these, seven metrics available in the literature are introduced after recasting them for time-histories associated with bridge aerodynamics. Two additional metrics are established to overcome the shortcomings of the existing metrics. The performance of the comparison metrics is first assessed using generic signals with prescribed signal features. Subsequently, the metrics are applied to a practical example from bridge aerodynamics to quantify the discrepancies in the aerodynamic forces and response based on numerical and semi-analytical aerodynamic models. In this context, it is demonstrated how a discussion based on the set of comparison metrics presented here can aid a model evaluation by offering deeper insight. The outcome of the study is intended to provide a framework for quantitative comparison and validation of aerodynamic models based on the underlying physics of fluid-structure interaction. Immediate further applications are expected for the comparison of time-histories that are simulated by data-driven approaches.}, subject = {Ingenieurwissenschaften}, language = {en} } @article{WellbrockArangoKureBuschow, author = {Wellbrock, Christian-Mathias and Arango Kure, Maria and Buschow, Christopher}, title = {Competition and Media Performance: A Cross-National Analysis of Corporate Goals of Media Companies in 12 Countries}, series = {International Journal of Communication}, volume = {2020}, journal = {International Journal of Communication}, number = {Vol 14 (2020)}, publisher = {USC, University of Southern California}, address = {Annenberg, California}, doi = {10.25643/bauhaus-universitaet.4317}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201221-43175}, pages = {6154 -- 6181}, abstract = {Despite digitization and platformization, mass media and established media companies still play a crucial role in the provision of journalistic content in democratic societies. Competition is one key driver of (media) company behavior and is considered to have an impact on the media's performance. However, theory and empirical research are ambiguous about the relationship. The objective of this article is to empirically analyze the effect of competition on media performance in a cross-national context. We assessed media performance of media companies as the importance of journalistic goals within their stated corporate goal system. We conducted a content analysis of letters to the shareholders in annual reports of more than 50 media companies from 2000 to 2014 to operationalize journalistic goal importance. When employing a fixed effects regression analysis, as well as a fuzzy set qualitative comparative analysis, results suggest that competition has a positive effect on the importance of journalistic goals, while the existence of a strong public service media sector appears to have the effect of "crowding out" commercial media companies.}, subject = {{\"O}ffentlich-rechtlicher Rundfunk}, language = {en} } @phdthesis{AbuBakar, author = {Abu Bakar, Ilyani Akmar}, title = {Computational Analysis of Woven Fabric Composites: Single- and Multi-Objective Optimizations and Sensitivity Analysis in Meso-scale Structures}, issn = {1610-7381}, doi = {10.25643/bauhaus-universitaet.4176}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200605-41762}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {151}, abstract = {This study permits a reliability analysis to solve the mechanical behaviour issues existing in the current structural design of fabric structures. 
Purely predictive material models are highly desirable to facilitate an optimized design scheme and to significantly reduce time and cost at the design stage, for example for experimental characterization. The present study examined the role of three major tasks: a) single-objective optimization, b) sensitivity analyses, and c) multi-objective optimization on proposed weave structures for woven fabric composites. For the single-objective optimization task, the first goal is to optimize the elastic properties of the proposed complex weave structure on a unit-cell basis with periodic boundary conditions. We predict the geometric characteristics towards skewness of woven fabric composites via an Evolutionary Algorithm (EA) and a parametric study. We also demonstrate the effect of complex weave structures on the fray tendency in woven fabric composites via tightness evaluation. We utilize a procedure which does not require a numerical averaging process for evaluating the elastic properties of woven fabric composites. The fray tendency and skewness of woven fabrics depend upon the behaviour of the floats, which is related to the weave factor. Results of this study may suggest a broader view for further research into the effects of complex weave structures or may provide an alternative to the fray and skewness problems of current weave structures in woven fabric composites. A comprehensive study is developed on the complex weave structure model which adopts the dry woven fabric of the most promising pattern from the single-objective optimization, incorporating the uncertainty parameters of woven fabric composites. The comprehensive study covers regression-based and variance-based sensitivity analyses. The goal of the second task is to introduce the fabric uncertainty parameters and elaborate how they can be incorporated into finite element models of macroscopic material parameters such as the elastic modulus and shear modulus of dry woven fabric subjected to uni-axial and biaxial deformations. Significant correlations in the study would indicate the need for a thorough investigation of woven fabric composites under uncertainty parameters. The study described here could serve as an alternative to identify effective material properties without prolonged time consumption and expensive experimental tests. The last part focuses on a hierarchical stochastic multi-scale optimization approach (fine-scale and coarse-scale optimizations) under geometrical uncertainty parameters for hybrid composites considering complex weave structures. The fine-scale optimization determines the lamina pattern that maximizes the macroscopic elastic properties and is conducted by the EA under the following uncertain mesoscopic parameters: yarn spacing, yarn height, yarn width and misalignment of yarn angle. The coarse-scale optimization has been carried out to optimize the stacking sequences of a symmetric hybrid laminated composite plate with uncertain mesoscopic parameters by employing the Ant Colony Algorithm (ACO). The objective functions of the coarse-scale optimization are to minimize the cost (C) and weight (W) of the hybrid laminated composite plate considering the fundamental frequency and the buckling load factor as the design constraints.
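An illustrative aside to the evolutionary optimization described in the entry above (not the thesis implementation): a minimal sketch of a generic evolutionary loop that searches a discrete design vector, such as an integer-encoded stacking sequence, for the design maximizing a placeholder fitness function; the encoding, operators, and objective below are assumptions for illustration only.

# Minimal sketch of a generic evolutionary search (placeholder objective).
import random

LAYERS, CHOICES, POP, GENS = 8, 4, 30, 50

def fitness(design):
    # placeholder objective; a real study would evaluate stiffness, cost, weight, ...
    return -sum((g - 1.5) ** 2 for g in design)

def mutate(design, rate=0.1):
    return [random.randrange(CHOICES) if random.random() < rate else g for g in design]

def crossover(a, b):
    cut = random.randrange(1, LAYERS)
    return a[:cut] + b[cut:]

population = [[random.randrange(CHOICES) for _ in range(LAYERS)] for _ in range(POP)]
for _ in range(GENS):
    population.sort(key=fitness, reverse=True)   # keep the fitter half as parents
    parents = population[: POP // 2]
    children = [mutate(crossover(random.choice(parents), random.choice(parents)))
                for _ in range(POP - len(parents))]
    population = parents + children

print("best design:", max(population, key=fitness))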
Based on the uncertainty criteria of the design parameters, the appropriate variation required for the structural design standards can be evaluated using the reliability tool, and then an optimized design decision in consideration of cost can be subsequently determined.}, subject = {Verbundwerkstoff}, language = {en} } @article{FaroughiKarimimoshaverArametal., author = {Faroughi, Maryam and Karimimoshaver, Mehrdad and Aram, Farshid and Solgi, Ebrahim and Mosavi, Amir and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Computational modeling of land surface temperature using remote sensing data to investigate the spatial arrangement of buildings and energy consumption relationship}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {https://doi.org/10.1080/19942060.2019.1707711}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40585}, pages = {254 -- 270}, abstract = {The effect of urban form on energy consumption has been the subject of various studies around the world. Having examined the effect of buildings on energy consumption, these studies indicate that the physical form of a city has a notable impact on the amount of energy consumed in its spaces. The present study identified the variables that affected energy consumption in residential buildings and analyzed their effects on energy consumption in four neighborhoods in Tehran: Apadana, Bimeh, Ekbatan-phase I, and Ekbatan-phase II. After extracting the variables, their effects are estimated with statistical methods, and the results are compared with the land surface temperature (LST) remote sensing data derived from Landsat 8 satellite images taken in the winter of 2019. The results showed that physical variables, such as the size of buildings, population density, vegetation cover, texture concentration, and surface color, have the greatest impacts on energy usage. For the Apadana neighborhood, the factors with the most potent effect on energy consumption were found to be the size of buildings and the population density. However, for other neighborhoods, in addition to these two factors, a third factor was also recognized to have a significant effect on energy consumption. This third factor for the Bimeh, Ekbatan-I, and Ekbatan-II neighborhoods was the type of buildings, texture concentration, and orientation of buildings, respectively.}, subject = {Fernerkung}, language = {en} } @article{JilteAhmadiKumaretal., author = {Jilte, Ravindra and Ahmadi, Mohammad Hossein and Kumar, Ravinder and Kalamkar, Vilas and Mosavi, Amir}, title = {Cooling Performance of a Novel Circulatory Flow Concentric Multi-Channel Heat Sink with Nanofluids}, series = {Nanomaterials}, volume = {2020}, journal = {Nanomaterials}, number = {Volume 10, Issue 4, 647}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/nano10040647}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200401-41241}, pages = {12}, abstract = {Heat rejection from electronic devices such as processors necessitates a high heat removal rate. The present study focuses on liquid-cooled novel heat sink geometry made from four channels (width 4 mm and depth 3.5 mm) configured in a concentric shape with alternate flow passages (slot of 3 mm gap). 
In this study, the cooling performance of the heat sink was tested under simulated controlled conditions. The lower bottom surface of the heat sink was heated at a constant heat flux condition based on dissipated power of 50 W and 70 W. The computations were carried out for different volume fractions of nanoparticles, namely 0.5\% to 5\%, and water as base fluid at a flow rate of 30 to 180 mL/min. The results showed a higher rate of heat rejection from the nanofluid-cooled heat sink compared with water. The enhancement in performance was analyzed with the help of the difference between the nanofluid outlet temperature and the water outlet temperature under similar operating conditions. The enhancement was ~2\% for 0.5\% volume fraction nanofluids and ~17\% for a 5\% volume fraction.}, subject = {Nanostrukturiertes Material}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases in middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is the use of medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects. Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis by selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning. The machine learning methods of random trees (RTs), decision tree of C5.0, support vector machine (SVM), and decision tree of Chi-squared automatic interaction detection (CHAID) are used in this study. The proposed method shows promising results and the study confirms that the RTs model outperforms the other models.}, subject = {Maschinelles Lernen}, language = {en} } @article{AlsaadVoelker, author = {Alsaad, Hayder and V{\"o}lker, Conrad}, title = {Could the ductless personalized ventilation be an alternative to the regular ducted personalized ventilation?}, series = {Indoor Air}, volume = {2020}, journal = {Indoor Air}, publisher = {John Wiley \& Sons Ltd}, doi = {10.1111/ina.12720}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200805-42072}, pages = {13}, abstract = {This study investigates the performance of two systems: personalized ventilation (PV) and ductless personalized ventilation (DPV). Even though the literature indicates a compelling performance of PV, it is not often used in practice due to its impracticality. Therefore, the present study assesses the possibility of replacing the inflexible PV with DPV in office rooms equipped with displacement ventilation (DV) in the summer season.
Numerical simulations were utilized to evaluate the inhaled concentration of pollutants when PV and DPV are used. The systems were compared in a simulated office with two occupants: a susceptible occupant and a source occupant. Three types of pollution were simulated: exhaled infectious air, dermally emitted contamination, and room contamination from a passive source. Results indicated that PV improved the inhaled air quality regardless of the location of the pollution source; a higher PV supply flow rate positively impacted the inhaled air quality. Contrarily, the performance of DPV was highly sensitive to the source location and the personalized flow rate. A higher DPV flow rate tends to decrease the inhaled air quality due to increased mixing of pollutants in the room. Moreover, both systems achieved better results when the personalized system of the source occupant was switched off.}, subject = {Str{\"o}mungsmechanik}, language = {en} } @phdthesis{Sung, author = {Sung, Younkyoung}, title = {Cultural Tourism and Social Resilience: Discourse of Historic Cities in East Germany, the Case of Gotha and Eisenach}, doi = {10.25643/bauhaus-universitaet.4092}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200212-40920}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {This thesis explores how cultural heritage plays a role in the development of urban identity by engaging both actively and passively with memory, i.e. remembering and forgetting. I argue that architectural heritage is a medium where specific cultural and social decisions form its way of presentation, and it reflects the values and interests of the period. Through the process of remembering and forgetting, the meanings between inhabitants and objects in the urban environment are practiced and created. To enable the research in narrative observation, cultural tourism management is chosen as the main research object, which reflects the alteration of interaction between the architectural heritage and urban identity. Identifying the role of heritage management, the definition of social resilience and the prospects of cultural heritage as a means of social resilience are addressed. The case region of the research is East Germany; the study thereby examines the distinct approaches and objectives regarding heritage management under the different political systems along the German reunification process. The framework is based on various theoretical paradigms to investigate the broad research questions: 1) What is the role of historic urban quarters in the revitalisation of East German towns? 2) How was the transition processed by cultural heritage management? 3) How did policy affect residents' lives? The case study is applied at the macro level (city level: Gotha and Eisenach) and the micro level (object level: specific heritage sites) to analyse the performance of selective remembering and the making of a tourist destination through giving significance to specific heritage. By means of site observations, archival research, qualitative interviews, photographs, and discourse analysis of printed tourism materials, the study demonstrates that certain sites and characteristics of the city enable creating and focusing messages, which aids social resilience. Combining theory and empirical studies, this thesis attempts to widen the academic discussion regarding the practice of remembering and forgetting driven by cultural heritage.
The thesis argues for cultural heritage tourism as an element of social resilience and one that embraces the historic and cultural identity of the inhabitants.}, subject = {Stadtmarketing}, language = {en} } @article{AlsaadVoelker, author = {Alsaad, Hayder and V{\"o}lker, Conrad}, title = {Der K{\"u}hlungseffekt der personalisierten L{\"u}ftung}, series = {Bauphysik}, volume = {2020}, journal = {Bauphysik}, number = {volume 42, issue 5}, publisher = {Ernst \& Sohn bei John Wiley \& Sons}, address = {Hoboken}, doi = {10.25643/bauhaus-universitaet.4272}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201020-42723}, pages = {218 -- 225}, abstract = {Personalisierte L{\"u}ftung (PL) kann die thermische Behaglichkeit sowie die Qualit{\"a}t der eingeatmeten Atemluft verbessern, indem jedem Arbeitsplatz Frischluft separat zugef{\"u}hrt wird. In diesem Beitrag wird die Wirkung der PL auf die thermische Behaglichkeit der Nutzer unter sommerlichen Randbedingungen untersucht. Hierf{\"u}r wurden zwei Ans{\"a}tze zur Bewertung des K{\"u}hlungseffekts der PL untersucht: basierend auf (1) der {\"a}quivalenten Temperatur und (2) dem thermischen Empfinden. Grundlage der Auswertung sind in einer Klimakammer gemessene sowie numerisch simulierte Daten. Vor der Durchf{\"u}hrung der Simulationen wurde das numerische Modell zun{\"a}chst anhand der gemessenen Daten validiert. Die Ergebnisse zeigen, dass der Ansatz basierend auf dem thermischen Empfinden zur Evaluierung des K{\"u}hlungseffekts der PL sinnvoller sein kann, da bei diesem die komplexen physiologischen Faktoren besser ber{\"u}cksichtigt werden.}, subject = {L{\"u}ftung}, language = {de} } @incollection{Reinartz, author = {Reinartz, Juli}, title = {Der perfekte Bankraub}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {B}, doi = {10.25643/bauhaus-universitaet.4259}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201009-42591}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {209 -- 231}, abstract = {Finanzielle Unabh{\"a}ngigkeit, {\"u}berleben k{\"o}nnen, Superheld*in oder Pop-Star sein, Adrenalin-Kick, lebenslange Kompliz*innenschaft und ewige romantische Verbundenheit, Verschw{\"o}rung, siegreiches {\"U}berlisten, T{\"a}uschungstechniken - die Fantasien, die sich um die Idee des Bankraubs ranken, sind so verschieden wie die Menschen, die sie haben. Ein Bank{\"u}berfall ist wahrscheinlich der Traum Vieler, angesichts der zunehmenden Prekarisierung pers{\"o}nlicher {\"O}konomien und - gleichzeitig oder gerade deswegen - ein spektakularisiertes, fast popkulturelles Ereignis, das in den Medien gut dokumentiert und in unz{\"a}hligen Filmen illustriert und weitergesponnen wird.}, subject = {Theater}, language = {de} } @techreport{BuschowWellbrock, author = {Buschow, Christopher and Wellbrock, Christian-Mathias}, title = {Die Innovationslandschaft des Journalismus in Deutschland}, organization = {Landesanstalt f{\"u}r Medien NRW}, doi = {10.25643/bauhaus-universitaet.4240}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200924-42407}, pages = {51}, abstract = {Das vorliegende Gutachten befasst sich mit der Innovationslandschaft des deutschen Journalismus. Innovation wird als eine essenzielle Voraussetzung verstanden, um tragf{\"a}hige L{\"o}sungsans{\"a}tze f{\"u}r die gegenw{\"a}rtigen Probleme des Journalismus zu entwickeln.
Im Mittelpunkt des Gutachtens steht die Frage, wie Innovationspolitik im Journalismus - d. h. die Unterst{\"u}tzung von Innovation durch die {\"o}ffentliche Hand - funktionst{\"u}chtig ausgestaltet werden kann. Dabei wird dem Innovationssysteme-Ansatz gefolgt, welcher Probleme, Barrieren und Hemmnisse identifiziert, die der Innovationsf{\"a}higkeit des Journalismus in Deutschland grundlegend im Wege stehen.}, subject = {Journalismus}, language = {de} } @article{MengNomanQasemShokrietal., author = {Meng, Yinghui and Noman Qasem, Sultan and Shokri, Manouchehr and Shamshirband, Shahaboddin}, title = {Dimension Reduction of Machine Learning-Based Forecasting Models Employing Principal Component Analysis}, series = {Mathematics}, volume = {2020}, journal = {Mathematics}, number = {volume 8, issue 8, article 1233}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/math8081233}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200811-42125}, pages = {15}, abstract = {In this research, an attempt was made to reduce the dimension of wavelet-ANFIS/ANN (artificial neural network/adaptive neuro-fuzzy inference system) models toward reliable forecasts as well as to decrease computational cost. In this regard, the principal component analysis was performed on the input time series decomposed by a discrete wavelet transform to feed the ANN/ANFIS models. The models were applied for dissolved oxygen (DO) forecasting in rivers which is an important variable affecting aquatic life and water quality. The current values of DO, water surface temperature, salinity, and turbidity have been considered as the input variable to forecast DO in a three-time step further. The results of the study revealed that PCA can be employed as a powerful tool for dimension reduction of input variables and also to detect inter-correlation of input variables. Results of the PCA-wavelet-ANN models are compared with those obtained from wavelet-ANN models while the earlier one has the advantage of less computational time than the later models. Dealing with ANFIS models, PCA is more beneficial to avoid wavelet-ANFIS models creating too many rules which deteriorate the efficiency of the ANFIS models. Moreover, manipulating the wavelet-ANFIS models utilizing PCA leads to a significant decreasing in computational time. Finally, it was found that the PCA-wavelet-ANN/ANFIS models can provide reliable forecasts of dissolved oxygen as an important water quality indicator in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{CerejeirasKaehlerLegatiuketal., author = {Cerejeiras, Paula and K{\"a}hler, Uwe and Legatiuk, Anastasiia and Legatiuk, Dmitrii}, title = {Discrete Hardy Spaces for Bounded Domains in Rn}, series = {Complex Analysis and Operator Theory}, volume = {2021}, journal = {Complex Analysis and Operator Theory}, number = {Volume 15, article 4}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s11785-020-01047-6}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44746}, pages = {1 -- 32}, abstract = {Discrete function theory in higher-dimensional setting has been in active development since many years. However, available results focus on studying discrete setting for such canonical domains as half-space, while the case of bounded domains generally remained unconsidered. Therefore, this paper presents the extension of the higher-dimensional function theory to the case of arbitrary bounded domains in Rn. 
Along the way, the discrete Stokes formula, the discrete Borel-Pompeiu formula, and discrete Hardy spaces for general bounded domains are constructed. Finally, several discrete Hilbert problems are considered.}, subject = {Dirac-Operator}, language = {en} } @article{HarirchianLahmerRasulzade, author = {Harirchian, Ehsan and Lahmer, Tom and Rasulzade, Shahla}, title = {Earthquake Hazard Safety Assessment of Existing Buildings Using Optimized Multi-Layer Perceptron Neural Network}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {Volume 13, Issue 8, 2060}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13082060}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200504-41575}, pages = {16}, abstract = {The latest earthquakes have proven that several existing buildings, particularly in developing countries, are not protected against earthquake damage. A variety of statistical and machine-learning approaches have been proposed to identify vulnerable buildings for the prioritization of retrofitting. The present work aims to investigate earthquake susceptibility through the combination of six building performance variables that can be used to obtain an optimal prediction of the damage state of reinforced concrete buildings using an artificial neural network (ANN). In this regard, a multi-layer perceptron network is trained and optimized using a database of 484 damaged buildings from the D{\"u}zce earthquake in Turkey. The results demonstrate the feasibility and effectiveness of the selected ANN approach to classify concrete structural damage, which can be used as a preliminary assessment technique to identify vulnerable buildings in disaster risk-management programs.}, subject = {Erdbeben}, language = {en} } @article{HarirchianLahmerBuddhirajuetal., author = {Harirchian, Ehsan and Lahmer, Tom and Buddhiraju, Sreekanth and Mohammad, Kifaytullah and Mosavi, Amir}, title = {Earthquake Safety Assessment of Buildings through Rapid Visual Screening}, series = {Buildings}, volume = {2020}, journal = {Buildings}, number = {Volume 10, Issue 3}, publisher = {MDPI}, doi = {10.3390/buildings10030051}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200331-41153}, pages = {15}, abstract = {Earthquakes are among the most devastating natural disasters, causing severe economic, environmental, and social destruction. Earthquake safety assessment and building hazard monitoring can highly contribute to urban sustainability through identification and insight into optimum materials and structures. While the vulnerability of structures mainly depends on the structural resistance, the safety assessment of buildings can be highly challenging. In this paper, we consider the Rapid Visual Screening (RVS) method, which is a qualitative procedure for estimating structural scores for buildings suitable for medium- to high-seismic cases. This paper presents an overview of the common RVS methods, i.e., FEMA P-154, IITK-GGSDMA, and EMPI. To examine the accuracy and validation, a practical comparison is performed between their assessment and the observed damage of reinforced concrete buildings from a street survey in the Bing{\"o}l region, Turkey, after the 1 May 2003 earthquake. The results demonstrate that RVS methods are a vital tool for preliminary damage estimation.
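An illustrative aside to the optimized multi-layer perceptron entry above (not the cited model or data): a minimal sketch, assuming scikit-learn and synthetic stand-in data, of a small MLP classifier that maps a handful of building performance variables to a damage class.

# Minimal sketch of an MLP damage-state classifier (synthetic data, illustrative only).
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(1)
X = rng.normal(size=(484, 6))                    # six performance variables per building
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)    # synthetic "damage state" labels

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=1)
clf = MLPClassifier(hidden_layer_sizes=(16, 8), max_iter=2000, random_state=1)
clf.fit(X_tr, y_tr)
print("test accuracy:", round(clf.score(X_te, y_te), 3))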
Furthermore, the comparative analysis showed that FEMA P-154 creates an assessment that overestimates damage states and is not economically viable, while EMPI and IITK-GGSDMA provide more accurate and practical estimation, respectively.}, subject = {Maschinelles Lernen}, language = {en} } @article{TutalPartschefeldSchneideretal., author = {Tutal, Adrian and Partschefeld, Stephan and Schneider, Jens and Osburg, Andrea}, title = {Effects of Bio-Based Plasticizers, Made From Starch, on the Properties of Fresh and Hardened Metakaolin-Geopolymer Mortar: Basic Investigations}, series = {Clays and Clay Minerals}, volume = {2020}, journal = {Clays and Clay Minerals}, number = {volume 68, No. 5}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s42860-020-00084-8}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210804-44737}, pages = {413 -- 427}, abstract = {Conventional superplasticizers based on polycarboxylate ether (PCE) show an intolerance to clay minerals due to intercalation of their polyethylene glycol (PEG) side chains into the interlayers of the clay mineral. An intolerance to very basic media is also known. This makes PCE an unsuitable choice as a superplasticizer for geopolymers. Bio-based superplasticizers derived from starch showed comparable effects to PCE in a cementitious system. The aim of the present study was to determine if starch superplasticizers (SSPs) could be a suitable additive for geopolymers by carrying out basic investigations with respect to slump, hardening, compressive and flexural strength, shrinkage, and porosity. Four SSPs were synthesized, differing in charge polarity and specific charge density. Two conventional PCE superplasticizers, differing in terms of molecular structure, were also included in this study. The results revealed that SSPs improved the slump of a metakaolin-based geopolymer (MK-geopolymer) mortar while the PCE investigated showed no improvement. The impact of superplasticizers on early hardening (up to 72 h) was negligible. Less linear shrinkage over the course of 56 days was seen for all samples in comparison with the reference. Compressive strengths of SSP specimens tested after 7 and 28 days of curing were comparable to the reference, while PCE led to a decline. The SSPs had a small impact on porosity with a shift to the formation of more gel pores while PCE caused an increase in porosity. Throughout this research, SSPs were identified as promising superplasticizers for MK-geopolymer mortar and concrete.}, subject = {Geopolymere}, language = {en} } @article{BrokowLogaNessler, author = {Brokow-Loga, Anton and Neßler, Miriam}, title = {Eine Frage der Fl{\"a}chengerechtigkeit! Kommentar zu Lisa Vollmer und Boris Michel „Wohnen in der Klimakrise. Die Wohnungsfrage als {\"o}kologische Frage"}, series = {s u b \ u r b a n. zeitschrift f{\"u}r kritische stadtforschung}, volume = {2020}, journal = {s u b \ u r b a n. zeitschrift f{\"u}r kritische stadtforschung}, number = {Band 8, Heft 1/2}, publisher = {Sub\urban e.V.}, address = {Leipzig}, doi = {10.36900/suburban.v8i1/2.572}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43333}, pages = {183 -- 192}, abstract = {Die derzeitige Wohnungskrise hat eine sozial-{\"o}kologische Kernproblematik. Dabei ist die sozial ungerechte und {\"o}kologisch problematische Verteilung von Wohnfl{\"a}che meist unsichtbar und wird weder in wissenschaftlichen noch in aktivistischen Kontexten ausreichend als Frage der Fl{\"a}chengerechtigkeit problematisiert. 
Denn Wohnraum und Fl{\"a}che in einer Stadt sind keine endlos verf{\"u}gbaren G{\"u}ter: Wenn einige Menschen auf viel Raum leben, bleibt f{\"u}r andere Menschen weniger Fl{\"a}che {\"u}brig. Und die Menschen, die am wenigsten f{\"u}r eine Verknappung von Wohnraum verantwortlich sind, leiden am meisten darunter. Dieser Artikel arbeitet zun{\"a}chst den Begriff der Wohnfl{\"a}chengerechtigkeit heraus, wobei auf die Ungleichverteilung von Wohnfl{\"a}che und deren gesellschaftliche Implikationen unter derzeitigen Wohnungsverteilungsmechanismen Bezug genommen wird. Anschließend wird der Verbrauch von (Wohn-)Fl{\"a}che aus {\"o}kologischer Perspektive problematisiert. Der Artikel diskutiert scheinbare und transformationsorientierte L{\"o}sungs- und Handlungsans{\"a}tze. Abschließend fordert er in der kritischen Stadtforschung und in aktivistischen Kontexten eine st{\"a}rkere Debatte um eine Wohnfl{\"a}chengerechtigkeit, deren Verwirklichung gleichermaßen eine soziale wie {\"o}kologische Dimension hat.}, subject = {Wohnen}, language = {de} } @article{AmirinasabShamshirbandChronopoulosetal., author = {Amirinasab, Mehdi and Shamshirband, Shahaboddin and Chronopoulos, Anthony Theodore and Mosavi, Amir and Nabipour, Narjes}, title = {Energy-Efficient Method for Wireless Sensor Networks Low-Power Radio Operation in Internet of Things}, series = {electronics}, volume = {2020}, journal = {electronics}, number = {volume 9, issue 2, 320}, publisher = {MDPI}, doi = {10.3390/electronics9020320}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40954}, pages = {20}, abstract = {The radio operation in wireless sensor networks (WSN) in Internet of Things (IoT) applications is the most common source of power consumption. Consequently, recognizing and controlling the factors affecting radio operation can be valuable for managing the node power consumption. Among the essential factors affecting radio operation, the time spent checking the radio is of utmost importance for monitoring power consumption. It can lead to false WakeUps or idle listening in radio duty cycles and ContikiMAC. ContikiMAC is a low-power radio duty-cycle protocol in Contiki OS that uses a clear channel assessment (CCA) in WakeUp mode to check the radio status periodically. This paper presents a detailed analysis of the radio WakeUp time factors of ContikiMAC. Furthermore, we propose a lightweight CCA (LW-CCA) as an extension to ContikiMAC to reduce the radio duty cycles in false WakeUps and idle listening through using a dynamic received signal strength indicator (RSSI) status check time. The simulation results in the Cooja simulator show that LW-CCA reduces energy consumption in nodes by about 8\% while maintaining up to 99\% of the packet delivery rate (PDR).}, subject = {Internet der Dinge}, language = {en} } @phdthesis{Bunte, author = {Bunte, Andreas}, title = {Entwicklung einer ontologiebasierten Beschreibung zur Erh{\"o}hung des Automatisierungsgrades in der Produktion}, doi = {10.25643/bauhaus-universitaet.4315}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201215-43156}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {189}, abstract = {Die zu beobachtenden k{\"u}rzeren Produktlebenszyklen und eine schnellere Marktdurchdringung von Produkttechnologien erfordern adaptive und leistungsf{\"a}hige Produktionsanlagen.
Die Adaptivit{\"a}t erm{\"o}glicht eine Anpassung der Produktionsanlage an neue Produkte, und die Leistungsf{\"a}higkeit der Anlage stellt sicher, dass ausreichend Produkte in kurzer Zeit und zu geringen Kosten hergestellt werden k{\"o}nnen. Durch eine Modularisierung der Produktionsanlage kann die Adaptivit{\"a}t erreicht werden. Jedoch erfordert heutzutage jede Adaption manuellen Aufwand, z.B. zur Anpassung von propriet{\"a}ren Signalen oder zur Anpassung {\"u}bergeordneter Funktionen. Dadurch sinkt die Leistungsf{\"a}higkeit der Anlage. Das Ziel dieser Arbeit ist es, die Interoperabilit{\"a}t in Bezug auf die Informationsverwendung in modularen Produktionsanlagen zu gew{\"a}hrleisten. Dazu werden Informationen durch semantische Modelle beschrieben. Damit wird ein einheitlicher Informationszugriff erm{\"o}glicht, und {\"u}bergeordnete Funktionen erhalten Zugriff auf alle Informationen der Produktionsmodule, unabh{\"a}ngig von dem Typ, dem Hersteller und dem Alter des Moduls. Dadurch entf{\"a}llt der manuelle Aufwand bei Anpassungen des modularen Produktionssystems, wodurch die Leistungsf{\"a}higkeit der Anlage gesteigert und Stillstandszeiten reduziert werden. Nach dem Ermitteln der Anforderungen an einen Modellierungsformalismus wurden potentielle Formalismen mit den Anforderungen abgeglichen. OWL DL stellte sich als geeigneter Formalismus heraus und wurde f{\"u}r die Erstellung des semantischen Modells in dieser Arbeit verwendet. Es wurde exemplarisch ein semantisches Modell f{\"u}r die drei Anwendungsf{\"a}lle Interaktion, Orchestrierung und Diagnose erstellt. Durch einen Vergleich der Modellierungselemente von unterschiedlichen Anwendungsf{\"a}llen wurde die Allgemeing{\"u}ltigkeit des Modells bewertet. Dabei wurde gezeigt, dass die Erreichung eines allgemeinen Modells f{\"u}r technische Anwendungsf{\"a}lle m{\"o}glich ist und lediglich einige Hundert Begriffe ben{\"o}tigt. Zur Evaluierung der erstellten Modelle wurde ein wandlungsf{\"a}higes Produktionssystem der SmartFactoryOWL verwendet, an dem die Anwendungsf{\"a}lle umgesetzt wurden. Dazu wurde eine Laufzeitumgebung erstellt, die die semantischen Modelle der einzelnen Module zu einem Gesamtmodell vereint, Daten aus der Anlage in das Modell {\"u}bertr{\"a}gt und eine Schnittstelle f{\"u}r die Services bereitstellt. Die Services realisieren {\"u}bergeordnete Funktionen und verwenden die Informationen des semantischen Modells. In allen drei Anwendungsf{\"a}llen wurden die semantischen Modelle korrekt zusammengef{\"u}gt und mit den darin enthaltenen Informationen konnte die Aufgabe des jeweiligen Anwendungsfalles ohne zus{\"a}tzlichen manuellen Aufwand gel{\"o}st werden.}, subject = {Ontologie}, language = {de} } @phdthesis{Rost, author = {Rost, Grit}, title = {Entwicklung eines Toolboxmodells als Planungswerkzeug f{\"u}r ein transdisziplin{\"a}res Wasserressourcenmanagement am Beispiel der Stadt Darkhan, Mongolei}, publisher = {Rhombus}, address = {Berlin}, isbn = {978-3-941216-94-5}, doi = {10.25643/bauhaus-universitaet.4287}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201113-42874}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {172}, abstract = {Im Rahmen der Dissertation wurde ein Toolboxmodell f{\"u}r transdisziplin{\"a}res Wasserressourcenmanagement entwickelt. Das Modell liefert den methodischen Rahmen Wasserressourcen nachhaltig und transdisziplin{\"a}r zu bewirtschaften. 
Der Begriff der Nachhaltigkeit und eine Konkretisierung der nachhaltigen Bewirtschaftung globaler Wasserressourcen scheinen un{\"u}berschaubar und suggerieren die Forderung nach einer neuen Weltformel. Die globale Bedeutung der Wasserressourcen, die f{\"u}r Regionen spezifischen Besonderheiten des nat{\"u}rlichen Wasserhaushalts und der anthropogenen Nutzung, die Zeitskala und die Kontextualisierung in alle betroffenen und benachbarten Disziplinen deuten auf die Komplexit{\"a}t der Thematik hin. Es wird eine Systematisierung des Planungsprozesses von Wasserressourcen notwendig, anhand derer eine holistische Herangehensweise mit einer Strategieentwicklung f{\"u}r Regionen spezifischer Schwerpunktprobleme erfolgt. Ziel der Arbeit ist die Erarbeitung einer Strategie zur Systematisierung nach diesen Forderungen und die Bereitstellung eines Toolboxmodelles als Planungswerkzeug f{\"u}r das transdisziplin{\"a}re Wasserressourcenmanagement. Das Toolboxmodell stellt den konzeptionellen Rahmen f{\"u}r die Bewirtschaftung von Wasserressourcen mit der Anwendung transdisziplin{\"a}rer Forschungsmethoden bereit. Wesentliche Herausforderungen bei der Anwendung der transdisziplin{\"a}ren Methode sind die Implementierung verschiedener Skalenbereiche, der Umgang mit der Komplexit{\"a}t von Daten, das Bewahren von Transparenz und Objektivit{\"a}t sowie die Erm{\"o}glichung eines auf andere Regionen {\"u}bertragbaren Planungsprozesses. Die theoretischen Grundlagen naturwissenschaftlicher Forschung zur Nachhaltigkeit haben ihren Ursprung in den biologischen und geographischen Disziplinen. Das Ineinandergreifen naturr{\"a}umlicher Zusammenh{\"a}nge und der Einfluss anthropogener Nutzung und technischer Innovationen auf den Naturhaushalt sind Kern der Kausalit{\"a}t {\"u}bergreifenden Denkens und Verstehens. Mit dem Ansatz des integrierten Wasserressourcenmanagements (IWRM) erfolgt die Ber{\"u}cksichtigung wirtschaftlicher und sozio{\"o}konomischer Ziele im Planungsprozess f{\"u}r {\"o}kologisch nachhaltige Wasserwirtschaft. Das Instrument der Wasserrahmenrichtlinie (EU-WRRL) ist eine auf die Gew{\"a}sser{\"o}kologie ausgerichtete Richtlinie, welche die Integration verschiedener Interessenvertreter in den Planungsprozess vorsieht. Das Konzept der neuartigen Sanit{\"a}rsysteme basiert auf Stofffl{\"u}ssen zwischen konkurrierenden Handlungsbereichen, wie Abfall-, Ressourcen- und Landwirtschaft. Den integrierten Ans{\"a}tzen fehlt eine {\"u}bergeordnete gemeinsame Zielstrategie - eine sogenannte Phase Null. Diese Phase Null - das Lernen aller relevanten, konkurrierenden und harmonisierenden Handlungsfelder eines Planungshorizontes - wird durch eine transdisziplin{\"a}re Perspektive erm{\"o}glicht. W{\"a}hrend bei der integralen Perspektive eine disziplinorientierte Kooperation im Vordergrund steht, verlangt die transdisziplin{\"a}re Perspektive nach einer problemorientierten Kooperation zwischen den Interessenvertretern (Werlen 2015). Die bestehenden Konzepte und Richtlinien f{\"u}r das nachhaltige Management von Wasserressourcen sind etabliert und evaluiert. Der Literatur zufolge ist eine Weiterentwicklung nach der Perspektive der Transdisziplinarit{\"a}t erforderlich. Das Toolboxmodell f{\"u}r integrales Wasserressourcenmanagement entspricht einem Planungstool bestehend aus Werkzeugen f{\"u}r die Anwendung wissenschaftlicher Methoden. Die Zusammenstellung der Methoden/Werkzeuge erf{\"u}llt im Rahmen die Methode transdisziplin{\"a}rer Forschung.
Das Werkzeug zum Aufstellen der relevanten Handlungsfelder umfasst die Charakterisierung eines Untersuchungsgebietes und Planungsrahmens, die kausale Verkn{\"u}pfung des Bewirtschaftungskonzeptes und konkurrierender sowie sich unterst{\"u}tzender Stakeholder. Mit dem Werkzeug der Kontextualisierung und Indikatorenaufstellung wird eine Methode der stufenweisen und von einer Skala unabh{\"a}ngigen Bewertung des Umweltzustandes f{\"u}r die Zielpriorisierung vorgenommen. Damit wird das Toolboxmodell dem Problem der Komplexit{\"a}t und Datenverf{\"u}gbarkeit gerecht. Anhand der eingesetzten ABC-Methode werden die Bewertungsgr{\"o}ßen differenziert auf verschiedene Skalen und Datenressourcen strukturiert (A = Ersterkennung, B = Zeigerwerte, C = Modell/Index). Die ABC-Methode erm{\"o}glicht die Planung bereits mit unsicherer und l{\"u}ckenhafter Datengrundlage, ist jederzeit erweiterbar und bietet somit eine operative Wissensgenerierung w{\"a}hrend des Gestaltungsprozesses. F{\"u}r das Werkzeug zur Bewertung und Priorisierung wird der Algorithmus der Composite Programmierung angewandt. Diese Methode der Mehrfachzielplanung erf{\"u}llt den Anspruch der permanenten Erweiterbarkeit und der transparenten und objektiven Entscheidungsfindung. Die Komplexit{\"a}t des transdisziplin{\"a}ren Wasserressourcenmanagements kann durch die Methode der Composite Programmierung systematisiert werden. Das wesentliche Ergebnis der Arbeit stellt die erfolgreiche Erarbeitung und Anwendung des Toolboxmodells f{\"u}r das transdisziplin{\"a}re Wasserressourcenmanagement im Untersuchungsgebiet Stadt Darkhan in der Mongolei dar. Auf Grund seiner besonderen hydrologischen und strukturellen Situation wird die Relevanz eines nachhaltigen Bewirtschaftungskonzeptes deutlich. Im Rahmen des Querschnittsmoduls des MoMo-Projektes wurde eine f{\"u}r das Toolboxmodell geeignete Datengrundlage erarbeitet. Planungsrelevante Handlungsfelder wurden im Rahmen eines Workshops mit verschiedenen Interessenvertretern erarbeitet. Im Ergebnis dessen wurde die Systematik eines Zielbaumes mit Hauptzielen und untergeordneten Teilzielen als Grundlage der Priorisierung nach dem holistischen Anspruch der transdisziplin{\"a}ren Forschung aufgestellt. F{\"u}r die Messbarkeit, inwieweit Teilziele erreicht sind oder Handlungsbedarf besteht, wurden Indikatoren erarbeitet. Die Indikatoren-Aufstellung erfolgte exemplarisch f{\"u}r das Handlungsfeld Siedlungswasserwirtschaft in allen Skalen des ABC-Systems. Die im BMBF-MoMo-Projekt generierte umfassende Datengrundlage erm{\"o}glichte die Anwendung und Evaluierung des Toolboxmodells mit unterschiedlichem quantitativem und qualitativem Dateninput. Verschiedene Kombinationen von A (Ersterkennung), B (Zeigerwerte) und C (Modell/Index) als Grundlage der Priorisierung mit der Composite Programmierung erm{\"o}glichten die Durchf{\"u}hrung und Bewertung des transdisziplin{\"a}ren Planungstools. Die ermittelten Rangfolgen von Teilzielen mit unterschiedlichen Bewertungsvarianten ergaben {\"a}hnliche Tendenzen. Das ist ein Hinweis daf{\"u}r, dass f{\"u}r die zuk{\"u}nftige Anwendung des Toolboxmodells die operative Wissensgenerierung, d.h. das schrittweise Hinzuf{\"u}gen neu ermittelter, gesicherter Daten, funktioniert. Eine schwierige Datenverf{\"u}gbarkeit oder eine noch im Prozess befindliche wissenschaftliche Analyse sollen keine Hindernisse f{\"u}r eine schrittweise und erweiterbare Zielpriorisierung und Maßnahmenplanung sein.
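As an illustrative aside to the Composite Programming based prioritization described above (a generic sketch, not the toolbox implementation of the thesis): indicator values are normalized to a 0-1 scale, each alternative's weighted Lp-distance to the ideal point is computed, and the alternatives are ranked by that distance; the indicator names, weights, and bounds below are assumptions for illustration only.

# Minimal sketch of a composite-programming style ranking (illustrative only).
import numpy as np

def composite_ranking(indicators, weights, ideal, worst, p=2.0):
    X = np.asarray(indicators, float)        # rows: alternatives, columns: indicators
    norm = (X - np.asarray(worst, float)) / (np.asarray(ideal, float) - np.asarray(worst, float))
    norm = np.clip(norm, 0.0, 1.0)           # 1 = ideal value, 0 = worst value
    w = np.asarray(weights, float) / np.sum(weights)
    dist = np.sum(w * np.abs(1.0 - norm) ** p, axis=1) ** (1.0 / p)
    return np.argsort(dist)                  # smallest distance to the ideal point ranks first

alternatives = [[0.8, 40.0, 3.0],            # hypothetical [coverage, cost, risk] per sub-goal
                [0.6, 25.0, 2.0],
                [0.9, 60.0, 4.0]]
order = composite_ranking(alternatives, weights=[0.5, 0.3, 0.2],
                          ideal=[1.0, 20.0, 1.0], worst=[0.0, 80.0, 5.0])
print("priority order of alternatives:", order)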
Trotz der Komplexit{\"a}t des transdisziplin{\"a}ren Ansatzes wird durch die Anwendung des Toolboxmodells eine effiziente und zielorientierte Handlungspriorisierung erm{\"o}glicht. Die Effizienz wird erreicht durch ressourcenschonende und flexible, zielfokussierte Datenermittlung. Zeit und Kosten im Planungsprozess k{\"o}nnen eingespart werden. Die letztlich erzielte Priorisierung von Handlungsempfehlungen erfolgt individuell an die Eigenart des Untersuchungsgebietes angepasst, was hinsichtlich ihrer Wirkung als erfolgversprechend gilt.}, subject = {Wasserreserve}, language = {de} } @phdthesis{Schmitz, author = {Schmitz, Tonia Annick}, title = {Entwicklung und Bilanzierung eines Photobioreaktorsystems zur Makroalgenkultivierung am Standort einer landwirtschaftlichen Biogasanlage}, doi = {10.25643/bauhaus-universitaet.4177}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200604-41774}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {220}, abstract = {Marine Makroalgen besitzen vielversprechende Eigenschaften und Inhaltsstoffe f{\"u}r die Verwendung als Energietr{\"a}ger, Nahrungsmittel oder als Ausgangsstoff f{\"u}r Pharmazeutika. Dass die Quantit{\"a}t und Qualit{\"a}t der in nat{\"u}rlicher Umgebung wachsenden Makroalgen schwanken, reduziert jedoch deren Verwertbarkeit und erschwert die Erschließung hochpreisiger Marktsegmente. Zudem ist eine Ausweitung der Zucht in marinen und k{\"u}stennahen Aquakulturen in Europa gegenw{\"a}rtig wenig aussichtsreich, da vielversprechende Areale bereits zum Fischfang oder als Erholungs- bzw. Naturschutzgebiete ausgewiesen sind. Im Rahmen dieser Arbeit wird demzufolge ein geschlossenes Photobioreaktorsystem zur Makroalgenkultivierung entwickelt, welches eine umfassende Kontrolle der abiotischen Kultivierungsparameter und eine effektive Aufbereitung des Kulturmediums vorsieht, um eine standortunabh{\"a}ngige Algenproduktion zu erm{\"o}glichen. Zur Bilanzierung des Gesamtkonzeptes einer Kultivierung und Verwertung (stofflich oder energetisch) werden die spezifischen Wachstumsraten und Methanbildungspotentiale der Algenarten Ulva intestinalis, Fucus vesiculosus und Palmaria palmata in praktischen Versuchen ermittelt. Im Ergebnis wird f{\"u}r den gegenw{\"a}rtigen Entwicklungsstand der Kultivierungsanlage eine positive Bilanz f{\"u}r die stoffliche Verwertung der Algenart Ulva intestinalis und eine negative Bilanz f{\"u}r die energetische Verwertung aller untersuchten Algenarten erzielt. Wird ein Optimalszenario betrachtet, in dem die Besatzdichten und Wachstumsraten der Algen in der Zucht erh{\"o}ht werden, bleibt die Energiebilanz negativ. Allerdings summieren sich die finanziellen Einnahmen durch einen Verkauf der Algen als Produkt auf j{\"a}hrlich 460.869€ f{\"u}r Ulva intestinalis, 4.010€ f{\"u}r Fucus vesiculosus und 16.913€ f{\"u}r Palmaria palmata. Im Ergebnis ist insbesondere eine stoffliche Verwertung der gez{\"u}chteten Gr{\"u}nalge Ulva intestinalis anzustreben und die Produktivit{\"a}t der Zuchtanlage im Sinne des Optimalszenarios zu steigern.}, subject = {Makroalgen}, language = {de} } @article{Schoenig, author = {Sch{\"o}nig, Barbara}, title = {Ererbte Transformation. Kommentar zu Matthias Bernt und Andrej Holm „Die Ostdeutschlandforschung muss das Wohnen in den Blick nehmen"}, series = {s u b \ u r b a n. zeitschrift f{\"u}r kritische stadtforschung}, volume = {2020}, journal = {s u b \ u r b a n.
zeitschrift f{\"u}r kritische stadtforschung}, number = {Band 8, Heft 3}, publisher = {Sub\urban e.V.}, address = {Leipzig}, doi = {10.36900/suburban.v8i3.620}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43296}, pages = {115 -- 122}, abstract = {Matthias Bernt und Andrej Holm weisen zu Recht darauf hin, dass es einer Forschung zu ostdeutschen St{\"a}dten als konzeptionell eigenst{\"a}ndigem Feld bedarf, die die spezifische Verr{\"a}umlichung des tiefgreifenden gesellschaftlichen Transformationsprozesses nach 1990 ins Zentrum stellt. Dabei betrachten sie insbesondere das Feld des Wohnens als produktiv, um Kenntnis {\"u}ber die Struktur und Wirkung dieses Prozesses zu erlangen. Allerdings bleiben sie vage dabei, wie eine solche spezifisch auf Ostdeutschland gerichtete Wohnungsforschung zu konzipieren w{\"a}re und in welcher Weise die Besonderheiten und Parallelit{\"a}ten ostdeutscher Entwicklungen zu den Transformationen von Wohnungs- und Stadtentwicklungspolitik in Westdeutschland, aber auch international, in Bezug zu setzen w{\"a}ren.}, subject = {Deutschland <{\"O}stliche L{\"a}nder>}, language = {de} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers. As a result of transportation processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating LDC. In this study, machine learning methods, namely support vector regression, Gaussian process regression, M5 model tree (M5P) and random forest, and multiple linear regression were examined in predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including correlation coefficient (CC), root mean squared error (RMSE) and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. The Taylor chart was used to evaluate the models and the results showed that among the machine learning models, M5P had superior performance, with CC of 0.823, RMSE of 454.9 and MAE of 380.9. The model of Sahay and Dutta, with CC of 0.795, RMSE of 460.7 and MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. 
In conclusion, the results proved that the developed M5P model with simple formulations was superior to the other machine learning models and the empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{BandJanizadehSahaetal., author = {Band, Shahab S. and Janizadeh, Saeid and Saha, Sunil and Mukherjee, Kaustuv and Khosrobeigi Bozchaloei, Saeid and Cerd{\`a}, Artemi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Evaluating the Efficiency of Different Regression, Decision Tree, and Bayesian Machine Learning Algorithms in Spatial Piping Erosion Susceptibility Using ALOS/PALSAR Data}, series = {Land}, volume = {2020}, journal = {Land}, number = {volume 9, issue 10, article 346}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/land9100346}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210122-43424}, pages = {1 -- 22}, abstract = {Piping erosion is one form of water erosion that leads to significant changes in the landscape and environmental degradation. In the present study, we evaluated piping erosion modeling in the Zarandieh watershed of Markazi province in Iran based on random forest (RF), support vector machine (SVM), and Bayesian generalized linear models (Bayesian GLM) machine learning algorithms. For this goal, due to the importance of various geo-environmental and soil properties in the evolution and creation of piping erosion, 18 variables were considered for modeling the piping erosion susceptibility in the Zarandieh watershed. A total of 152 piping erosion points were recognized in the study area and divided into training (70\%) and validation (30\%) sets for modeling. The area under the curve (AUC) was used to assess the efficiency of the RF, SVM, and Bayesian GLM models. The piping erosion susceptibility results indicated that all three RF, SVM, and Bayesian GLM models had high efficiency in the testing step, with AUC values of 0.9 for RF, 0.88 for SVM, and 0.87 for Bayesian GLM. Altitude, pH, and bulk density were the variables that had the greatest influence on the piping erosion susceptibility in the Zarandieh watershed. This result indicates that geo-environmental and soil chemical variables are accountable for the expansion of piping erosion in the Zarandieh watershed.}, subject = {Maschinelles Lernen}, language = {en} } @article{AhmadiBaghbanSadeghzadehetal., author = {Ahmadi, Mohammad Hossein and Baghban, Alireza and Sadeghzadeh, Milad and Zamen, Mohammad and Mosavi, Amir and Shamshirband, Shahaboddin and Kumar, Ravinder and Mohammadi-Khanaposhtani, Mohammad}, title = {Evaluation of electrical efficiency of photovoltaic thermal solar collector}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1734094}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200304-41049}, pages = {545 -- 565}, abstract = {In this study, machine learning methods of artificial neural networks (ANNs), least squares support vector machines (LSSVM), and neuro-fuzzy are used for advancing prediction models for the thermal performance of a photovoltaic-thermal solar collector (PV/T). In the proposed models, the inlet temperature, flow rate, heat, solar radiation, and the sun heat have been considered as the input variables.
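An illustrative aside to the piping-erosion entry above (not the cited workflow or data): a minimal sketch, assuming scikit-learn and synthetic presence/absence points, of a random-forest susceptibility classifier evaluated with the area under the ROC curve (AUC).

# Minimal sketch of a susceptibility classifier with AUC evaluation (synthetic data).
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(2)
X = rng.normal(size=(152, 18))                   # 152 mapped points, 18 conditioning variables
y = (X[:, 0] - 0.7 * X[:, 2] > 0).astype(int)    # synthetic occurrence labels

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=2)
clf = RandomForestClassifier(n_estimators=300, random_state=2).fit(X_tr, y_tr)
auc = roc_auc_score(y_te, clf.predict_proba(X_te)[:, 1])
print(f"test AUC = {auc:.2f}")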
Data set has been extracted through experimental measurements from a novel solar collector system. Different analyses are performed to examine the credibility of the introduced models and evaluate their performances. The proposed LSSVM model outperformed the ANFIS and ANNs models. LSSVM model is reported suitable when the laboratory measurements are costly and time-consuming, or achieving such values requires sophisticated interpretations.}, subject = {Fotovoltaik}, language = {en} } @incollection{BeeEgert, author = {Bee, Julia and Egert, Gerko}, title = {Experimente lernen, techniken tauschen, zur Einleitung}, series = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, booktitle = {Experimente lernen, Techniken tauschen. Ein spekulatives Handbuch}, publisher = {Nocturne}, address = {Berlin/Weimar}, doi = {10.25643/bauhaus-universitaet.4255}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20201008-42553}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {7 -- 26}, abstract = {Experimente lernen, Techniken tauschen Ein spekulatives Handbuch Das spekulative Handbuch bietet vielf{\"a}ltige Techniken f{\"u}r ein radikales Lernen und Vermitteln. Es umfasst konkrete Anleitungen, Erfahrungen und theoretische {\"U}berlegungen. Die Texte beteiligen sich an der Konzeption einer Vermittlung, die das gemeinsame Experimentieren (wieder) einf{\"u}hrt. Im Seminarraum, in Workshops, auf Festivals, in Fluren, Parks und der Stadt finden Lernen und Verlernen statt. Texte und Anleitungen u. a. zu: Filmessays, Collagen, Bank{\"u}berf{\"a}llen, der Universit{\"a}t der Toten, wildem Schreiben, konzeptuellem speed Dating, neurodiversem Lernen, Format-Denken, dem Theater der Sorge, dem Schreiblabor, dem K{\"o}rperstreik.}, subject = {K{\"u}nstlerische Forschung}, language = {de} }