@phdthesis{Mthunzi, author = {Mthunzi, Everett}, title = {Interactive Surface Environments: Design and Implementation}, doi = {10.25643/bauhaus-universitaet.6406}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230704-64065}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {128}, abstract = {This dissertation presents three studies on the design and implementation of interactive surface environments. It puts forward approaches to engineering interactive surface prototypes using prevailing methodologies and technologies. The scholarly findings from each study have been condensed into academic manuscripts, which are presented herewith. The first study identifies a communication gap between engineers of interactive surface systems (i.e., originators of concepts) and future developers. To bridge the gap, it explores a UML-based framework to establish a formal syntax for modeling the hardware, middleware, and software of interactive surface prototypes. The proposed framework targets models-as-end-products, enabling a shared view of research prototypes and thereby facilitating dialogue between concept originators and future developers. The second study supports developers with an open-source solution for exploiting 3D point clouds for interactive tabletop applications using CPU architectures. Given dense 3D point-cloud representations of tabletop environments, the study aims to mitigate the high computational effort by segmenting candidate interaction regions as a preprocessing step. The study contributes a robust open-source solution for reducing computational costs when leveraging 3D point clouds for interactive tabletop applications. The solution itself is flexible and adaptable to variable interactive surface applications. The third study contributes an archetypal concept for integrating mobile devices as active components in augmented tabletop surfaces. With emphasis on transparent development trails, the study demonstrates the utility of the open-source tool developed in the second study. In addition to leveraging 3D point clouds for real-time interaction, the research considers recent advances in computer vision and wireless communication to realize a modern, interactive tabletop application. A robust strategy that combines spatial augmented reality, point-cloud-based depth perception, CNN-based object detection, and Bluetooth communication is put forward. In addition to seamless communication between ad hoc mobile devices and interactive tabletop systems, the archetypal concept demonstrates the benefits of preprocessing point clouds by segmenting candidate interaction regions, as suggested in the second study.
Collectively, the studies presented in this dissertation contribute: (1) bridging the gap between originators of interactive surface concepts and future developers, (2) promoting the exploration of 3D point clouds for interactive surface applications using CPU-based architectures, and (3) leveraging 3D point clouds together with emerging CNN-based object detection and Bluetooth communication technologies to advance existing surface interaction concepts.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @article{SchwenkeSoebkeKraft, author = {Schwenke, Nicolas and S{\"o}bke, Heinrich and Kraft, Eckhard}, title = {Potentials and Challenges of Chatbot-Supported Thesis Writing: An Autoethnography}, series = {Trends in Higher Education}, volume = {2023}, journal = {Trends in Higher Education}, number = {Volume 2, issue 4}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/higheredu2040037}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20231207-65016}, pages = {611 -- 635}, abstract = {The release of the large language model-based chatbot ChatGPT 3.5 in November 2022 has brought considerable attention to the subject of artificial intelligence, and not only among the public. From the perspective of higher education, ChatGPT challenges various learning and assessment formats, as it significantly reduces the effectiveness of their learning and assessment functionalities. In particular, ChatGPT might be applied to formats that require learners to generate text, such as bachelor's theses or student research papers. Accordingly, the research question arises as to what extent the writing of bachelor's theses is still a valid learning and assessment format. Correspondingly, in this exploratory study, the first author was asked to write his bachelor's thesis exploiting ChatGPT. To trace the impact of ChatGPT methodically, an autoethnographic approach was used. First, all considerations on the potential use of ChatGPT were documented in logs, and second, all ChatGPT chats were logged. Both logs and chat histories were analyzed and are presented along with recommendations for students regarding the use of ChatGPT, as suggested by a common framework. In conclusion, ChatGPT is beneficial for thesis writing during various activities, such as brainstorming, structuring, and text revision. However, limitations arise, e.g., in referencing. Thus, ChatGPT requires continuous validation of the generated outcomes and thereby fosters learning. Currently, ChatGPT is valued as a beneficial tool in thesis writing. However, writing a conclusive thesis still requires the learner's meaningful engagement. Accordingly, writing a thesis is still a valid learning and assessment format.
With further releases of ChatGPT, an increase in capabilities is to be expected, and the research question needs to be reevaluated from time to time.}, subject = {Chatbot}, language = {en} } @article{HahlbrockBraunHeideletal., author = {Hahlbrock, David and Braun, Michael and Heidel, Robin and Lemmen, Patrik and Boumann, Roland and Bruckmann, Tobias and Schramm, Dieter and Helm, Volker and Willmann, Jan}, title = {Cable Robotic 3D-printing: additive manufacturing on the construction site}, series = {Construction Robotics}, volume = {2022}, journal = {Construction Robotics}, publisher = {Springer International Publishing}, address = {Cham}, doi = {10.1007/s41693-022-00082-3}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230124-48791}, pages = {1 -- 14}, abstract = {This paper outlines an important step in characterizing a novel field of robotic construction research in which a cable-driven parallel robot is used to extrude cementitious material in three-dimensional space, thus offering a comprehensive new approach to computational design and construction, and to robotic fabrication at larger scales. Developed by the Faculty of Art and Design at Bauhaus-University Weimar (Germany), the Faculty of Architecture at the University of Applied Sciences Dortmund (Germany), and the Chair of Mechatronics at the University of Duisburg-Essen (Germany), this approach offers unique advantages over existing additive manufacturing methods: the system is easily transportable and scalable, it does not require additional formwork or scaffolding, and it offers digital integration and informational oversight across the entire design and building process. This paper considers 1) key research components of cable robotic 3D-printing (such as computational design, material exploration, and robotic control), and 2) the integration of these parameters into a unified design and building process. The demonstration of the approach at full scale is of particular concern.}, subject = {Robotik}, language = {en} } @article{Stadler, author = {Stadler, Max}, title = {Gr{\"u}nderzeit. Hightech und Alternativen der Wissenschaft in West-Berlin}, series = {NTM Zeitschrift f{\"u}r Geschichte der Wissenschaften, Technik und Medizin}, volume = {2022}, journal = {NTM Zeitschrift f{\"u}r Geschichte der Wissenschaften, Technik und Medizin}, number = {30 (2022)}, publisher = {Birkh{\"a}user}, address = {Basel}, doi = {10.1007/s00048-022-00352-9}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20230124-48800}, pages = {599 -- 632}, abstract = {Zu den diversen Unternehmungen sozialbewegter „Gegenwissenschaft", die um 1980 auf der Bildfl{\"a}che der BRD erschienen, z{\"a}hlte der 1982 gegr{\"u}ndete Berliner Wissenschaftsladen e. V., kurz WILAB - eine Art „alternatives" Spin-off der Technischen Universit{\"a}t Berlin. Der vorliegende Beitrag situiert die Ausgr{\"u}ndung des „Ladens" im Kontext zeitgen{\"o}ssischer Fortschritte der (regionalen) Forschungs- und Technologiepolitik. Gezeigt wird, wie der deindustrialisierenden Inselstadt, qua „innovationspolitischer" Gegensteuerung, dabei sogar eine gewisse Vorreiterrolle zukam: {\"u}ber die Stadtgrenzen hinaus sichtbare Neuerungen wie die Gr{\"u}ndermesse BIG TECH oder das 1983 er{\"o}ffnete Berliner Innovations- und Gr{\"u}nderzentrum (BIG), der erste „Incubator" [sic] der BRD, etwa gingen auf das Konto der 1977/78 lancierten Technologie-Transferstelle der TU Berlin, TU-transfer.
Anders gesagt: tendenziell bekam man es hier nun mit Verh{\"a}ltnissen zu tun, die immer weniger mit den Tr{\"a}umen einer „kritischen", nicht-fremdbestimmten (Gegen‑)Wissenschaft kompatibel waren. Latent kontr{\"a}r zur historiographischen Prominenz des wissenschaftskritischen Zeitgeists fristeten „alternativen" Zielsetzungen verpflichtete Unternehmungen wie „WILAB" ein relativ marginalisiertes Nischendasein. Dennoch wirft das am WILAB verfolgte, so gesehen wenig aussichtsreiche Anliegen, eine andere, n{\"a}mlich „humanere" Informationstechnologie in die Wege zu leiten, ein instruktives Licht auf die Aufbr{\"u}che „unternehmerischer" Wissenschaft in der BRD um 1980.}, subject = {Berlin}, language = {de} } @misc{CarvalhoDaher, type = {Master Thesis}, author = {Carvalho Daher, Cesar Felipe}, title = {Horoskopos: a virtual planetarium for astrology}, doi = {10.25643/bauhaus-universitaet.4718}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220930-47181}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {104}, abstract = {This report details the development of Horoskopos, a virtual planetarium for astrology. This project was an attempt to develop a learning tool for studying astrological concepts as connected to observational astronomy. The premise that astrology and observational astronomy were once inseparable in ancient times guided the conceptualization of this tool as an interactive planetarium. The main references were existing software and applications for visualization in astrology and astronomy. Professional astrology teachers were consulted in order to better understand the state of astrology teaching and learning, as well as existing tools and practices. Horoskopos was built using the Unity3D development interface, which is based on the C\# programming language. It also relied on the Swiss Ephemeris coding interface from Astrodienst. The development process was experimental, and many of the required skills were acquired along the way. Usability tests were performed as new features were added to the interface. The final version of Horoskopos is fully usable, with many interactive visualization features and a defined visual identity. It was validated together with professional astrologers for its effectiveness in concept and visualization.}, subject = {Mediendesign}, language = {en} } @phdthesis{CarvajalBermudez, author = {Carvajal Berm{\´u}dez, Juan Carlos}, title = {New methods of citizen participation based on digital technologies}, doi = {10.25643/bauhaus-universitaet.4712}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220906-47124}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {The current thesis presents research on new methods of citizen participation based on digital technologies. The focus of the research lies on decentralized methods of participation, where citizens take the role of co-creators. The research project first conducted a review of the literature on citizen participation, its origins, and the different paradigms that have emerged over the years. The literature review also looked at the influence of technologies on participation processes and the theoretical frameworks that have emerged to understand the introduction of technologies in the context of urban development. The literature review generated the conceptual basis for the further development of the thesis.
The research begins with a survey of technology-enabled participation applications that examined the roles and structures emerging due to the introduction of technology. The results showed that cities use technology mostly to control and monitor urban infrastructure and are rather reluctant to give citizens the role of co-creators. Based on these findings, three case studies were developed. Digital tools for citizen participation were conceived and introduced for each case study. The adoption and reaction of the citizens were observed using three data collection methods. The results of the case studies showed consistently that previous participation and engagement with informal citizen participation are a determining factor in the potential adoption of digital tools for decentralized engagement. Based on these results, the case studies proposed methods and frameworks that can be used for the conception and introduction of technologies for decentralized citizen participation.}, subject = {Partizipation}, language = {en} } @phdthesis{Vogler, author = {Vogler, Verena}, title = {A framework for artificial coral reef design: Integrating computational modelling and high precision monitoring strategies for artificial coral reefs - an Ecosystem-aware design approach in times of climate change}, isbn = {978-3-00-074495-2}, doi = {10.25643/bauhaus-universitaet.4611}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220322-46115}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {243}, abstract = {Tropical coral reefs, among the world's oldest ecosystems and supporting some of the highest levels of biodiversity on the planet, are currently facing an unprecedented ecological crisis during this massive human-activity-induced period of extinction. Hence, tropical reefs symbolically stand for the destructive effects of human activities on nature [4], [5]. Artificial reefs are excellent examples of how architectural design can be combined with ecosystem regeneration [6], [7], [8]. However, working at the interface between the artificial and the complex and temporal nature of natural systems presents a challenge, inter alia with respect to the B-rep modelling legacy of computational modelling. The presented doctorate investigates strategies for applying digital practice to realise what is an essential bulwark for retaining reefs in impossibly challenging times. Beyond the main question of integrating computational modelling and high precision monitoring strategies in artificial coral reef design, this doctorate explores techniques, methods, and linking frameworks to support future research and practice in ecology-led design contexts. Considering the many existing approaches to artificial coral reef design, one finds that they often fall short of precisely understanding the relationships between architectural and ecological aspects (e.g. how surface design and material composition can foster coral larvae settlement, or how structural three-dimensionality can enhance biodiversity) and lack an integrated underwater (UW) monitoring process. Such a process is necessary in order to gather knowledge about the ecosystem and make it available for design, and to learn whether artificial structures contribute to reef regeneration or rather harm the coral reef ecosystem.
For the research, empirical experimental methods were applied: algorithmic coral reef design, high precision UW monitoring, and computational modelling and simulation, validated through parallel real-world physical experimentation on two Artificial Reef Prototypes (ARPs) in Gili Trawangan, Indonesia (2012-today). Multiple discrete methods and sub-techniques were developed in seventeen computational experiments and applied such that many are cross-validated and integrated into an overall framework that is offered as a significant contribution to the field. Other main contributions include the Ecosystem-aware design approach, Key Performance Indicators (KPIs) for coral reef design, algorithmic design and fabrication of Biorock cathodes, new high precision UW monitoring strategies, long-term real-world constructed experiments, new digital analysis methods, and two new front-end web-based tools for designing and monitoring reefs. The methodological framework is a finding of the research; it has many technical components that were tested and combined in this way for the very first time. In summary, the thesis responds to the urgency and relevance of preserving marine species in tropical reefs during this massive extinction period by offering a differentiated approach towards artificial coral reefs - demonstrating the feasibility of digitally designing such 'living architecture' according to multiple context and performance parameters. It also provides an in-depth critical discussion of computational design and architecture in the context of ecosystem regeneration and Planetary Thinking. In that respect, the thesis functions as both theoretical and practical background for computational design, ecology, and marine conservation - not only to foster the design of artificial coral reefs technically but also to provide essential criteria and techniques for conceiving them. Keywords: Artificial coral reefs, computational modelling, high precision underwater monitoring, ecology in design.}, subject = {Korallenriff}, language = {en} } @phdthesis{Gollub, author = {Gollub, Tim}, title = {Information Retrieval for the Digital Humanities}, doi = {10.25643/bauhaus-universitaet.4673}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220801-46738}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {177}, abstract = {In ten chapters, this thesis presents information retrieval technology which is tailored to the research activities that arise in the context of corpus-based digital humanities projects. The presentation is structured by a conceptual research process that is introduced in Chapter 1. The process distinguishes a set of five research activities: research question generation, corpus acquisition, research question modeling, corpus annotation, and result dissemination. Each of these research activities elicits different information retrieval tasks with special challenges, for which algorithmic approaches are presented after an introduction of the core information retrieval concepts in Chapter 2. A vital concept in many of the presented approaches is the keyquery paradigm introduced in Chapter 3, which represents an operation that returns relevant search queries in response to a given set of input documents. Keyqueries are proposed in Chapter 4 for the recommendation of related work, and in Chapter 5 for improving access to aspects hidden in the long tail of search result lists. With pseudo-descriptions, a document expansion approach is presented in Chapter 6.
The approach improves the retrieval performance for corpora where only bibliographic metadata is originally available. In Chapter 7, the keyquery paradigm is employed to generate dynamic taxonomies for corpora in an unsupervised fashion. Chapter 8 turns to the exploration of annotated corpora and presents scoped facets as a conceptual extension to faceted search systems, which is particularly useful in exploratory search settings. For the purpose of highlighting the major topical differences in a sequence of sub-corpora, an algorithm called topical sequence profiling is presented in Chapter 9. The thesis concludes with two pilot studies regarding the visualization of (re)search results for the means of successful result dissemination: a metaphoric interpretation of the information nutrition label, as well as the philosophical bodies, which are 3D-printed search results.}, subject = {Information Retrieval}, language = {en} } @article{SoebkeLueck, author = {S{\"o}bke, Heinrich and L{\"u}ck, Andrea}, title = {Framing Algorithm-Driven Development of Sets of Objectives Using Elementary Interactions}, series = {Applied System Innovation}, volume = {2022}, journal = {Applied System Innovation}, number = {Volume 5, issue 3, article 49}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/asi5030049}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220713-46624}, pages = {1 -- 20}, abstract = {Multi-criteria decision analysis (MCDA) is an established methodology to support decision-making in multi-objective problems. For conducting an MCDA, in most cases a set of objectives (SOO) is required, which consists of a hierarchical structure composed of objectives, criteria, and indicators. The development of an SOO is usually based on moderated development processes requiring high organizational and cognitive effort from all stakeholders involved. This article proposes elementary interactions as a key paradigm of an algorithm-driven development process for an SOO that requires little moderation effort. Elementary interactions are self-contained information requests that may be answered with little cognitive effort. The pairwise comparison of elements in the well-known analytic hierarchy process (AHP) is an example of an elementary interaction. Each elementary interaction in the development process presented contributes to the stepwise development of an SOO. Based on the hypothesis that an SOO may be developed exclusively using elementary interactions (EIs), a concept for a multi-user platform is proposed. Essential components of the platform are a Model Aggregator, an Elementary Interaction Stream Generator, a Participant Manager, and a Discussion Forum. While the latter component serves the professional exchange of the participants, the first three components are intended to be automatable by algorithms. The proposed platform concept has been partly evaluated in an exploratory validation study demonstrating the general functionality of the algorithms outlined. In summary, the suggested platform concept demonstrates the potential to ease SOO development processes: it does not restrict the application domain, it is intended to work with little administrative and moderation effort, and it supports the further development of an existing SOO in the event of changes in external conditions.
The algorithm-driven development of SOOs proposed in this article may ease the development of MCDA applications and, thus, may have a positive effect on the spread of MCDA applications.}, subject = {Multikriteria-Entscheidung}, language = {en} } @book{BreuerBartFreieretal., author = {Breuer, Johannes and Bart, Marlene and Freier, Alex Leo and R{\"u}nker, Maximilian and Jakubek, Kristin and Rubiano, Juan and Groos, Cora and Š{\´a}lek, Martin and Fritz, Henrieke and Kokkinidou, Eirini and Richter, Fabian and Liu, Ani and Held, Tobias and Moses, Gabriel S and Blasius, Clara Maria and Sp{\aa}ng, Fanny and Bencicova, Evelyn and R{\"u}ckeis, Julia and Thurow, Katharina and Maas, Frederike and Farf{\´a}n, Vanessa and Tikka, Emilia and Lee, Sang and Holzheu, Stefanie}, title = {Atlas der Datenk{\"o}rper. K{\"o}rperbilder in Kunst, Design und Wissenschaft im Zeitalter digitaler Medien}, volume = {2022}, editor = {Breuer, Johannes and Bart, Marlene and Freier, Alex Leo}, publisher = {transcript Verlag}, address = {Bielefeld}, issn = {2750-7483}, doi = {10.1515/9783839461785}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220411-46248}, publisher = {Bauhaus-Universit{\"a}t Weimar}, pages = {172}, abstract = {Digitale Technologien und soziale Medien ver{\"a}ndern die Selbst- und K{\"o}rperwahrnehmung und verzerren, verst{\"a}rken oder produzieren dabei spezifische K{\"o}rperbilder. Die Beitr{\"a}ger*innen kartographieren diese Ph{\"a}nomene, fragen nach ihrer medialen Existenzweise sowie nach den M{\"o}glichkeiten ihrer Kritik. Dabei begegnen sie ihrer Neuartigkeit mit einer transdisziplin{\"a}ren Herangehensweise. Aus sowohl der Perspektive k{\"u}nstlerischer und gestalterischer Forschung als auch der Kunst-, Kultur- und Medienwissenschaft sowie der Psychologie und Neurowissenschaft wird die Landschaft rezenter K{\"o}rperbilder und Techniken einer digitalen K{\"o}rperlichkeit untersucht.}, subject = {K{\"o}rperbild}, language = {de} } @phdthesis{List, author = {List, Eik}, title = {Design, Analysis, and Implementation of Symmetric-key (Authenticated) Ciphers}, doi = {10.25643/bauhaus-universitaet.4523}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211103-45235}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {258}, abstract = {Modern cryptography has become a ubiquitous and essential part of our daily lives. Protocols for secure authentication and encryption protect our communication with various digital services, from private messaging and online shopping to bank transactions and the exchange of sensitive information. Those high-level protocols can naturally be only as secure as the authentication or encryption schemes underneath. Moreover, on a more detailed level, those schemes can also at best inherit the security of their underlying primitives. While widespread standards in modern symmetric-key cryptography, such as the Advanced Encryption Standard (AES), have been shown to resist analysis until now, closer analysis and design of related primitives can deepen our understanding. The present thesis consists of two parts that present six contributions: The first part considers block-cipher cryptanalysis of the round-reduced AES, the AES-based tweakable block cipher Kiasu-BC, and TNT. The second part studies the design, analysis, and implementation of provably secure authenticated encryption schemes. In general, cryptanalysis aims at finding distinguishable properties in the output distribution.
Block ciphers are a core primitive of symmetric-key cryptography, useful for the construction of various higher-level schemes ranging from authentication and encryption through authenticated encryption to integrity protection. Therefore, their analysis is crucial to secure cryptographic schemes at their lowest level. With rare exceptions, block-cipher cryptanalysis employs a systematic strategy of investigating known attack techniques. Modern proposals are expected to be evaluated against these techniques. The considerable evaluation effort, however, demands contributions not only from the designers but also from external sources. The Advanced Encryption Standard (AES) is one of the most widespread block ciphers nowadays. Therefore, it is naturally an interesting target for further analysis. Tweakable block ciphers augment the usual inputs of a secret key and a public plaintext by an additional public input called tweak. Among various proposals through the previous decade, this thesis identifies Kiasu-BC as a noteworthy attempt to construct a tweakable block cipher that is very close to the AES. Hence, its analysis intertwines closely with that of the AES and best illustrates the impact of the tweak on its security. Moreover, the thesis revisits the generic tweakable block cipher Tweak-and-Tweak (TNT) and its instantiation based on the round-reduced AES. The first part investigates the security of the AES against several forms of differential cryptanalysis, developing distinguishers on four to six (out of ten) rounds of AES. For Kiasu-BC, it exploits the additional freedom in the tweak to develop two forms of differential-based attacks: rectangles and impossible differentials. The results on Kiasu-BC consider an additional round compared to attacks on the (untweaked) AES. The authors of TNT had provided an initial security analysis that still left a gap between provable guarantees and attacks. Our analysis takes a considerable step towards closing this gap. For TNT-AES - an instantiation of TNT built upon the AES round function - this thesis further shows how to transform our distinguisher into a key-recovery attack. Many applications require the simultaneous authentication and encryption of transmitted data. Authenticated encryption (AE) schemes provide both properties. Modern AE schemes usually demand a unique public input called a nonce that must not repeat. This requirement, though, cannot always be guaranteed in practice. As part of a remedy, misuse-resistant and robust AE tries to reduce the impact of occasional misuses. However, robust AE considers not only the potential reuse of nonces. Common authenticated encryption also demands that the entire ciphertext be buffered until the authentication tag has been successfully verified. In practice, this approach is difficult to ensure since the setting may lack the resources for buffering the messages. Moreover, robustness guarantees in the case of misuse are valuable features. The second part of this thesis proposes three authenticated encryption schemes: RIV, SIV-x, and DCT. RIV is robust against nonce misuse and the release of unverified plaintexts. Both SIV-x and DCT provide high security independent of nonce repetitions. As the core under SIV-x, this thesis revisits the proof of a highly secure parallel MAC, PMAC-x, revises its details, and proposes SIV-x as a highly secure authenticated encryption scheme.
Finally, DCT is a generic approach to n-bit-secure deterministic AE that does not need to expand the ciphertext-tag string by more than n bits beyond the plaintext length. With its first part, this thesis aims to extend the understanding of (1) the cryptanalysis of round-reduced AES as well as (2) AES-like tweakable block ciphers. With its second part, it demonstrates how known approaches can be simply extended to (3) robust nonce-based as well as (4) highly secure deterministic authenticated encryption.}, subject = {Kryptologie}, language = {en} } @phdthesis{Berhe, author = {Berhe, Asgedom Haile}, title = {Mitigating Risks of Corruption in Construction: A theoretical rationale for BIM adoption in Ethiopia}, doi = {10.25643/bauhaus-universitaet.4517}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211007-45175}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {336}, abstract = {This PhD thesis sets out to investigate the potentials of Building Information Modeling (BIM) to mitigate risks of corruption in the Ethiopian public construction sector. The wide-ranging capabilities and promises of BIM have led to the strong perception among researchers and practitioners that it is an indispensable technology. Consequently, it has become a frequent subject of science and research. Meanwhile, many countries, especially developed ones, have committed themselves to applying the technology extensively. Increasing productivity is the most common and frequently cited reason for that. However, both technology developers and adopters are oblivious to the potentials of BIM in addressing critical challenges in the construction sector, such as corruption. This would be particularly significant in developing countries like Ethiopia, where the problems and effects of corruption are acute. Studies reveal that bribery and corruption have long pervaded the construction industry worldwide. The complex and fragmented nature of the sector provides an environment for corruption. The Ethiopian construction sector is not immune from this epidemic reality. In fact, it is regarded as one of the most vulnerable sectors owing to varying socio-economic and political factors. Ethiopia started adopting BIM in 2015, yet without clear goals and strategies. As a result, the potential of BIM for combating concrete problems of the sector remains untapped. To this end, this dissertation does pioneering work by showing how the collaboration and coordination features of the technology contribute to minimizing the opportunities for corruption. Tracing loopholes would otherwise remain complex and ineffective in traditional documentation processes. Proceeding from this anticipation, this thesis raises two primary questions: what are the areas and risks of corruption in Ethiopian public construction projects, and how could BIM be leveraged to mitigate these risks? To tackle these and other secondary questions, the research employs a mixed-method approach. The selected main research strategies are Survey, Grounded Theory (GT), and Archival Study. First, the author disseminates an online questionnaire among Ethiopian construction engineering professionals to pinpoint areas of vulnerability to corruption. 155 responses are compiled and scrutinized quantitatively. Then, a semi-structured in-depth interview is conducted with 20 senior professionals, primarily to comprehend the opportunities for and risks of corruption in the identified highly vulnerable project stages and decision points.
At the same time, open interviews (consultations) are held with 14 informants to gain awareness of the state of construction documentation, BIM, and loopholes for corruption in the country. Consequently, these qualitative data are analyzed utilizing the principles of GT, heat/risk mapping, and Social Network Analysis (SNA). The risk mapping assists the researcher in prioritizing corruption risks, whilst SNA makes it methodically feasible to identify key actors/stakeholders in the corruption venture. Based on the generated research data, the author constructs a [substantive] grounded theory around the elements of corruption in the Ethiopian public construction sector. This theory later guides the strategic proposition of BIM. Finally, 85 cases related to public construction are also analyzed systematically to substantiate and confirm previous findings. By way of these multiple research endeavors, based first and foremost on the triangulation of qualitative and quantitative data analysis, the author conveys a number of key findings. First, estimations; tender document preparation and evaluation; and construction material and quality control as well as additional work orders are found to be the most vulnerable stages in the design, tendering, and construction phases, respectively. Second, middle management personnel of contractors and clients, aided by brokers, play the most critical roles in corrupt transactions within the prevalent corruption network. Third, grand corruption persists in the sector, attributed to the fact that top management and higher officials exert their overriding power, supported by the lack of project audits and accountability. In contrast, individuals at the operational level exploit intentional and unintentional 'errors' as opportunities for corruption. In light of these findings, two conceptual BIM-based risk mitigation strategies are prescribed: active and passive automation of project audits, and the monitoring of project information throughout the projects' value chain. These propositions are made in reliance on BIM's present dimensional capabilities and the promises of Integrated Project Delivery (IPD). Moreover, BIM's potential in combination with other technologies, such as Information and Communication Technology (ICT) and Radio Frequency technologies, is also treated. All these arguments form the basis for the main thesis of this dissertation: that BIM is able to mitigate corruption risks in the Ethiopian public construction sector. The discourse on skepticism about BIM, stemming from the complex nature of corruption and from the strategic as well as technological limitations of BIM, is also illuminated and complemented by this work. Thus, the thesis uncovers possible research gaps and lays the foundation for further studies.}, subject = {Building Information Modeling}, language = {en} } @phdthesis{PreisDutra, author = {Preis Dutra, Joatan}, title = {Cultural Heritage on Mobile Devices: Building Guidelines for UNESCO World Heritage Sites' Apps}, doi = {10.25643/bauhaus-universitaet.4531}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20211129-45319}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {360}, abstract = {Technological improvements and access provide a fertile scenario for creating and developing mobile applications (apps).
This scenario results in a myriad of apps providing information regarding touristic destinations, including those with a cultural profile, such as apps dedicated to UNESCO World Heritage Sites (WHS). However, not all of these apps are equally effective. In order to be successful, an app's development must consider usability aspects and features aligned with reliable content. Although guidelines for mobile usability are broadly available, they are generic, and none of them concentrates specifically on cultural heritage places, especially those in open-air settings. This research aims to fill this literature gap and discusses how to develop adequate, specific guidelines for a better outdoor WHS experience. It uses an empirical approach applied to an open-air WHS city: Weimar and its Bauhaus and Classical Weimar sites. In order to build a new set of guidelines for open-air WHS, this research used a systematic approach to compare literature-based guidelines with industry-based ones (based on affordances), extracted from the available apps dedicated to WHS in Germany. The guidelines compiled from both sources were comparatively tested using two prototypes built from the distinct guideline sets, creating a set of recommendations that collects the best approaches from both sources and adds new ones suggested by the evaluation.}, subject = {Benutzerschnittstellenentwurfssystem}, language = {en} } @article{MeiabadiMoradiKaramimoghadametal., author = {Meiabadi, Mohammad Saleh and Moradi, Mahmoud and Karamimoghadam, Mojtaba and Ardabili, Sina and Bodaghi, Mahdi and Shokri, Manouchehr and Mosavi, Amir Hosein}, title = {Modeling the Producibility of 3D Printing in Polylactic Acid Using Artificial Neural Networks and Fused Filament Fabrication}, series = {polymers}, volume = {2021}, journal = {polymers}, number = {Volume 13, issue 19, article 3219}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/polym13193219}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20220110-45518}, pages = {1 -- 21}, abstract = {Polylactic acid (PLA) is a highly applicable material that is used in 3D printers due to some significant features such as its deformation property and affordable cost. To improve end-use quality, it is of significant importance to enhance the quality of fused filament fabrication (FFF)-printed objects in PLA. The purpose of this investigation was to boost toughness and to reduce the production cost of FFF-printed tensile test samples with the desired part thickness. To remove the need for numerous and redundant printed samples, the response surface method (RSM) was used. Statistical analysis was performed to deal with this concern by considering extruder temperature (ET), infill percentage (IP), and layer thickness (LT) as controlled factors. The artificial intelligence methods of artificial neural networks (ANN) and the ANN-genetic algorithm (ANN-GA) were further developed to estimate the dependent variables of toughness, part thickness, and production cost. Results were evaluated by correlation coefficient and RMSE values. According to the modeling results, ANN-GA as a hybrid machine learning (ML) technique could enhance the accuracy of modeling by about 7.5, 11.5, and 4.5\% for toughness, part thickness, and production cost, respectively, in comparison with those for the single ANN method.
On the other hand, the optimization results confirm that the optimized specimen is cost-effective and able to undergo comparatively large deformation, which enables the usability of printed PLA objects.}, subject = {3D-Druck}, language = {en} } @phdthesis{AlKhatib2021, author = {Al Khatib, Khalid}, title = {Computational Analysis of Argumentation Strategies}, doi = {10.25643/bauhaus-universitaet.4461}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20210719-44612}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {134}, year = {2021}, abstract = {The computational analysis of argumentation strategies is crucial for many downstream applications. It is required for nearly all kinds of text synthesis, writing assistance, and dialogue-management tools. While various tasks have been tackled in the area of computational argumentation, such as argumentation mining and quality assessment, the task of the computational analysis of argumentation strategies in texts has so far been overlooked. This thesis principally approaches the analysis of the strategies manifested in persuasive argumentative discourses, which aim for persuasion, as well as in deliberative argumentative discourses, which aim for consensus. To this end, the thesis presents a novel view of argumentation strategies for these two goals. Based on this view, new models for pragmatic and stylistic argument attributes are proposed, new methods for the identification of the modelled attributes are developed, and a new set of strategy principles in texts according to the identified attributes is presented and explored. Overall, the thesis contributes to the theory, data, method, and evaluation aspects of the analysis of argumentation strategies. The models, methods, and principles developed and explored in this thesis can be regarded as essential for promoting the applications mentioned above, among others.}, subject = {Argumentation}, language = {en} } @article{AhmadiBaghbanSadeghzadehetal., author = {Ahmadi, Mohammad Hossein and Baghban, Alireza and Sadeghzadeh, Milad and Zamen, Mohammad and Mosavi, Amir and Shamshirband, Shahaboddin and Kumar, Ravinder and Mohammadi-Khanaposhtani, Mohammad}, title = {Evaluation of electrical efficiency of photovoltaic thermal solar collector}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {volume 14, issue 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1734094}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200304-41049}, pages = {545 -- 565}, abstract = {In this study, the machine learning methods of artificial neural networks (ANNs), least squares support vector machines (LSSVM), and neuro-fuzzy systems are used to advance prediction models for the thermal performance of a photovoltaic-thermal solar collector (PV/T). In the proposed models, the inlet temperature, flow rate, heat, solar radiation, and the sun heat have been considered as the input variables. The data set has been extracted through experimental measurements from a novel solar collector system. Different analyses are performed to examine the credibility of the introduced models and evaluate their performances. The proposed LSSVM model outperformed the ANFIS and ANN models.
The LSSVM model is reported to be suitable when laboratory measurements are costly and time-consuming, or when obtaining such values requires sophisticated interpretation.}, subject = {Fotovoltaik}, language = {en} } @article{MosaviShamshirbandEsmaeilbeikietal., author = {Mosavi, Amir and Shamshirband, Shahaboddin and Esmaeilbeiki, Fatemeh and Zarehaghi, Davoud and Neyshabouri, Mohammadreza and Samadianfard, Saeed and Ghorbani, Mohammad Ali and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Comparative analysis of hybrid models of firefly optimization algorithm with support vector machines and multilayer perceptron for predicting soil temperature at different depths}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, Issue 1}, doi = {10.1080/19942060.2020.1788644}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200911-42347}, pages = {939 -- 953}, abstract = {This research aims to model soil temperature (ST) using the machine learning models of the multilayer perceptron (MLP) algorithm and the support vector machine (SVM) in hybrid form with the firefly optimization algorithm, i.e. MLP-FFA and SVM-FFA. In the current study, measured ST and meteorological parameters of the Tabriz and Ahar weather stations over the period 2013-2015 are used for training and testing of the studied models with delays of one and two days. To ascertain conclusive results for validation of the proposed hybrid models, the error metrics are benchmarked in an independent testing period. Moreover, Taylor diagrams are utilized for that purpose. The obtained results showed that, in the case of a one-day delay, except in predicting ST at 5 cm below the soil surface (ST5cm) at Tabriz station, MLP-FFA produced superior results compared with the MLP, SVM, and SVM-FFA models. However, for a two-day delay, MLP-FFA indicated increased accuracy in predicting ST5cm and ST20cm of Tabriz station and ST10cm of Ahar station in comparison with SVM-FFA. Additionally, for all of the models considered, the performance of the MLP-FFA and SVM-FFA hybrid models in the testing phase was found to be meaningfully superior to that of the classical MLP and SVM models.}, subject = {Bodentemperatur}, language = {en} } @article{HomaeiSoleimaniShamshirbandetal., author = {Homaei, Mohammad Hossein and Soleimani, Faezeh and Shamshirband, Shahaboddin and Mosavi, Amir and Nabipour, Narjes and Varkonyi-Koczy, Annamaria R.}, title = {An Enhanced Distributed Congestion Control Method for Classical 6LowPAN Protocols Using Fuzzy Decision System}, series = {IEEE Access}, journal = {IEEE Access}, number = {volume 8}, publisher = {IEEE}, doi = {10.1109/ACCESS.2020.2968524}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40805}, pages = {20628 -- 20645}, abstract = {Classical Internet of Things routing and wireless sensor networks can provide more precise monitoring of the covered area due to the higher number of utilized nodes. Because of the limitations of shared transfer media, many nodes in the network are prone to collisions during simultaneous transmissions. Medium access control protocols are usually more practical in networks with low traffic that are not subjected to external noise from adjacent frequencies. There are prevention, detection, and control solutions for congestion management in the network, all of which are the focus of this study.
In the congestion prevention phase, the proposed method chooses the next step of the path using a fuzzy decision-making system to distribute network traffic via optimal paths. In the congestion detection phase, a dynamic approach to queue management was designed to detect congestion in the least amount of time and prevent collisions. In the congestion control phase, the back-pressure method was used, based on the quality of the queue, to decrease the probability of including the pre-congested node in the pathway. The main goals of this study are to balance energy consumption across network nodes, reduce the rate of lost packets, and increase the quality of service in routing. Simulation results showed that the proposed Congestion Control Fuzzy Decision Making (CCFDM) method was more capable of improving routing parameters than recent algorithms.}, subject = {Internet der Dinge}, language = {en} } @article{HassannatajJoloudariHassannatajJoloudariSaadatfaretal., author = {Hassannataj Joloudari, Javad and Hassannataj Joloudari, Edris and Saadatfar, Hamid and GhasemiGol, Mohammad and Razavi, Seyyed Mohammad and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Nadai, Laszlo}, title = {Coronary Artery Disease Diagnosis: Ranking the Significant Features Using a Random Trees Model}, series = {International Journal of Environmental Research and Public Health, IJERPH}, volume = {2020}, journal = {International Journal of Environmental Research and Public Health, IJERPH}, number = {Volume 17, Issue 3, 731}, publisher = {MDPI}, doi = {10.3390/ijerph17030731}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200213-40819}, pages = {24}, abstract = {Heart disease is one of the most common diseases among middle-aged citizens. Among the vast number of heart diseases, coronary artery disease (CAD) is considered a common cardiovascular disease with a high death rate. The most popular tool for diagnosing CAD is medical imaging, e.g., angiography. However, angiography is known for being costly and also associated with a number of side effects. Hence, the purpose of this study is to increase the accuracy of coronary heart disease diagnosis by selecting significant predictive features in order of their ranking. In this study, we propose an integrated method using machine learning. The machine learning methods of random trees (RTs), the C5.0 decision tree, the support vector machine (SVM), and the Chi-squared automatic interaction detection (CHAID) decision tree are used in this study. The proposed method shows promising results, and the study confirms that the RTs model outperforms the other models.}, subject = {Maschinelles Lernen}, language = {en} } @article{KargarSamadianfardParsaetal., author = {Kargar, Katayoun and Samadianfard, Saeed and Parsa, Javad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir and Chau, Kwok-Wing}, title = {Estimating longitudinal dispersion coefficient in natural streams using empirical models and machine learning algorithms}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2020.1712260}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40775}, pages = {311 -- 322}, abstract = {The longitudinal dispersion coefficient (LDC) plays an important role in modeling the transport of pollutants and sediment in natural rivers.
As a result of transport processes, the concentration of pollutants changes along the river. Various studies have been conducted to provide simple equations for estimating the LDC. In this study, the machine learning methods of support vector regression, Gaussian process regression, the M5 model tree (M5P), and random forest, together with multiple linear regression, were examined for predicting the LDC in natural streams. Data sets from 60 rivers around the world with different hydraulic and geometric features were gathered to develop models for LDC estimation. Statistical criteria, including the correlation coefficient (CC), root mean squared error (RMSE), and mean absolute error (MAE), were used to scrutinize the models. The LDC values estimated by these models were compared with the corresponding results of common empirical models. The Taylor chart was used to evaluate the models, and the results showed that among the machine learning models, M5P had superior performance, with a CC of 0.823, an RMSE of 454.9, and an MAE of 380.9. The model of Sahay and Dutta, with a CC of 0.795, an RMSE of 460.7, and an MAE of 306.1, gave more precise results than the other empirical models. The main advantage of M5P models is their ability to provide practical formulae. In conclusion, the results proved that the developed M5P model with simple formulations was superior to the other machine learning models and empirical models; therefore, it can be used as a proper tool for estimating the LDC in rivers.}, subject = {Maschinelles Lernen}, language = {en} } @article{DehghaniSalehiMosavietal., author = {Dehghani, Majid and Salehi, Somayeh and Mosavi, Amir and Nabipour, Narjes and Shamshirband, Shahaboddin and Ghamisi, Pedram}, title = {Spatial Analysis of Seasonal Precipitation over Iran: Co-Variation with Climate Indices}, series = {ISPRS, International Journal of Geo-Information}, volume = {2020}, journal = {ISPRS, International Journal of Geo-Information}, number = {Volume 9, Issue 2, 73}, publisher = {MDPI}, doi = {10.3390/ijgi9020073}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200128-40740}, pages = {23}, abstract = {Temporary changes in precipitation may lead to sustained and severe drought or massive floods in different parts of the world. Knowing the variation in precipitation can effectively help decision-makers in water resources management. Large-scale circulation drivers have a considerable impact on precipitation in different parts of the world. In this research, the impact of the El Ni{\~n}o-Southern Oscillation (ENSO), Pacific Decadal Oscillation (PDO), and North Atlantic Oscillation (NAO) on seasonal precipitation over Iran was investigated. For this purpose, 103 synoptic stations with at least 30 years of data were utilized. The Spearman correlation coefficient between the indices in the previous 12 months and seasonal precipitation was calculated, and the meaningful correlations were extracted. Then, the month in which each of these indices has the highest correlation with seasonal precipitation was determined. Finally, the overall amount of increase or decrease in seasonal precipitation due to each of these indices was calculated. Results indicate that the Southern Oscillation Index (SOI), NAO, and PDO have the greatest impact on seasonal precipitation, in that order. Additionally, these indices have the highest impact on precipitation in winter, followed by autumn, spring, and summer.
SOI has a distinct impact on winter precipitation compared to the PDO and NAO, while in the other seasons, each index has its own particular impact on seasonal precipitation. Generally, all indices in different phases may decrease seasonal precipitation by up to 100\%. However, seasonal precipitation may also increase by more than 100\% in different seasons due to the impact of these indices. The results of this study can be used effectively in water resources management and especially in dam operation.}, subject = {Maschinelles Lernen}, language = {en} } @article{SaqlaiGhaniKhanetal., author = {Saqlai, Syed Muhammad and Ghani, Anwar and Khan, Imran and Ahmed Khan Ghayyur, Shahbaz and Shamshirband, Shahaboddin and Nabipour, Narjes and Shokri, Manouchehr}, title = {Image Analysis Using Human Body Geometry and Size Proportion Science for Action Classification}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {volume 10, issue 16, article 5453}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10165453}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200904-42322}, pages = {24}, abstract = {Gestures are one of the basic modes of human communication and are usually used to represent different actions. Automatic recognition of these actions forms the basis for solving more complex problems such as human behavior analysis, video surveillance, event detection, and sign language recognition. Action recognition from images is a challenging task, as key information such as temporal data, object trajectories, and optical flow is not available in still images, while measurements of the sizes of different regions of the human body, i.e., step size, arm span, and the lengths of the arm, forearm, and hand, provide valuable clues for the identification of human actions. In this article, a framework for the classification of human actions is presented, in which humans are detected and localized through faster region-based convolutional neural networks, followed by morphological image processing techniques. Furthermore, geometric features are extracted from the human blob and incorporated into the classification rules for the six human actions, i.e., standing, walking, single-hand side wave, single-hand top wave, both-hands side wave, and both-hands top wave. The performance of the proposed technique has been evaluated using precision, recall, omission error, and commission error. The proposed technique has been comparatively analyzed in terms of overall accuracy with existing approaches, showing that it performs well in contrast to its counterparts.}, subject = {Bildanalyse}, language = {en} } @misc{Froehlich, type = {Master Thesis}, author = {Fr{\"o}hlich, Jan}, title = {On systematic approaches for interpreted information transfer of inspection data from bridge models to structural analysis}, doi = {10.25643/bauhaus-universitaet.4131}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200416-41310}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {82}, abstract = {In conjunction with improved methods of monitoring damage and degradation processes, interest in the reliability assessment of reinforced concrete bridges has been increasing in recent years. Automated image-based inspections of the structural surface provide valuable data for extracting quantitative information about deteriorations, such as crack patterns. However, the knowledge gain results from processing this information in a structural context, i.e. relating the damage artifacts to building components.
@article{SaqlaiGhaniKhanetal., author = {Saqlai, Syed Muhammad and Ghani, Anwar and Khan, Imran and Ahmed Khan Ghayyur, Shahbaz and Shamshirband, Shahaboddin and Nabipour, Narjes and Shokri, Manouchehr}, title = {Image Analysis Using Human Body Geometry and Size Proportion Science for Action Classification}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {volume 10, issue 16, article 5453}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/app10165453}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200904-42322}, pages = {24}, abstract = {Gestures are one of the basic modes of human communication and are usually used to represent different actions. Automatic recognition of these actions forms the basis for solving more complex problems like human behavior analysis, video surveillance, event detection, and sign language recognition. Action recognition from images is a challenging task, as key information like temporal data, object trajectory, and optical flow is not available in still images. Measuring the size of different regions of the human body, i.e., step size, arm span, and the lengths of the arm, forearm, and hand, on the other hand, provides valuable clues for the identification of human actions. In this article, a framework for the classification of human actions is presented in which humans are detected and localized through Faster Region-based Convolutional Neural Networks (Faster R-CNN), followed by morphological image processing techniques. Furthermore, geometric features are extracted from the human blob and incorporated into the classification rules for the six human actions, i.e., standing, walking, single-hand side wave, single-hand top wave, both hands side wave, and both hands top wave. The performance of the proposed technique has been evaluated using precision, recall, omission error, and commission error. The proposed technique has been comparatively analyzed in terms of overall accuracy with existing approaches, showing that it performs well in contrast to its counterparts.}, subject = {Bildanalyse}, language = {en} } @misc{Froehlich, type = {Master Thesis}, author = {Fr{\"o}hlich, Jan}, title = {On systematic approaches for interpreted information transfer of inspection data from bridge models to structural analysis}, doi = {10.25643/bauhaus-universitaet.4131}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200416-41310}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {82}, abstract = {In conjunction with the improved methods of monitoring damage and degradation processes, the interest in the reliability assessment of reinforced concrete bridges has been increasing in recent years. Automated image-based inspections of the structural surface provide valuable data for extracting quantitative information about deteriorations, such as crack patterns. However, the knowledge gain results from processing this information in a structural context, i.e. relating the damage artifacts to building components. This way, transformation to structural analysis is enabled. This approach sets two further requirements: availability of structural bridge information and standardized storage for interoperability with subsequent analysis tools. Since the large datasets involved can only be processed efficiently in an automated manner, this work targets the implementation of the complete workflow from damage and building data to structural analysis. First, domain concepts are derived from the back-end tasks: structural analysis, damage modeling, and life-cycle assessment. The common interoperability format, the Industry Foundation Classes (IFC), and processes in these domains are further assessed. The need for user-controlled interpretation steps is identified, and the developed prototype thus allows interaction at subsequent model stages. The latter has the advantage that interpretation steps can be individually separated into either a structural analysis model, a damage information model, or a combination of both. This approach to damage information processing from the perspective of structural analysis is then validated in different case studies.}, subject = {Br{\"u}ckenbau}, language = {en} } @article{HarirchianLahmerKumarietal., author = {Harirchian, Ehsan and Lahmer, Tom and Kumari, Vandana and Jadhav, Kirti}, title = {Application of Support Vector Machine Modeling for the Rapid Seismic Hazard Safety Evaluation of Existing Buildings}, series = {Energies}, volume = {2020}, journal = {Energies}, number = {volume 13, issue 13, 3340}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en13133340}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200707-41915}, pages = {15}, abstract = {The economic losses from earthquakes tend to affect the national economy considerably; therefore, models that are capable of estimating the vulnerability and losses from future earthquakes are highly consequential for emergency planners for the purpose of risk mitigation. This demands a mass prioritization filtering of structures to identify vulnerable buildings for retrofitting purposes. The application of advanced structural analysis to each building to study the earthquake response is impractical due to complex calculations, long computational times, and exorbitant cost. This highlights the need for a fast and reliable screening method, commonly known as Rapid Visual Screening (RVS). The method serves as a preliminary screening platform, using an optimum number of seismic parameters of the structure and predefined output damage states. In this study, the efficacy of the Machine Learning (ML) application in damage prediction through a Support Vector Machine (SVM) model as the damage classification technique has been investigated. The developed model was trained and examined based on damage data from the 1999 D{\"u}zce Earthquake in Turkey, where the buildings' data consist of 22 performance modifiers that have been implemented with supervised machine learning.}, subject = {Erdbeben}, language = {en} }
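As an illustrative aside, a minimal sketch of an SVM damage-state classifier in the spirit of the RVS study above: a handful of structural performance modifiers mapped to a damage class. The synthetic features and labels are stand-ins for the Düzce survey data, and the pipeline shown (scaling plus an RBF-kernel SVC) is one common choice, not necessarily the study's exact setup.

import numpy as np
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(2)
X = rng.normal(size=(500, 5))   # placeholders for performance modifiers (storeys, overhangs, ...)
y = (X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.5, size=500) > 0).astype(int)  # damage state

clf = make_pipeline(StandardScaler(), SVC(kernel="rbf", C=1.0))
print("mean CV accuracy:", cross_val_score(clf, X, y, cv=5).mean().round(3))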
@article{KavrakovKareemMorgenthal, author = {Kavrakov, Igor and Kareem, Ahsan and Morgenthal, Guido}, title = {Comparison Metrics for Time-histories: Application to Bridge Aerodynamics}, doi = {10.25643/bauhaus-universitaet.4186}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200625-41863}, pages = {28}, abstract = {Wind effects can be critical for the design of lifelines such as long-span bridges. The existence of a significant number of aerodynamic force models, used to assess the performance of bridges, poses an important question regarding their comparison and validation. This study utilizes a unified set of metrics for a quantitative comparison of time-histories in bridge aerodynamics with a host of characteristics. Accordingly, nine comparison metrics are included to quantify the discrepancies in local and global signal features such as phase, time-varying frequency and magnitude content, probability density, nonstationarity, and nonlinearity. Among these, seven metrics available in the literature are introduced after recasting them for time-histories associated with bridge aerodynamics. Two additional metrics are established to overcome the shortcomings of the existing metrics. The performance of the comparison metrics is first assessed using generic signals with prescribed signal features. Subsequently, the metrics are applied to a practical example from bridge aerodynamics to quantify the discrepancies in the aerodynamic forces and response based on numerical and semi-analytical aerodynamic models. In this context, it is demonstrated how a discussion based on the set of comparison metrics presented here can aid a model evaluation by offering deeper insight. The outcome of the study is intended to provide a framework for the quantitative comparison and validation of aerodynamic models based on the underlying physics of fluid-structure interaction. Immediate further applications are expected for the comparison of time-histories that are simulated by data-driven approaches.}, subject = {Ingenieurwissenschaften}, language = {en} }
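As an illustrative aside, loosely related to the comparison-metrics paper above: two of the simplest discrepancy measures one can compute for a pair of time-histories, a relative RMS magnitude difference and a cross-correlation-based phase lag. These are generic illustrations, not the paper's nine metrics.

import numpy as np

dt = 0.01
t = np.arange(0.0, 10.0, dt)
ref = np.sin(2 * np.pi * 1.0 * t)               # reference time-history
test = 0.9 * np.sin(2 * np.pi * 1.0 * t - 0.3)  # attenuated, phase-shifted variant

rms = lambda s: np.sqrt(np.mean(s ** 2))
mag_err = (rms(test) - rms(ref)) / rms(ref)     # relative RMS magnitude difference

xcorr = np.correlate(test, ref, mode="full")    # lag at maximum cross-correlation
lag = (np.argmax(xcorr) - (len(ref) - 1)) * dt
print(f"magnitude error: {mag_err:+.1%}, phase lag: {lag:.3f} s")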
@article{ShabaniSamadianfardSattarietal., author = {Shabani, Sevda and Samadianfard, Saeed and Sattari, Mohammad Taghi and Mosavi, Amir and Shamshirband, Shahaboddin and Kmet, Tibor and V{\´a}rkonyi-K{\´o}czy, Annam{\´a}ria R.}, title = {Modeling Pan Evaporation Using Gaussian Process Regression K-Nearest Neighbors Random Forest and Support Vector Machines; Comparative Analysis}, series = {Atmosphere}, volume = {2020}, journal = {Atmosphere}, number = {Volume 11, Issue 1, 66}, doi = {10.3390/atmos11010066}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40561}, pages = {17}, abstract = {Evaporation is a very important process; it is one of the most critical factors in agricultural, hydrological, and meteorological studies. Due to the interactions of multiple climatic factors, evaporation is considered a complex and nonlinear phenomenon to model. Thus, machine learning methods have gained popularity in this realm. In the present study, four machine learning methods, Gaussian Process Regression (GPR), K-Nearest Neighbors (KNN), Random Forest (RF), and Support Vector Regression (SVR), were used to predict pan evaporation (PE). Meteorological data, including PE, temperature (T), relative humidity (RH), wind speed (W), and sunny hours (S), were collected from 2011 through 2017. The accuracy of the studied methods was determined using the statistical indices of Root Mean Squared Error (RMSE), correlation coefficient (R), and Mean Absolute Error (MAE). Furthermore, Taylor charts were utilized for evaluating the accuracy of the mentioned models. The results of this study showed that at the Gonbad-e Kavus, Gorgan, and Bandar Torkman stations, GPR with RMSE of 1.521 mm/day, 1.244 mm/day, and 1.254 mm/day, KNN with RMSE of 1.991 mm/day, 1.775 mm/day, and 1.577 mm/day, RF with RMSE of 1.614 mm/day, 1.337 mm/day, and 1.316 mm/day, and SVR with RMSE of 1.55 mm/day, 1.262 mm/day, and 1.275 mm/day showed appropriate performance in estimating PE values. It was found that GPR for Gonbad-e Kavus Station with input parameters of T, W, and S, and GPR for the Gorgan and Bandar Torkman stations with input parameters of T, RH, W, and S had the most accurate predictions and were proposed for precise estimation of PE. The findings of the current study indicated that PE values may be accurately estimated with a few easily measured meteorological parameters.}, subject = {Maschinelles Lernen}, language = {en} }
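As an illustrative aside, the three statistics that the Taylor charts mentioned in the pan-evaporation abstract above summarize for each model: standard deviation, correlation with observations, and centred RMS difference. The observation and prediction series below are synthetic placeholders.

import numpy as np

rng = np.random.default_rng(3)
obs = rng.gamma(3.0, 1.5, size=365)              # observed PE, mm/day (placeholder)
models = {"GPR": obs + rng.normal(0.0, 1.2, 365),
          "KNN": obs + rng.normal(0.0, 1.9, 365)}

for name, pred in models.items():
    r = np.corrcoef(obs, pred)[0, 1]
    crmsd = np.sqrt(np.mean(((pred - pred.mean()) - (obs - obs.mean())) ** 2))
    print(f"{name}: r = {r:.3f}, std = {pred.std():.2f} mm/day, centred RMSD = {crmsd:.2f} mm/day")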
@article{AbbaspourGilandehMolaeeSabzietal., author = {Abbaspour-Gilandeh, Yousef and Molaee, Amir and Sabzi, Sajad and Nabipour, Narjes and Shamshirband, Shahaboddin and Mosavi, Amir}, title = {A Combined Method of Image Processing and Artificial Neural Network for the Identification of 13 Iranian Rice Cultivars}, series = {agronomy}, volume = {2020}, journal = {agronomy}, number = {Volume 10, Issue 1, 117}, publisher = {MDPI}, doi = {10.3390/agronomy10010117}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200123-40695}, pages = {21}, abstract = {Due to the importance of identifying crop cultivars, the advancement of accurate assessment of cultivars is considered essential. The existing methods for identifying rice cultivars are mainly time-consuming, costly, and destructive. Therefore, the development of novel methods is highly beneficial. The aim of the present research is to classify common rice cultivars in Iran based on color, morphologic, and texture properties using artificial intelligence (AI) methods. In doing so, digital images of 13 rice cultivars in Iran in three forms of paddy, brown, and white are analyzed through pre-processing and segmentation using MATLAB. Ninety-two features, including 60 color, 14 morphologic, and 18 texture properties, were identified for each rice cultivar. In the next step, the normal distribution of the data was evaluated, and the possibility of observing a significant difference between all features of the cultivars was studied using analysis of variance. In addition, the least significant difference (LSD) test was performed to obtain a more accurate comparison between cultivars. To reduce the data dimensions and focus on the most effective components, principal component analysis (PCA) was employed. Accordingly, the accuracy of rice cultivar separation was calculated for paddy, brown rice, and white rice using discriminant analysis (DA), which was 89.2\%, 87.7\%, and 83.1\%, respectively. To identify and classify the desired cultivars, a multilayered perceptron neural network was implemented based on the most effective components. The results showed 100\% accuracy of the network in identifying and classifying all mentioned rice cultivars. Hence, it is concluded that the integrated method of image processing and pattern recognition methods, such as statistical classification and artificial neural networks, can be used for the identification and classification of rice cultivars.}, subject = {Maschinelles Lernen}, language = {en} } @article{FaroughiKarimimoshaverArametal., author = {Faroughi, Maryam and Karimimoshaver, Mehrdad and Aram, Farshid and Solgi, Ebrahim and Mosavi, Amir and Nabipour, Narjes and Chau, Kwok-Wing}, title = {Computational modeling of land surface temperature using remote sensing data to investigate the spatial arrangement of buildings and energy consumption relationship}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2020}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {Volume 14, No. 1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2019.1707711}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200110-40585}, pages = {254 -- 270}, abstract = {The effect of urban form on energy consumption has been the subject of various studies around the world. Having examined the effect of buildings on energy consumption, these studies indicate that the physical form of a city has a notable impact on the amount of energy consumed in its spaces. The present study identified the variables that affect energy consumption in residential buildings and analyzed their effects on energy consumption in four neighborhoods in Tehran: Apadana, Bimeh, Ekbatan-phase I, and Ekbatan-phase II. After extracting the variables, their effects are estimated with statistical methods, and the results are compared with the land surface temperature (LST) remote sensing data derived from Landsat 8 satellite images taken in the winter of 2019. The results showed that physical variables, such as the size of buildings, population density, vegetation cover, texture concentration, and surface color, have the greatest impacts on energy usage. For the Apadana neighborhood, the factors with the most potent effect on energy consumption were found to be the size of buildings and the population density. However, for the other neighborhoods, in addition to these two factors, a third factor was also recognized to have a significant effect on energy consumption. This third factor for the Bimeh, Ekbatan-I, and Ekbatan-II neighborhoods was the type of buildings, texture concentration, and orientation of buildings, respectively.}, subject = {Fernerkundung}, language = {en} }
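As an illustrative aside, a minimal sketch of the dimensionality-reduction-plus-classification chain described in the rice-cultivar abstract above: PCA feeding a discriminant-analysis classifier. The 92 features for 13 classes are synthetic stand-ins for the colour, morphologic, and texture measurements.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(4)
n_per_class, n_classes, n_features = 30, 13, 92
centers = rng.normal(scale=3.0, size=(n_classes, n_features))   # one centre per cultivar
X = np.vstack([c + rng.normal(size=(n_per_class, n_features)) for c in centers])
y = np.repeat(np.arange(n_classes), n_per_class)

pipe = make_pipeline(PCA(n_components=20), LinearDiscriminantAnalysis())
print("CV accuracy:", cross_val_score(pipe, X, y, cv=5).mean().round(3))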
@article{NabipourMosaviBaghbanetal., author = {Nabipour, Narjes and Mosavi, Amir and Baghban, Alireza and Shamshirband, Shahaboddin and Felde, Imre}, title = {Extreme Learning Machine-Based Model for Solubility Estimation of Hydrocarbon Gases in Electrolyte Solutions}, series = {Processes}, volume = {2020}, journal = {Processes}, number = {Volume 8, Issue 1, 92}, publisher = {MDPI}, doi = {10.3390/pr8010092}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200113-40624}, pages = {12}, abstract = {Calculating the solubility of the hydrocarbon components of natural gases is known as one of the important issues for operational work in petroleum and chemical engineering. In this work, a novel solubility estimation tool has been proposed for hydrocarbon gases—including methane, ethane, propane, and butane—in aqueous electrolyte solutions, based on the extreme learning machine (ELM) algorithm. Comparing the ELM outputs with a comprehensive real databank, which contains 1175 solubility points, yielded R-squared values of 0.985 and 0.987 for the training and testing phases, respectively. Furthermore, the visual comparison of the estimated and actual hydrocarbon solubility confirmed the ability of the proposed solubility model. Additionally, a sensitivity analysis was performed on the input variables of the model to identify their impacts on hydrocarbon solubility. Such a comprehensive and reliable study can help engineers and scientists to successfully determine the important thermodynamic properties, which are key factors in optimizing and designing different industrial units such as refineries and petrochemical plants.}, subject = {Maschinelles Lernen}, language = {en} }
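As an illustrative aside, the essence of the extreme learning machine named in the abstract above: a randomly initialized hidden layer whose output weights are fitted in a single least-squares solve. The four inputs and the target below are placeholders, not the 1175-point databank.

import numpy as np

rng = np.random.default_rng(5)
X = rng.uniform(size=(1175, 4))                     # placeholder inputs (e.g. T, P, salinity, ...)
y = np.sin(X @ np.array([3.0, 2.0, 1.0, 0.5]))      # placeholder solubility target

n_hidden = 50
W = rng.normal(size=(X.shape[1], n_hidden))         # random input weights, never trained
b = rng.normal(size=n_hidden)
H = np.tanh(X @ W + b)                              # hidden-layer activations
beta, *_ = np.linalg.lstsq(H, y, rcond=None)        # output weights via least squares

pred = H @ beta
r2 = 1 - np.sum((y - pred) ** 2) / np.sum((y - y.mean()) ** 2)
print("R-squared on training data:", round(r2, 4))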
@unpublished{AbbasKavrakovMorgenthaletal., author = {Abbas, Tajammal and Kavrakov, Igor and Morgenthal, Guido and Lahmer, Tom}, title = {Prediction of aeroelastic response of bridge decks using artificial neural networks}, doi = {10.25643/bauhaus-universitaet.4097}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200225-40974}, abstract = {The assessment of wind-induced vibrations is considered vital for the design of long-span bridges. The aim of this research is to develop a methodological framework for robust and efficient prediction strategies for complex aerodynamic phenomena using hybrid models that employ numerical analyses as well as meta-models. Here, an approach to predict motion-induced aerodynamic forces is developed using an artificial neural network (ANN). The ANN is implemented in the classical formulation and trained with a comprehensive dataset which is obtained from computational fluid dynamics forced vibration simulations. The input to the ANN is the response time histories of a bridge section, whereas the output is the motion-induced forces. The developed ANN has been tested on training and test data of different cross-section geometries, which provide promising predictions. The prediction is also performed for an ambient response input with multiple frequencies. Moreover, the trained ANN for aerodynamic forcing is coupled with the structural model to perform a fully-coupled fluid--structure interaction analysis to determine the aeroelastic instability limit. The sensitivity of the ANN parameters to the model prediction quality and the efficiency has also been highlighted. The proposed methodology has wide application in the analysis and design of long-span bridges.}, subject = {Aerodynamik}, language = {en} } @article{BielikSchneiderKuligaetal., author = {Bielik, Martin and Schneider, Sven and Kuliga, Saskia and Griego, Danielle and Ojha, Varun and K{\"o}nig, Reinhard and Schmitt, Gerhard and Donath, Dirk}, title = {Examining Trade-Offs between Social, Psychological, and Energy Potential of Urban Form}, series = {ISPRS International Journal of Geo-Information}, volume = {2019}, journal = {ISPRS International Journal of Geo-Information}, editor = {Resch, Bernd and Szell, Michael}, doi = {10.3390/ijgi8020052}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190408-38695}, abstract = {Urban planners are often challenged with the task of developing design solutions which must meet multiple, and often contradictory, criteria. In this paper, we investigated the trade-offs between the social, psychological, and energy potential of the fundamental elements of urban form: the street network and the building massing. Since formal methods to evaluate urban form from the psychological and social point of view are not readily available, we developed a methodological framework to quantify these criteria as the first contribution of this paper. To evaluate the psychological potential, we conducted a three-tiered empirical study, starting from real-world environments and then abstracting them to virtual environments. In each context, the implicit (physiological) response and explicit (subjective) response of pedestrians were measured. To quantify the social potential, we developed a street network centrality-based measure of social accessibility. For the energy potential, we created an energy model to analyze the impact of pure geometric form on the energy demand of the building stock. The second contribution of this work is a method to identify distinct clusters of urban form and, for each, explore the trade-offs between the selected design criteria. We applied this method to two case studies, identifying nine types of urban form and their respective potential trade-offs, which are directly applicable for the assessment of strategic decisions regarding urban form during the early planning stages.}, subject = {Planung}, language = {en} }
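As an illustrative aside, a street-network centrality computation of the kind underlying the social-accessibility measure in the urban-form study above, here closeness centrality on a toy graph weighted by segment length. networkx is assumed available; the study's own network model may differ.

import networkx as nx

G = nx.Graph()
segments = [("a", "b", 100), ("b", "c", 80), ("c", "d", 120),
            ("b", "d", 200), ("d", "e", 60)]      # street segments with lengths in metres
G.add_weighted_edges_from(segments, weight="length")

closeness = nx.closeness_centrality(G, distance="length")
for node, value in sorted(closeness.items(), key=lambda kv: -kv[1]):
    print(f"{node}: {value:.4f}")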
@phdthesis{Beck, author = {Beck, Stephan}, title = {Immersive Telepresence Systems and Technologies}, doi = {10.25643/bauhaus-universitaet.3856}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190218-38569}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {149}, abstract = {Modern immersive telepresence systems enable people at different locations to meet in virtual environments using realistic three-dimensional representations of their bodies. For the realization of such a three-dimensional version of a video conferencing system, each user is continuously recorded in 3D. These 3D recordings are exchanged over the network between remote sites. At each site, the remote recordings of the users, referred to as 3D video avatars, are seamlessly integrated into a shared virtual scenery and displayed in stereoscopic 3D for each user from his or her perspective. This thesis reports on algorithmic and technical contributions to modern immersive telepresence systems and presents the design, implementation, and evaluation of the first immersive group-to-group telepresence system in which each user is represented as a realistic life-size 3D video avatar. The system enabled two remote user groups to meet and collaborate in a consistent shared virtual environment. The system relied on novel methods for the precise calibration and registration of color and depth sensors (RGBD) into the coordinate system of the application as well as an advanced distributed processing pipeline that reconstructs realistic 3D video avatars in real-time. During the course of this thesis, the calibration of 3D capturing systems was greatly improved. While the first development focused on precisely calibrating individual RGBD sensors, the second stage presents a new method for calibrating and registering multiple color and depth sensors at a very high precision throughout a large 3D capturing volume. This method was further refined by a novel automatic optimization process that significantly speeds up the manual operation and yields similarly high accuracy. A core benefit of the new calibration method is its high runtime efficiency by directly mapping from raw depth sensor measurements into an application coordinate system and to the coordinates of its associated color sensor. As a result, the calibration method is an efficient solution in terms of precision and applicability in virtual reality and immersive telepresence applications. In addition to the core contributions, the results of two case studies, which address 3D reconstruction and data streaming, lead to the final conclusion of this thesis and to directions of future work in the rapidly advancing field of immersive telepresence research.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{OuaerHosseiniAmaretal., author = {Ouaer, Hocine and Hosseini, Amir Hossein and Amar, Menad Nait and Ben Seghier, Mohamed El Amine and Ghriga, Mohammed Abdelfetah and Nabipour, Narjes and Andersen, P{\aa}l {\O}steb{\o} and Mosavi, Amir and Shamshirband, Shahaboddin}, title = {Rigorous Connectionist Models to Predict Carbon Dioxide Solubility in Various Ionic Liquids}, series = {Applied Sciences}, volume = {2020}, journal = {Applied Sciences}, number = {Volume 10, Issue 1, 304}, publisher = {MDPI}, doi = {10.3390/app10010304}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200107-40558}, pages = {18}, abstract = {Estimating the solubility of carbon dioxide in ionic liquids, using reliable models, is of paramount importance from both environmental and economic points of view. In this regard, the current research aims at evaluating the performance of two data-driven techniques, namely multilayer perceptron (MLP) and gene expression programming (GEP), for predicting the solubility of carbon dioxide (CO2) in ionic liquids (ILs) as a function of pressure, temperature, and four thermodynamic parameters of the ionic liquid. To develop the above techniques, 744 experimental data points derived from the literature, covering 13 ILs, were used (80\% of the points for training and 20\% for validation). Two backpropagation-based methods, namely the Levenberg-Marquardt algorithm (LMA) and Bayesian Regularization (BR), were applied to optimize the MLP algorithm. Various statistical and graphical assessments were applied to check the credibility of the developed techniques. The results were then compared with those calculated using the Peng-Robinson (PR) or Soave-Redlich-Kwong (SRK) equations of state (EoS). The highest coefficient of determination (R2 = 0.9965) and the lowest root mean square error (RMSE = 0.0116) were recorded for the MLP-LMA model on the full dataset (with a negligible difference to the MLP-BR model). The comparison of results from this model with the widely applied thermodynamic equation of state models revealed slightly better performance, but the EoS approaches also performed well, with R2 from 0.984 up to 0.996. Lastly, the newly established correlation based on the GEP model exhibited very satisfactory results, with overall values of R2 = 0.9896 and RMSE = 0.0201.}, subject = {Maschinelles Lernen}, language = {en} }
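As an illustrative aside on the telepresence thesis above: the core of registering one sensor's coordinate system to another, given corresponding 3D points, is the Kabsch/SVD solution for a rigid transform. The thesis's calibration pipeline is far richer; this only shows the basic building block on synthetic correspondences.

import numpy as np

rng = np.random.default_rng(6)
P = rng.uniform(size=(100, 3))                       # points in sensor coordinates
a = 0.4
R_true = np.array([[np.cos(a), -np.sin(a), 0.0],
                   [np.sin(a),  np.cos(a), 0.0],
                   [0.0, 0.0, 1.0]])
t_true = np.array([0.5, -0.2, 1.0])
Q = P @ R_true.T + t_true                            # same points in application coordinates

Pc, Qc = P - P.mean(0), Q - Q.mean(0)                # centre both point sets
U, _, Vt = np.linalg.svd(Pc.T @ Qc)
D = np.diag([1.0, 1.0, np.sign(np.linalg.det(Vt.T @ U.T))])  # guard against reflections
R = Vt.T @ D @ U.T
t = Q.mean(0) - R @ P.mean(0)
print("max rotation error:", np.abs(R - R_true).max())
print("max translation error:", np.abs(t - t_true).max())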
@phdthesis{Azari, author = {Azari, Banafsheh}, title = {Bidirectional Texture Functions: Acquisition, Rendering and Quality Evaluation}, doi = {10.25643/bauhaus-universitaet.3779}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180820-37790}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {As one of its primary objectives, Computer Graphics aims at the simulation of fabrics' complex reflection behaviour. Characteristic surface reflectance of fabrics, such as highlights, anisotropy, or retro-reflection, gives rise to difficulties in synthesis. This problem can be addressed by using Bidirectional Texture Functions (BTFs), 2D textures captured under various light and view directions. But the acquisition of Bidirectional Texture Functions requires an expensive setup, and the measurement process is very time-consuming. Moreover, the size of BTF data can range from hundreds of megabytes to several gigabytes, as a large number of high-resolution pictures has to be used in the ideal case. Furthermore, the three-dimensional textured models rendered through the BTF rendering method are subject to various types of distortion during acquisition, synthesis, compression, and processing. An appropriate image quality assessment scheme is a useful tool for evaluating image processing algorithms, especially algorithms designed to leave the image visually unchanged. In this contribution, we present and conduct an investigation aimed at locating a robust threshold for downsampling BTF images without losing perceptual quality. To this end, an experimental study on how decreasing the texture resolution influences the perceived quality of the rendered images has been presented and discussed. Next, two basic improvements to the use of BTFs for rendering are presented: firstly, the study addresses the cost of BTF acquisition by introducing a flexible low-cost step motor setup for BTF acquisition that allows generating a high-quality BTF database captured at user-defined arbitrary angles. Secondly, the number of acquired textures is adapted to the perceptual quality of the renderings so that the database does not become too large and fits better in memory during rendering. Although visual attention is one of the essential attributes of the human visual system (HVS), it is neglected in most existing quality metrics. In this thesis, an objective quality metric called the Visual Attention Based Image Quality Metric (VABIQM) is proposed, based on extracting visual attention regions from images and on an investigation of the influence of visual attention on perceived image quality. The novel metric indicates that considering visual saliency can offer significant benefits with regard to constructing objective quality metrics to predict visible quality differences in images rendered by compressed and non-compressed BTFs, and it also outperforms straightforward existing image quality metrics at detecting perceivable differences.}, subject = {Wahrnehmung}, language = {en} }
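As an illustrative aside, the general shape of a downsampling-threshold experiment like the one in the BTF thesis above: reduce a texture's resolution and track a quality score. Plain PSNR is used here purely for illustration; the thesis relies on perceptual experiments and a saliency-based metric instead.

import numpy as np

def psnr(a, b, peak=1.0):
    mse = np.mean((a - b) ** 2)
    return float("inf") if mse == 0 else 10 * np.log10(peak ** 2 / mse)

rng = np.random.default_rng(7)
texture = rng.uniform(size=(256, 256))                        # placeholder texture
for factor in (2, 4, 8):
    small = texture[::factor, ::factor]                       # naive downsampling
    back = np.repeat(np.repeat(small, factor, 0), factor, 1)  # nearest-neighbour upsampling
    print(f"1/{factor} resolution: PSNR = {psnr(texture, back):.1f} dB")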
@article{MosaviHosseiniImaniZalzaretal., author = {Mosavi, Amir and Hosseini Imani, Mahmood and Zalzar, Shaghayegh and Shamshirband, Shahaboddin}, title = {Strategic Behavior of Retailers for Risk Reduction and Profit Increment via Distributed Generators and Demand Response Programs}, series = {Energies}, volume = {2018}, journal = {Energies}, number = {11, 6}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11061602}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180628-37546}, pages = {24}, abstract = {Following the restructuring of the power industry, electricity supply to end-use customers has undergone fundamental changes. In the restructured power system, some of the responsibilities of the vertically integrated distribution companies have been assigned to network managers and retailers. Under the new situation, retailers are in charge of providing electrical energy to electricity consumers who have already signed contracts with them. Retailers usually provide the required energy at a variable price, from wholesale electricity markets, forward contracts with energy producers, or distributed energy generators, and sell it at a fixed retail price to their clients. Different strategies are implemented by retailers to reduce the potential financial losses and risks associated with the uncertain nature of wholesale spot electricity market prices and the electrical load of the consumers. In this paper, the strategic behavior of retailers in implementing forward contracts, distributed energy sources, and demand-response programs with the aim of increasing their profit and reducing their risk, while keeping their retail prices as low as possible, is investigated. For this purpose, the risk management problem of retailer companies collaborating with wholesale electricity markets is modeled through a bi-level programming approach, and a comprehensive framework for retail electricity pricing, considering customers' constraints, is provided. In the first level of the proposed bi-level optimization problem, the retailer maximizes its expected profit for a given risk level of profit variability, while in the second level, the customers minimize their consumption costs. The proposed programming problem is modeled as a Mixed Integer Programming (MIP) problem and can be efficiently solved using available commercial solvers. The simulation results on a test case confirm the effectiveness of the proposed demand-response program based on a dynamic pricing approach in reducing the retailer's risk and increasing its profit. In this paper, the decision-making problem of retailers under a dynamic pricing approach for demand response integration has been investigated. The retailer was supposed to rely on forward contracts, DGs, and the spot electricity market to supply the required active and reactive power of its customers. To verify the effectiveness of the proposed model, four schemes for the retailer's scheduling problem are considered, and the resulting profit under each scheme is analyzed and compared. The simulation results on a test case indicate that providing more options for the retailer to buy the required power of its customers and increasing its flexibility in buying energy from the spot electricity market reduce the retailer's risk and increase its profit. From the customers' perspective, the retailer's access to different power supply sources may also lead to a reduction in retail electricity prices, since the retailer would be able to decrease its electricity selling price to the customers, without losing its profitability, with the aim of attracting more customers. In this work, the conditional value at risk (CVaR) measure is used for considering and quantifying risk in the decision-making problems. Among all the possible options in front of the retailer to optimize its profit and risk, demand response programs are the most beneficial option for both the retailer and its customers. The simulation results on the case study prove that implementing a dynamic pricing approach on retail electricity prices to integrate demand response programs can successfully prompt customers to shift their flexible demand from peak-load hours to mid-load and low-load hours. Comparing the simulation results of the third and fourth schemes evidences the impact of DRPs and customers' load shifting on the reduction of the retailer's risk, as well as the reduction of the retailer's payments to contract holders, DG owners, and the spot electricity market. Furthermore, the numerical results indicate the potential of reducing average retail prices by up to 8\% under demand response activation.
Consequently, it provides a win-win solution for both the retailer and its customers.}, subject = {Risikomanagement}, language = {en} } @article{GhazvineiDarvishiMosavietal., author = {Ghazvinei, Pezhman Taherei and Darvishi, Hossein Hassanpour and Mosavi, Amir and Yusof, Khamaruzaman bin Wan and Alizamir, Meysam and Shamshirband, Shahaboddin and Chau, Kwok-Wing}, title = {Sugarcane growth prediction based on meteorological parameters using extreme learning machine and artificial neural network}, series = {Engineering Applications of Computational Fluid Mechanics}, volume = {2018}, journal = {Engineering Applications of Computational Fluid Mechanics}, number = {12,1}, publisher = {Taylor \& Francis}, doi = {10.1080/19942060.2018.1526119}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181017-38129}, pages = {738 -- 749}, abstract = {Management strategies for sustainable sugarcane production need to deal with the increasing complexity and variability of the whole sugar system. Moreover, they need to accommodate the multiple goals of different industry sectors and the wider community. Traditional disciplinary approaches are unable to provide integrated management solutions, and an approach based on whole systems analysis is essential to bring about beneficial change to industry and the community. The application of this approach to water management, environmental management, and cane supply management is outlined, where the literature indicates that the application of the extreme learning machine (ELM) has never been explored in this realm. Consequently, the leading objective of the current research was to fill this gap by applying ELM to develop a swift and accurate data-driven model for crop production. The key learning has been the need for innovation in the technical aspects of system function, underpinned by modelling of sugarcane growth. Therefore, the current study is an attempt to establish an integrated model using ELM to predict the final growth amount of sugarcane. Prediction results were evaluated and further compared with artificial neural network (ANN) and genetic programming models. The accuracy of the ELM model is calculated using the statistical indicators of Root Mean Square Error (RMSE), Pearson Coefficient (r), and Coefficient of Determination (R2), with promising results of 0.8, 0.47, and 0.89, respectively. The results also show better generalization ability in addition to a faster learning curve. Thus, the proficiency of the ELM for supplementary work on the advancement of prediction models for sugarcane growth was confirmed with promising results.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} }
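As an illustrative aside on the conditional value at risk (CVaR) measure named in the retailer risk-management abstract above: CVaR at level alpha is the expected profit over the worst (1 - alpha) share of scenarios. The profit scenarios below are random placeholders.

import numpy as np

rng = np.random.default_rng(8)
profits = rng.normal(loc=1000.0, scale=250.0, size=10_000)  # simulated scenario profits
alpha = 0.95

var = np.quantile(profits, 1 - alpha)       # value at risk: the 5th-percentile profit
cvar = profits[profits <= var].mean()       # CVaR: mean profit over the worst-case tail
print(f"VaR({alpha:.0%}) = {var:.0f}, CVaR({alpha:.0%}) = {cvar:.0f}")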
@article{FaizollahzadehArdabiliNajafiAlizamiretal., author = {Faizollahzadeh Ardabili, Sina and Najafi, Bahman and Alizamir, Meysam and Mosavi, Amir and Shamshirband, Shahaboddin and Rabczuk, Timon}, title = {Using SVM-RSM and ELM-RSM Approaches for Optimizing the Production Process of Methyl and Ethyl Esters}, series = {Energies}, journal = {Energies}, number = {11, 2889}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/en11112889}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181025-38170}, pages = {1 -- 20}, abstract = {The production of a desired product requires effective use of the experimental model. The present study proposes an extreme learning machine (ELM) and a support vector machine (SVM) integrated with the response surface methodology (RSM) to address the complexity of the optimization and prediction of the ethyl ester and methyl ester production process. The novel hybrid models of ELM-RSM and SVM-RSM are further used as a case study to estimate the yield of methyl and ethyl esters through a trans-esterification process from waste cooking oil (WCO) based on American Society for Testing and Materials (ASTM) standards. The results of the prediction phase were also compared with artificial neural networks (ANNs) and an adaptive neuro-fuzzy inference system (ANFIS), which were recently developed by the second author of this study. Based on the results, the ELM, with a correlation coefficient of 0.9815 and 0.9863 for methyl and ethyl esters, respectively, had a high estimation capability compared with those of the SVM, ANNs, and ANFIS. Accordingly, the maximum production yield obtained using ELM-RSM was 96.86\% for ethyl ester at a temperature of 68.48 °C, a catalyst value of 1.15 wt. \%, a mixing intensity of 650.07 rpm, and an alcohol to oil molar ratio (A/O) of 5.77; for methyl ester, the production yield was 98.46\% at a temperature of 67.62 °C, a catalyst value of 1.1 wt. \%, a mixing intensity of 709.42 rpm, and an A/O of 6.09. Therefore, ELM-RSM increased the production yield by 3.6\% for ethyl ester and 3.1\% for methyl ester, compared with the experimental data.}, subject = {Biodiesel}, language = {en} }
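As an illustrative aside on the ester-production study above: the response-surface step amounts to fitting a quadratic surface to measured yields and locating its optimum. The factor ranges follow the abstract (temperature, catalyst, mixing intensity, A/O ratio), but the data and response surface below are invented.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline

rng = np.random.default_rng(9)
lo, hi = [50, 0.5, 400, 4], [80, 1.5, 800, 8]          # T (°C), catalyst (wt.%), rpm, A/O
X = rng.uniform(lo, hi, size=(40, 4))
yield_ = 98 - 0.01 * (X[:, 0] - 68) ** 2 - 20 * (X[:, 1] - 1.1) ** 2 \
         + rng.normal(scale=0.3, size=40)              # toy yield response

rsm = make_pipeline(PolynomialFeatures(degree=2), LinearRegression()).fit(X, yield_)

axes = [np.linspace(l, h, 12) for l, h in zip(lo, hi)]
grid = np.stack(np.meshgrid(*axes), axis=-1).reshape(-1, 4)
best = grid[np.argmax(rsm.predict(grid))]
print("predicted optimum (T, catalyst, rpm, A/O):", best.round(2))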
@phdthesis{Schollmeyer, author = {Schollmeyer, Andre}, title = {Efficient and High-Quality Rendering of Higher-Order Geometric Data Representations}, doi = {10.25643/bauhaus-universitaet.3823}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181120-38234}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {143}, abstract = {Computer-Aided Design (CAD) refers to the design of industrial products by means of virtual 3D models. A CAD model consists of parametric curves and surfaces, in most cases non-uniform rational B-splines (NURBS). This mathematical description is also used for the analysis, optimization, and presentation of the model. Each of these development phases requires a different visual representation to give the respective users appropriate feedback. Designers, for example, prefer illustrative or realistic renderings, engineers need a comprehensible visualization of simulation results, while an immersive 3D representation can be helpful for usability analyses or design selection. However, the interactive rendering of NURBS models and simulation data is a major challenge due to the high computational effort and limited hardware support. This thesis presents four novel methods that address the interactive rendering of NURBS models and simulation data. The presented algorithms exploit new capabilities of current graphics hardware in order to advance the state of the art in terms of quality, efficiency, and rendering speed. Two of these methods deal with the direct rendering of the parametric description without approximations or time-consuming precomputations. The data structures and algorithms presented therein enable the efficient subdivision, classification, tessellation, and rendering of trimmed NURBS surfaces, as well as an interactive ray-casting algorithm for the isosurface visualization of NURBS-based isogeometric analyses. The other two methods describe, on the one hand, the versatile concept of programmable transparency for illustrative and comprehensible visualizations of depth-complex CAD models and, on the other hand, a new hybrid method for the reprojection of semi-transparent and opaque image information to accelerate the generation of stereoscopic image pairs. The latter two approaches are based on rasterized geometry and are therefore also applicable to ordinary triangle meshes, which makes this work an important contribution to the fields of computer graphics and virtual reality as well. The evaluation of this work was carried out with large, real-world NURBS datasets. The results show that direct rendering based on the parametric description is possible at interactive frame rates and with subpixel-accurate quality. The introduction of programmable transparency furthermore enables collaborative 3D interaction techniques for exploring the models in virtual environments, as well as illustrative and comprehensible visualizations of depth-complex CAD models. The generation of stereoscopic image pairs for interactive visualization on 3D displays was accelerated, and this measurable improvement was also found to be perceptible and beneficial in a user study.}, subject = {Rendering}, language = {en} } @inproceedings{FediorHamel, author = {Fedior, Marco and Hamel, Wido}, title = {Simulationsumgebung zur Evaluation von umweltorientierten Verkehrsmanagement-Strategien}, series = {30. Forum Bauinformatik}, booktitle = {30. Forum Bauinformatik}, editor = {Steiner, Maria and Theiler, Michael and Mirboland, Mahsa}, organization = {Bauhaus-Universit{\"a}t Weimar}, doi = {10.25643/bauhaus-universitaet.3867}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190328-38678}, pages = {6}, abstract = {This contribution describes the challenges of forecasting traffic-related pollutant immissions. The focus is on the development and construction of a simulation environment for evaluating environmentally oriented traffic management strategies. The simulation environment is developed across the three fields of traffic, emission, and immission, and is first applied to the evaluation of traffic measures for the Friedberger Landstraße in Frankfurt am Main.}, subject = {Verkehr}, language = {de} } @unpublished{SteinerBourinetLahmer, author = {Steiner, Maria and Bourinet, Jean-Marc and Lahmer, Tom}, title = {An adaptive sampling method for global sensitivity analysis based on least-squares support vector regression}, doi = {10.25643/BAUHAUS-UNIVERSITAET.3832}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181218-38320}, pages = {1 -- 33}, abstract = {In the field of engineering, surrogate models are commonly used for approximating the behavior of a physical phenomenon in order to reduce the computational costs. Generally, a surrogate model is created based on a set of training data, where a typical method for the statistical design is Latin hypercube sampling (LHS).
Even though a space-filling distribution of the training data is achieved, the sampling process takes no information about the underlying behavior of the physical phenomenon into account, and new data cannot be sampled in the same distribution if the approximation quality is not sufficient. Therefore, in this study we present a novel adaptive sampling method based on a specific surrogate model, the least-squares support vector regression. The adaptive sampling method generates training data based on the uncertainty in the local prognosis capabilities of the surrogate model - areas of higher uncertainty require more sample data. The approach offers a cost-efficient calculation due to the properties of the least-squares support vector regression. The opportunities of the adaptive sampling method are demonstrated in comparison with LHS on different analytical examples. Furthermore, the adaptive sampling method is applied to the calculation of global sensitivity values according to Sobol, where it shows faster convergence than the LHS method. With the applications in this paper it is shown that the presented adaptive sampling method improves the estimation of global sensitivity values, hence visibly reducing the overall computational costs.}, subject = {Approximation}, language = {en} }
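As an illustrative aside on the adaptive-sampling preprint above: a Latin hypercube design via SciPy's quasi-Monte Carlo module (SciPy 1.7 or newer), followed by a deliberately naive adaptive step that adds points where a crude uncertainty proxy (distance to the nearest existing sample) is largest. The paper's actual criterion is based on the local prognosis uncertainty of an LS-SVR surrogate, not on this proxy.

import numpy as np
from scipy.stats import qmc

sampler = qmc.LatinHypercube(d=2, seed=10)
X = sampler.random(n=20)                       # space-filling initial design in [0, 1]^2

candidates = sampler.random(n=200)
dists = np.min(np.linalg.norm(candidates[:, None] - X[None], axis=-1), axis=1)
X_new = candidates[np.argsort(dists)[-5:]]     # five candidates with the largest proxy uncertainty
print("added points:\n", X_new.round(3))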
@unpublished{RezakazemiMosaviShirazian, author = {Rezakazemi, Mashallah and Mosavi, Amir and Shirazian, Saeed}, title = {ANFIS pattern for molecular membranes separation optimization}, volume = {2018}, doi = {10.25643/BAUHAUS-UNIVERSITAET.3821}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20181122-38212}, pages = {1 -- 20}, abstract = {In this work, molecular separation of an aqueous-organic system was simulated by using combined soft computing-mechanistic approaches. The considered separation system was a microporous membrane contactor for the separation of benzoic acid from water by contacting with an organic phase containing extractor molecules. Indeed, extractive separation is carried out using membrane technology, where a solute-organic complex is formed at the interface. The main focus was to develop a simulation methodology for predicting the concentration distribution of the solute (benzoic acid) on the feed side of the membrane system, as the removal efficiency of the system is determined by the concentration distribution of the solute in the feed channel. The pattern of the Adaptive Neuro-Fuzzy Inference System (ANFIS) was optimized by finding the optimum membership function, learning percentage, and number of rules. The ANFIS was trained using data extracted from the CFD simulation of the membrane system. The comparison between the concentration distribution predicted by ANFIS and the CFD data revealed that the optimized ANFIS pattern can be used as a predictive tool for the simulation of the process. An R2 higher than 0.99 was obtained for the optimized ANFIS model. The main advantage of the developed methodology is its very low computational time for the simulation of the system, and it can be used as a rigorous simulation tool for the understanding and design of membrane-based systems. Highlights: molecular separation using microporous membranes; development of a hybrid ANFIS-CFD model for the separation process; optimization of the ANFIS structure for the prediction of the separation process.}, subject = {Fluid}, language = {en} } @unpublished{KavrakovMorgenthal, author = {Kavrakov, Igor and Morgenthal, Guido}, title = {A synergistic study of a CFD and semi-analytical models for aeroelastic analysis of bridges in turbulent wind conditions}, doi = {10.25643/bauhaus-universitaet.4087}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20200206-40873}, abstract = {Long-span bridges are prone to wind-induced vibrations. Therefore, a reliable representation of the aerodynamic forces acting on a bridge deck is of major significance for the design of such structures. This paper presents a systematic study of the two-dimensional (2D) fluid-structure interaction of a bridge deck under smooth and turbulent wind conditions. Aerodynamic forces are modeled by two approaches: a computational fluid dynamics (CFD) model and six semi-analytical models. The vortex particle method is utilized for the CFD model, and the free-stream turbulence is introduced by seeding vortex particles upstream of the deck with prescribed spectral characteristics. The employed semi-analytical models are based on the quasi-steady and linear unsteady assumptions and on aerodynamic coefficients obtained from CFD analyses. The underlying assumptions of the semi-analytical aerodynamic models are used to interpret the results of buffeting forces and aeroelastic response due to free-stream turbulence in comparison with the CFD model. Extensive discussions are provided to analyze the effect of linear fluid memory and quasi-steady nonlinearity from a CFD perspective. The outcome of the analyses indicates that fluid memory is a governing effect in the buffeting forces and aeroelastic response, while the effect of nonlinearity is overestimated by the quasi-steady models. Finally, flutter analyses are performed, and the obtained critical velocities are further compared with wind tunnel results, followed by a brief examination of the post-flutter behavior. The results of this study provide a deeper understanding of the extent to which the applied models are able to replicate the physical processes of fluid-structure interaction phenomena in bridge aerodynamics and aeroelasticity.}, subject = {Ingenieurwissenschaften}, language = {en} } @unpublished{MosaviTorabiHashemietal., author = {Mosavi, Amir and Torabi, Mehrnoosh and Hashemi, Sattar and Saybani, Mahmoud Reza and Shamshirband, Shahaboddin}, title = {A Hybrid Clustering and Classification Technique for Forecasting Short-Term Energy Consumption}, doi = {10.25643/bauhaus-universitaet.3755}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20180907-37550}, abstract = {Electrical energy distributor companies in Iran have to announce their energy demand at least three days ahead of the market opening. Therefore, an accurate load estimation is highly crucial. This research invoked a methodology based on CRISP data mining and used SVM, ANN, and CBA-ANN-SVM (a novel hybrid model of clustering with both widely used ANN and SVM) to predict the short-term electrical energy demand of Bandarabbas. In previous studies, researchers introduced only a few effective parameters, with no reasonable error, for Bandarabbas power consumption. In this research, we tried to recognize all efficient parameters, and with the use of the CBA-ANN-SVM model, the rate of error has been minimized.
After consulting with experts in the field of power consumption and plotting daily power consumption for each week, this research showed that official holidays and weekends have an impact on power consumption. When the weather gets warmer, the consumption of electrical energy increases due to the use of electrical air conditioners. Also, consumption patterns in warm and cold months are different. Analyzing the power consumption of the same month for different years showed high similarity in power consumption patterns. Factors with a high impact on power consumption were identified, and statistical methods were utilized to prove their impacts. Using SVM, ANN, and CBA-ANN-SVM, the model was built. Since the proposed method (CBA-ANN-SVM) has a low MAPE = 1.474 (4 clusters) and MAPE = 1.297 (3 clusters) in comparison with SVM (MAPE = 2.015) and ANN (MAPE = 1.790), this model was selected as the final model. The final model has the benefits of both models as well as the benefits of clustering. A clustering algorithm, by discovering the data structure, divides the data into several clusters based on the similarities and differences between them. Because the data inside each cluster are more similar to each other than to the entire data set, modeling each cluster will present better results. For future research, we suggest using fuzzy methods and genetic algorithms, or a hybrid of both, to forecast each cluster. It is also possible to use fuzzy methods or genetic algorithms or a hybrid of both without using clustering. It is expected that such models will produce better and more accurate results. This paper presents a hybrid approach to predict the electric energy usage of weather-sensitive loads. The presented method utilizes the clustering paradigm along with ANN and SVM approaches for accurate short-term prediction of electric energy usage, using weather data. Since the methodology being invoked in this research is based on CRISP data mining, data preparation has received a great deal of attention in this research. Once data pre-processing was done, the underlying pattern of electric energy consumption was extracted by means of machine learning methods to precisely forecast short-term energy consumption. The proposed approach (CBA-ANN-SVM) was applied to real load data, resulting in higher accuracy compared to the existing models. © 2018 American Institute of Chemical Engineers, Environ Prog, 2018, https://doi.org/10.1002/ep.12934}, subject = {Data Mining}, language = {en} }
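As an illustrative aside, the cluster-then-predict idea behind the CBA-ANN-SVM hybrid described above: k-means partitions the data, one regressor is fitted per cluster, and MAPE is the error measure. SVR stands in for the ANN/SVM members, and the weather and load data are synthetic placeholders.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(11)
X = rng.uniform(size=(600, 3))         # placeholders for temperature, humidity, holiday flag
y = 50 + 30 * X[:, 0] + 10 * np.sin(6 * X[:, 1]) + rng.normal(0.0, 2.0, 600)  # load (MW)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X_tr)
models = {c: SVR().fit(X_tr[km.labels_ == c], y_tr[km.labels_ == c]) for c in range(3)}

pred = np.array([models[c].predict(x[None])[0] for c, x in zip(km.predict(X_te), X_te)])
mape = 100 * np.mean(np.abs((y_te - pred) / y_te))
print(f"MAPE = {mape:.3f} %")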
@misc{Lang, type = {Master Thesis}, author = {Lang, Kevin}, title = {Argument Search with Voice Assistants}, doi = {10.25643/bauhaus-universitaet.3935}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20190617-39353}, school = {Bauhaus-Universit{\"a}t Weimar}, pages = {100}, abstract = {The need for finding persuasive arguments can arise in a variety of domains such as politics, finance, marketing, or personal entertainment. In these domains, there is a demand to make decisions by oneself or to convince somebody about a specific topic. To reach a conclusion, one has to search thoroughly through different sources in the literature and on the web to compare various arguments. Voice interfaces, in the form of smartphone applications or smart speakers, present the user with natural conversations as a comfortable way to make search requests, in contrast to a traditional search interface with keyboard and display. Benefits and obstacles of such a new interface are analyzed by conducting two studies. The first one consists of a survey for analyzing the target group, with questions about situations, motivations, and potentially demanded features. The second one is a Wizard-of-Oz experiment to investigate how users formulate requests to such a novel system. The results indicate that a search interface with conversational abilities can make a helpful assistant, but to satisfy the demands of a broader audience, some additional information retrieval and visualization features need to be implemented.}, subject = {Amazon Alexa}, language = {en} } @unpublished{SimonRitzRudolf, author = {Simon-Ritz, Frank and Rudolf, Sylvelin}, title = {Ein Schaufenster f{\"u}r die Kunst}, volume = {69}, number = {Heft 6}, doi = {10.25643/bauhaus-universitaet.3230}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170711-32307}, pages = {312 -- 317}, abstract = {Art exhibitions in libraries}, subject = {Ausstellung}, language = {de} } @misc{Genc, type = {Master Thesis}, author = {Genc, Emir}, title = {Decoding Public Life in Urban Soundscape: The Case of Weimar}, doi = {10.25643/bauhaus-universitaet.2743}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20170213-27438}, school = {Bauhaus-Universit{\"a}t Weimar}, abstract = {INTRODUCTION The research field of sound landscape and public life initially drew my attention during the master class 'Media of the Urban' (originally 'Medien des Urbanen'), which was given by Prof. Dr. Gabriele Schabacher in the 2015 summer semester. For that class, I conducted a conceptual case study in Istanbul's Beyoglu District with the intention of analysing the perception of space through urban sound. During the summer of 2015, I recorded various sounds in different spatial settings and developed the analysis by comparing the situations. By that time, I had realized the inherent property of sound as a medium for our perception in the urban context. In the 2015-2016 winter semester, I participated in the master class of the architectural project 'Build Allegory', which was given by Prof. Dipl.-Ing. Heike B{\"u}ttner. The project was situated in Berlin Westkreuz, at the AVUS north curve on the highway, originally a race track from 1921. In this context, the aim of my project was to answer various questions, the main ones being: how does the architectural form shape the sound of the place, and how does the sound of the place shape the architectural form? Since the place still mainly serves vehicles, although its function has changed, the sound objects and the context have remained. Given these contextual references, I started by creating a computational tool for analysing the acoustic characteristics of this urban setting, which fundamentally provides results as a sound cloud derived from the sound ray tracing method. With this soundscape analysis method, the computational tool assisted me in finding an optimal reciprocal relation between architecture and sound. Since I have been working on the soundscape in the context of architecture, urban situations, public life, and public space, I was determined to produce comprehensive research in this field and propound the hypothesis of a reciprocity between social behaviours in public space and the sound landscape. To what extent does this reciprocity exist?
What are the effects of public life on the sonic configuration of the space, and vice versa?}, subject = {{\"O}ffentlicher Raum}, language = {en} } @inproceedings{ChirkinKoenig, author = {Chirkin, Artem and K{\"o}nig, Reinhard}, title = {Concept of Interactive Machine Learning in Urban Design Problems : proceedings}, publisher = {ACM New York, NY, USA}, address = {San Jose, CA, USA}, doi = {10.25643/bauhaus-universitaet.2600}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26000}, pages = {10 -- 13}, abstract = {This work presents a concept of interactive machine learning in a human design process. An urban design problem is viewed as a multiple-criteria optimization problem. The outlined feature of an urban design problem is the dependence of the design goal on the context of the problem. We model the design goal as a randomized fitness measure that depends on the context. In terms of multiple-criteria decision analysis (MCDA), the defined measure corresponds to a subjective expected utility of a user. In the first stage of the proposed approach, we let the algorithm explore the design space using clustering techniques. The second stage is an interactive design loop: the user makes a proposal, then the program optimizes it, gets the user's feedback, and returns control over the application interface.}, subject = {Stadtgestaltung}, language = {en} } @article{KleinKoenig, author = {Klein, Bernhard and K{\"o}nig, Reinhard}, title = {Computational Urban Planning: Using the Value Lab as Control Center}, series = {FCL Magazine, Special Issue Simulation Platform}, journal = {FCL Magazine, Special Issue Simulation Platform}, doi = {10.25643/bauhaus-universitaet.2601}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26011}, pages = {38 -- 45}, abstract = {Urban planning involves many aspects and various disciplines, demanding an asynchronous planning approach. The level of complexity rises with each aspect to be considered and makes it difficult to find universally satisfactory solutions. To improve this situation, we propose a new approach, which complements traditional design methods with a computational urban planning method that can fulfil formalizable design requirements automatically. Based on this approach, we present a design space exploration framework for complex urban planning projects. For a better understanding of the idea of design space exploration, we introduce the concept of a digital scout, which guides planners through the design space and assists them in their creative explorations. The scout can support planners during manual design by informing them about potential impacts or by suggesting different solutions that fulfill predefined quality requirements. The planner can change flexibly between a manually controlled and a completely automated design process. The developed system is presented using an exemplary urban planning scenario on two levels, from the street layout to the placement of building volumes.
Based on self-organizing maps, we implemented a method that makes it possible to visualize the multi-dimensional solution space in an easily analysable and comprehensible form.}, subject = {Stadtgestaltung}, language = {en} } @article{TreyerKleinKoenigetal., author = {Treyer, Lukas and Klein, Bernhard and K{\"o}nig, Reinhard and Meixner, Christine}, title = {Lightweight Urban Computation Interchange (LUCI): A System to Couple Heterogenous Simulations and Views}, series = {Spatial Information Research}, journal = {Spatial Information Research}, doi = {10.25643/bauhaus-universitaet.2603}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-26037}, pages = {1 -- 12}, abstract = {In this paper we introduce LUCI, a Lightweight Urban Calculation Interchange system, designed to bring the advantages of a calculation and content coordination system to small planning and design groups by means of open-source middleware. The middleware focuses on problems typical of urban planning and therefore features a geo-data repository as well as job runtime administration to coordinate simulation models and their multiple views. The described system architecture is accompanied by two exemplary use cases that have been used to test and further develop our concepts and implementations.}, language = {en} } @inproceedings{KoenigSchmitt, author = {K{\"o}nig, Reinhard and Schmitt, Gerhard}, title = {Backcasting and a new way of command in computational design : Proceedings}, series = {CAADence in Architecture Conference}, booktitle = {CAADence in Architecture Conference}, editor = {Szoboszlai, Mih{\'a}ly}, address = {Budapest}, doi = {10.25643/bauhaus-universitaet.2599}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:wim2-20160622-25996}, pages = {15 -- 25}, abstract = {It is not uncommon that analysis and simulation methods are used mainly to evaluate finished designs and to prove their quality, whereas the potential of such methods is to lead or control a design process from the very beginning. We therefore introduce a design method that moves away from a "what-if" forecasting philosophy and increases the focus on backcasting approaches. We use the power of computation by combining sophisticated design-generation methods with analysis methods to close the gap between the analysis and synthesis of designs. For the development of a future-oriented computational design support, we need to be aware of the human designer's role. A productive combination of the excellence of human cognition with the power of modern computing technology is needed. We call this approach "cognitive design computing". The computational part aims to mimic the way a designer's brain works by combining state-of-the-art optimization and machine learning approaches with available simulation methods. The cognition part respects the complex nature of design problems by providing models for human-computation interaction, meaning that a design problem is distributed between computer and designer. In the context of the conference slogan "back to command", we ask how we may imagine command over a cognitive design computing system. We expect that designers will need to cede control of some parts of the design process to machines, but in exchange they will gain a new, powerful command over complex computing processes. This means that designers have to explore the potential of their role as commanders of partially automated design processes.
In this contribution we describe an approach for the development of a future cognitive design computing system, with a focus on urban design issues. The aim of this system is to enable an urban planner to treat a planning problem as a backcasting problem by defining what performance a design solution should achieve, and to automatically query or generate a set of best possible solutions. This kind of computational planning process offers proof that the resulting design meets the originally, explicitly defined design requirements. A key way in which digital tools can support designers is by generating design proposals. Evolutionary multi-criteria optimization methods allow us to explore a multi-dimensional design space and provide a basis for the designer to evaluate contradicting requirements, a task urban planners face frequently. We also reflect on why designers will cede more and more control to machines. To that end, we investigate first approaches that employ machine learning methods to learn how designers use computational design support systems in combination with manual design strategies to deal with urban design problems. By observing how designers work, it is possible to derive more complex artificial solution strategies that can help computers make better suggestions in the future.}, subject = {CAD}, language = {en} }
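The preceding abstract describes evolutionary multi-criteria optimization as the basis for exploring a multi-dimensional design space under conflicting requirements. The following minimal Python sketch is not the authors' implementation; all names and the two toy objectives are hypothetical. It illustrates only the core idea: a population of candidate designs is repeatedly evaluated against conflicting criteria and reduced to its Pareto front of non-dominated trade-offs, which the planner could then inspect.

import random

def dominates(a, b):
    """True if score tuple a is at least as good as b on every objective
    and strictly better on at least one (all objectives are minimized)."""
    return all(x <= y for x, y in zip(a, b)) and any(x < y for x, y in zip(a, b))

def pareto_front(candidates):
    """Keep only non-dominated candidates, i.e., the current best trade-offs."""
    return [c for c in candidates
            if not any(dominates(o["scores"], c["scores"]) for o in candidates)]

def evaluate(design):
    # Two conflicting toy objectives: "cost" rises with building density,
    # while "walking distance" falls with density. Both are minimized.
    density = design["density"]
    return (density * 1.5 + random.random() * 0.1,
            1.0 / density + random.random() * 0.1)

random.seed(42)
population = [{"density": random.uniform(0.2, 2.0)} for _ in range(50)]
for generation in range(20):
    for d in population:
        d["scores"] = evaluate(d)
    front = pareto_front(population)
    # Evolutionary step: mutate non-dominated designs to refill the population.
    population = front + [
        {"density": max(0.2, random.choice(front)["density"] + random.gauss(0, 0.1))}
        for _ in range(50 - len(front))
    ]

# The surviving front approximates the cost/distance trade-off curve.
for d in sorted(front, key=lambda d: d["scores"]):
    print(f"density={d['density']:.2f}  cost={d['scores'][0]:.2f}  "
          f"distance={d['scores'][1]:.2f}")

In a backcasting setting as sketched in the abstract, the planner would additionally constrain the front to candidates meeting the explicitly defined performance targets, rather than inspecting the whole trade-off curve.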