-
Notifications
You must be signed in to change notification settings - Fork 1
/
publications.html
406 lines (360 loc) · 26.5 KB
/
publications.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta content="width=device-width, initial-scale=1.0" name="viewport">
<title>V-EUPNEA | Publications</title>
<!-- NOTE(review): description/keywords are empty template placeholders — fill in for SEO -->
<meta content="" name="description">
<meta content="" name="keywords">
<!-- Favicons: light-colored icons are served to dark color schemes and vice versa -->
<link href="assets/img/favicon-light.png" rel="icon" media="(prefers-color-scheme: dark)"/>
<link href="assets/img/favicon-dark.png" rel="icon" media="(prefers-color-scheme: light)"/>
<link href="assets/img/apple-touch-ico-light.png" rel="apple-touch-icon" media="(prefers-color-scheme: dark)"/>
<link href="assets/img/apple-touch-ico-dark.png" rel="apple-touch-icon" media="(prefers-color-scheme: light)"/>
<!-- Google Fonts -->
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,300i,400,400i,600,600i,700,700i|Roboto:300,300i,400,400i,500,500i,600,600i,700,700i|Poppins:300,300i,400,400i,500,500i,600,600i,700,700i" rel="stylesheet">
<!-- Vendor CSS Files -->
<link href="assets/vendor/animate.css/animate.min.css" rel="stylesheet">
<link href="assets/vendor/aos/aos.css" rel="stylesheet">
<link href="assets/vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<link href="assets/vendor/bootstrap-icons/bootstrap-icons.css" rel="stylesheet">
<link href="assets/vendor/boxicons/css/boxicons.min.css" rel="stylesheet">
<link href="assets/vendor/glightbox/css/glightbox.min.css" rel="stylesheet">
<link href="assets/vendor/remixicon/remixicon.css" rel="stylesheet">
<link href="assets/vendor/swiper/swiper-bundle.min.css" rel="stylesheet">
<!-- Template Main CSS File -->
<link href="assets/css/style_alternative.css" rel="stylesheet">
<!-- =======================================================
* Template Name: Company - v4.7.0
* Template URL: https://bootstrapmade.com/company-free-html-bootstrap-template/
* Author: BootstrapMade.com
* License: https://bootstrapmade.com/license/
======================================================== -->
</head>
<body>
<!-- ======= Header ======= -->
<header id="header" class="fixed-top">
<div class="container d-flex align-items-center">
<!-- Logo + tagline. Fixed invalid </br> (br is a void element and takes no closing tag). -->
<h1 class="logo me-auto"><a href="index.html"><img src="assets/img/logos/V-EUPNEA-black_logo.png" alt="" class="img-fluid">V-EUPNEA<br><span>Living, Breathing Virtual Worlds</span></a></h1>
<!-- Uncomment below if you prefer to use an image logo -->
<!-- <a href="index.html" class="logo me-auto me-lg-0"><img src="assets/img/logo.png" alt="" class="img-fluid"></a>-->
<nav id="navbar" class="navbar order-last order-lg-0">
<ul>
<li><a href="index.html">Home</a></li>
<li><a href="news.html">News</a></li>
<li><a href="about.html">About</a></li>
<li><a href="team.html">People</a></li>
<li><a href="publications.html" class="active">Publications</a></li>
<li><a href="projects.html">Projects</a></li>
<li><a href="software_data.html">Software & Data</a></li>
<li><a href="contact.html">Contact</a></li>
</ul>
<i class="bi bi-list mobile-nav-toggle"></i>
</nav><!-- .navbar -->
<div class="header-social-links d-flex">
<!-- External link in a new tab: rel="noopener noreferrer" prevents tab-nabbing.
The image is the link's only content, so it needs a descriptive alt for an accessible name. -->
<a href="https://www.cyens.org.cy/en-gb/" target="_blank" rel="noopener noreferrer" class="twitter">
<img src="assets/img/logos/CYENS-BLACK-LOGO.png" alt="CYENS Centre of Excellence" class="img-fluid">
</a>
</div>
<!--
<div class="header-social-links d-flex">
<a href="#" class="twitter"><i class="bu bi-twitter"></i></a>
<a href="#" class="facebook"><i class="bu bi-facebook"></i></a>
<a href="#" class="instagram"><i class="bu bi-instagram"></i></a>
<a href="#" class="linkedin"><i class="bu bi-linkedin"></i></i></a>
</div>
-->
</div>
</header><!-- End Header -->
<main id="main">
<!-- ======= Breadcrumbs ======= -->
<!-- Page title bar; the breadcrumb trail is commented out but kept for future use -->
<section id="breadcrumbs" class="breadcrumbs">
<div class="container">
<div class="d-flex justify-content-between align-items-center">
<h2>Publications</h2>
<!--<ol>
<li><a href="index.html">Home</a></li>
<li>Publications</li>
</ol> -->
</div>
</div>
</section><!-- End Breadcrumbs -->
<!-- ======= Portfolio Section ======= -->
<section id="portfolio" class="portfolio">
<div class="container">
<!-- Filter bar: data-filter values are presumably consumed by Isotope in main.js — verify against assets/js/main.js -->
<div class="row">
<div class="col-lg-12 d-flex justify-content-center">
<ul id="portfolio-flters">
<li data-filter="*" class="filter-active">All</li>
<li data-filter=".filter-journal" class="">Journals</li>
<li data-filter=".filter-conf" class="">Conferences</li>
<!--<li data-filter=".filter-book">Book Chapters</li>-->
</ul>
</div>
</div>
<div class="row portfolio-container aos-init" data-aos="fade">
<!-- Under this - Publications 2023-->
<!-- NOTE(review): the bare "*" class token below appears redundant — the Isotope "*" filter matches all items regardless of class; confirm before removing -->
<div class="section-title portfolio-item * filter-journal filter-conf">
<h2>2023</h2>
</div>
<div class="portfolio-item filter-journal">
<!-- GREIL-Crowds — journal paper (ACM Transactions on Graphics, SIGGRAPH 2023).
Fixes: venue name corrected ("ACM Transaction on Computer Graphics" -> "ACM Transactions on Graphics"),
whitespace-mangled alt text collapsed to one line, invalid empty id="" removed from the abstract <p>,
missing space after comma in the abstract restored. -->
<div id="siggraph23-greil" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/sig2023greil-thumb.jpg" alt="GREIL-Crowds: Crowd Simulation with Deep Reinforcement Learning and Examples" class="img-fluid img-rounded center-block">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/siggraph23-greil.html">
GREIL-Crowds: Crowd Simulation with Deep Reinforcement Learning and Examples<i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Panayiotis Charalambous, Julien Pettré, Vassilis Vassiliades, Yiorgos Chrysanthou, and Nuria Pelechano
</span>
<br>
<span class="publistJournal">
ACM Transactions on Graphics 42, 4, Article 1 (August 2023)
</span>
<p class="abstract">
Simulating crowds with realistic behaviors is a difficult but very important task for a variety of applications. Quantifying how a person balances between different conflicting criteria such as goal seeking, collision avoidance and moving within a group is not intuitive, especially if we consider that behaviors differ largely between people. Inspired by recent advances in Deep Reinforcement Learning, we propose Guided REinforcement Learning (GREIL) Crowds, a method that learns a model for pedestrian behaviors which is guided by reference crowd data. The model successfully captures behaviors such as goal seeking, being part of consistent groups without the need to define explicit relationships and wandering around seemingly without a specific purpose. Two fundamental concepts are important in achieving these results: (a) the per agent state representation and (b) the reward function. The agent state is a temporal representation of the situation around each agent. The reward function is based on the idea that people try to move in situations/states in which they feel comfortable in. Therefore, in order for agents to stay in a comfortable state space, we first obtain a distribution of states extracted from real crowd data; then we evaluate states based on how much of an outlier they are compared to such a distribution. We demonstrate that our system can capture and simulate many complex and subtle crowd interactions in varied scenarios. Additionally, the proposed method generalizes to unseen situations, generates consistent behaviors and does not suffer from the limitations of other data-driven and reinforcement learning approaches.
</p>
</div>
</div>
</div>
<!-- Under this - Publications 2022-->
<div class="section-title portfolio-item * filter-journal filter-conf">
<h2>2022</h2>
</div>
<div class="portfolio-item filter-conf">
<!-- CCP: Configurable Crowd Profiles — conference paper (SIGGRAPH '22).
Fixes: invalid empty id="" removed from the abstract <p>; missing space after
"agent profiles." restored in the abstract. -->
<div id="siggraph22-ccp" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/siggraph22-ccp-thumb.jpg" alt="CCP: Configurable Crowd Profiles" class="img-fluid img-rounded center-block">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/siggraph22-ccp.html">CCP: Configurable Crowd Profiles <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Andreas Panayiotou, Theodoros Kyriakou, Marilena Lemonari, Yiorgos Chrysanthou, Panayiotis Charalambous
</span>
<br>
<span class="publistJournal">
SIGGRAPH '22 Conference Proceedings
</span>
<p class="abstract">
Diversity among agents' behaviors and heterogeneity in virtual crowds in general, is an important aspect of crowd simulation as it is crucial to the perceived realism and plausibility of the resulting simulations. Heterogeneous crowds constitute the pillar in creating numerous real-life scenarios such as museum exhibitions, which require variety in agent behaviors, from basic collision avoidance to more complex interactions both among agents and with environmental features. Most of the existing systems optimize for specific behaviors such as goal seeking, and neglect to take into account other behaviors and how these interact together to form diverse agent profiles. In this paper, we present a RL-based framework for learning multiple agent behaviors concurrently. We optimize the agent policy by varying the importance of the selected behaviors (goal seeking, collision avoidance, interaction with environment, and grouping) while training; essentially we have a reward function that changes dynamically during training. The importance of each separate sub-behavior is added as input to the policy, resulting in the development of a single model capable of capturing as well as enabling dynamic run-time manipulation of agent profiles; thus allowing configurable profiles. Through a series of experiments, we verify that our system provides users with the ability to design virtual scenes; control and mix agent behaviors thus creating personality profiles, and assign different profiles to groups of agents. Moreover, we demonstrate that interestingly the proposed model generalizes to situations not seen in the training data such as a) crowds with higher density, b) behavior weights that are outside the training intervals and c) to scenes with more intricate environment layouts.
</p>
</div>
</div>
</div>
<!-- Journal Template Start-->
<div class="portfolio-item filter-journal">
<!-- Authoring Virtual Crowds: A Survey — journal paper (EG'22 / CGF).
Fixes: placeholder alt="..." replaced with the paper title; useless empty
title="" dropped from the thumbnail; invalid empty id="" removed from the
abstract <p> and the empty id on the item wrapper dropped. -->
<div class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/eg22-authoringvirtualcrowds.png" alt="Authoring Virtual Crowds: A Survey" class="img-fluid img-rounded center-block">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/eg22-authoringvirtualcrowds.html">Authoring Virtual Crowds: A Survey <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Marilena Lemonari, Rafael Blanco, Panayiotis Charalambous, Nuria Pelechano, Marios Avraamides, Julien Pettré, Yiorgos Chrysanthou
</span>
<br>
<span class="publistJournal">
Eurographics'22, Computer Graphics Forum
</span>
<p class="abstract">
Recent advancements in crowd simulation unravel a wide range of functionalities for virtual agents, delivering highly-realistic, natural virtual crowds. Such systems are of particular importance to a variety of applications in fields such as: entertainment (e.g., movies, computer games); architectural and urban planning; and simulations for sports and training. However, providing their capabilities to untrained users necessitates the development of authoring frameworks. Authoring virtual crowds is a complex and multi-level task, varying from assuming control and assisting users to realise their creative intents, to delivering intuitive and easy to use interfaces, facilitating such control. In this paper, we present a categorisation of the authorable crowd simulation components, ranging from high-level behaviours and path-planning to local movements, as well as animation and visualisation. We provide a review of the most relevant methods in each area, emphasising the amount and nature of influence that the users have over the final result. Moreover, we discuss the currently available authoring tools (e.g., graphical user interfaces, drag-and-drop), identifying the trends of early and recent work. Finally, we suggest promising directions for future research that mainly stem from the rise of learning-based methods, and the need for a unified authoring framework.
</p>
</div>
</div>
</div>
<!-- Journal Template End-->
<!-- Under this - Publications 2021-->
<div class="section-title portfolio-item * filter-journal filter-conf">
<h2>2021</h2>
</div>
<!-- Perceived realism of pedestrian crowds trajectories in VR — conference paper (VRST '21) -->
<div class="portfolio-item filter-conf">
<div id="vrst2021-perceived" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/vrst2021-perceived.png" alt="Perceived realism of pedestrian crowds trajectories in VR" class="img-fluid img-rounded center-block">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/vrst21-Perceived.html">Perceived realism of pedestrian crowds trajectories in VR <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Daniele Giunchi, Riccardo Bovo, Panayiotis Charalambous, Fotis Liarokapis, Alastair Shipman, Stuart James, Anthony Steed, Thomas Heinis
</span>
<br>
<span class="publistJournal">
VRST '21: Proceedings of the 27th ACM Symposium on Virtual Reality Software and Technology
</span>
<p class="abstract">
Crowd simulation algorithms play an essential role in populating Virtual Reality (VR) environments with multiple autonomous humanoid agents. The generation of plausible trajectories can be a significant computational cost for real-time graphics engines, especially in untethered and mobile devices such as portable VR devices. Previous research explores the plausibility and realism of crowd simulations on desktop computers but fails to account the impact it has on immersion. This study explores how the realism of crowd trajectories affects the perceived immersion in VR. We do so by running a psychophysical experiment in which participants rate the realism of real/synthetic trajectories data, showing similar level of perceived realism.
</p>
</div>
</div>
</div>
<!-- Towards a multi-agent NPC road network — conference paper (IEEE CoG 2021) -->
<div class="portfolio-item filter-conf">
<div id="CoG21-Towards" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/CoG21-Towards.png" alt="Towards a multi-agent non-player character road network: a Reinforcement Learning approach" class="img-fluid img-rounded center-block" title="">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/CoG21-Towards.html">Towards a multi-agent non-player character road network: a Reinforcement Learning approach <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Stela Makri and Panayiotis Charalambous
</span>
<br>
<span class="publistJournal">
2021 IEEE Conference on Games (CoG)
</span>
<p class="abstract">
Creating detailed and interactive game environments is an area of great importance in the video game industry. This includes creating realistic Non-Player Characters which respond seamlessly to the players actions. Machine learning had great contributions to the area, overcoming scalability and robustness shortcomings of hand-scripted models. We introduce the early results of a reinforcement learning approach in building a simulation environment for heterogeneous, multi-agent non-player characters in a dynamic road network game scene.
</p>
</div>
</div>
</div>
<!-- Emotion Recognition from 3D Motion Capture Data — conference paper (IEEE CoG 2021) -->
<div class="portfolio-item filter-conf">
<div id="CoG21-Emotion" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/CoG21-Emotion.png" alt="Emotion Recognition from 3D Motion Capture Data using Deep CNNs" class="img-fluid img-rounded center-block" title="">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/CoG21-Emotion.html">Emotion Recognition from 3D Motion Capture Data using Deep CNNs <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Haris Zacharatos, Christos Gatzoulis, Panayiotis Charalambous and Yiorgos Chrysanthou
</span>
<br>
<span class="publistJournal">
2021 IEEE Conference on Games (CoG)
</span>
<p class="abstract">
Designing computer games requires a player-centered approach. Whilst following guidelines and functional requirement specifications is part of the process, observing and measuring qualities of the players experience is key in providing feedback to game designers. Moreover, it can also be used to create adaptive and personalized experiences for players. With the advancement of affective computing and gaming user interfaces, the opportunity to recognize the player's emotions becomes more feasible and each different modality can offer additional information as affect expression is less defined as compared to action selection. This paper explores the use of 3D skeleton motion data transformed to 2D images that encode pose and movement dynamics to represent annotated emotions. The 2D images are then used to train and test the Inception V3 CNN model on a binary classification emotion recognition between happy and sad emotions. Preliminary results in unseen test data indicate that the above transformation technique can capture emotional information. The paper also discusses future directions that may improve the effectiveness of the proposed method on a wider scale.
</p>
</div>
</div>
</div>
<!-- Under this - Publications 2020-->
<div class="section-title portfolio-item * filter-journal filter-conf">
<h2>2020</h2>
</div>
<!-- Digital Layered Models (EHEM project) — workshop paper (EG GCH 2020) -->
<div class="portfolio-item filter-conf">
<div id="GCH20-Digital" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/GCH20-Digital.png" alt="Digital Layered Models of Architecture and Mural Paintings over Time" class="img-fluid img-rounded center-block" title="">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/GCH20-Digital.html">Digital Layered Models of Architecture and Mural Paintings over Time <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Milagros Guardia, Paola Pogliani, Giulia Bordi, Panayiotis Charalambous, Carlos Andujar, Xavier Pueyo
</span>
<br>
<span class="publistJournal">
Eurographics Workshop on Graphics and Cultural Heritage (GCH), 2020
</span>
<p class="abstract">
The European project Enhancement of Heritage Experiences: The Middle Ages. Digital Layered Models of Architecture and Mural Paintings over Time (EHEM), approved in the call for JPICH Conservation, Protection and Use (0127) in the year 2020, aims to obtain virtual reconstructions of medieval artistic heritage - architecture with mural paintings - that are as close as possible to the original at different times, incorporating historical-artistic knowledge and the diachronic perspective of heritage, as an instrument for researchers, restorers and heritage curators and to improve the visitor's perceptions and experiences.
</p>
</div>
</div>
</div>
<!-- Under this - Publications 2019 (comment previously said 2020, contradicting the heading below) -->
<div class="section-title portfolio-item * filter-journal filter-conf">
<h2>2019</h2>
</div>
<!-- Why did the human cross the Road? — conference poster/short paper (MIG '19) -->
<div class="portfolio-item filter-conf">
<div id="MIG-Why" class="row publistItem fade-up ">
<div class="col-sm-2 portfolio-wrap">
<img src="assets/img/publications/MIG-Why.png" alt="Why did the human cross the Road?" class="img-fluid img-rounded center-block" title="">
</div>
<div class="col-sm-10 text-justify">
<h2 class="publistTitle">
<a href="publication_pages/MIG-Why.html">Why did the human cross the Road? <i class="bi bi-box-arrow-up-right"></i></a>
</h2>
<span class="publistAuthors">
Panayiotis Charalambous and Yiorgos Chrysanthou.
</span>
<br>
<span class="publistJournal">
Motion, Interaction and Games (MIG ’19). Association for Computing Machinery, New York, NY, USA, Article 47, 1–2.
</span>
<p class="abstract">
Humans at rest tend to stay at rest. Humans in motion tend to cross the road – Isaac Newton.” Even though this response is meant to be a joke to indicate the answer is quite obvious, this important feature of real world crowds is rarely considered in simulations. Answering this question involves several things such as how agents balance between reaching goals, avoid collisions with heterogeneous entities and how the environment is being modeled. As part of a preliminary study, we introduce a reinforcement learning framework to train pedestrians to cross streets with bidirectional traffic. Our initial results indicate that by using a very simple goal centric representation of agent state and a simple reward function, we can simulate interesting behaviors such as pedestrians crossing the road through crossings or waiting for cars to pass.
</p>
</div>
</div>
</div>
</div>
</section><!-- End Portfolio Section -->
</main><!-- End #main -->
<!-- ======= Footer ======= -->
<footer id="footer">
<div class="footer-top">
<div class="container">
<div class="row">
<div class="col-lg-4 col-md-12 footer-contact" style="text-align: center;">
<h3>V-EUPNEA</h3>
<div style="font-size: 15px;">Living, Breathing Virtual Worlds</div>
</div>
<!-- Logo links open external sites in a new tab: rel="noopener noreferrer" added;
each image is its link's only content, so descriptive alt text provides the accessible name. -->
<div class="col-lg-2 col-6" style="text-align: center; margin-bottom: 30px;">
<a href="https://www.cyens.org.cy/en-gb/research/pillars-groups/visual-sciences/v-eupnea/" target="_blank" rel="noopener noreferrer">
<img src="assets/img/logos/V-EUPNEA-white.png" alt="V-EUPNEA research group at CYENS" style="width: 95%">
</a>
</div>
<div class="col-lg-2 col-6" style="text-align: center; margin-bottom: 30px;">
<a href="https://www.cyens.org.cy/en-gb/" target="_blank" rel="noopener noreferrer">
<img src="assets/img/logos/CYENS-HORIZONTAL-WHITE-TRANSPARENT.png" alt="CYENS Centre of Excellence" style="width: 95%">
</a>
</div>
<div class="col-lg-4 col-md-12" style="text-align: center;">
<p><i class="bi bi-geo-alt-fill"></i> Square, Plateia Dimarchias 23<br>
Nicosia 1016, Cyprus<br>
</p>
</div>
</div>
</div>
</div>
<div class="container d-md-flex py-4">
<div class="me-md-auto text-center text-md-start">
<div class="copyright">
<!-- Copyright holder was the template placeholder "Company"; replaced with the site owner -->
© Copyright <strong><span>V-EUPNEA</span></strong>. All Rights Reserved
</div>
<div class="credits">
<!-- All the links in the footer should remain intact. -->
<!-- You can delete the links only if you purchased the pro version. -->
<!-- Licensing information: https://bootstrapmade.com/license/ -->
<!-- Purchase the pro version with working PHP/AJAX contact form: https://bootstrapmade.com/company-free-html-bootstrap-template/ -->
Designed by <a href="https://bootstrapmade.com/">BootstrapMade</a>
</div>
</div>
<!--
<div class="social-links text-center text-md-right pt-3 pt-md-0">
<a href="https://github.com/veupnea" target="_blank" class="linkedin"><i class="bx bxl-github"></i></a>
</div>
-->
</div>
</footer><!-- End Footer -->
<!-- Back-to-top control; the href="#" anchor is presumably intercepted by main.js for smooth scrolling — verify against assets/js/main.js -->
<a href="#" class="back-to-top d-flex align-items-center justify-content-center"><i class="bi bi-arrow-up-short"></i></a>
<!-- Vendor JS Files (loaded at end of body so the DOM exists before they run) -->
<script src="assets/vendor/aos/aos.js"></script>
<script src="assets/vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<script src="assets/vendor/glightbox/js/glightbox.min.js"></script>
<script src="assets/vendor/isotope-layout/isotope.pkgd.min.js"></script>
<script src="assets/vendor/swiper/swiper-bundle.min.js"></script>
<script src="assets/vendor/waypoints/noframework.waypoints.js"></script>
<script src="assets/vendor/php-email-form/validate.js"></script>
<script src="assets/vendor/jquery/jquery3.6.0.min.js"></script>
<!-- Template Main JS File -->
<script src="assets/js/main.js"></script>
</body>
</html>