<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>
Mushui Liu
</title>
<meta name="description" content="A simple, whitespace theme for academics. Based on [*folio](https://github.com/bogoli/-folio) design.
">
<!-- Open Graph -->
<!-- Bootstrap & MDB -->
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" rel="stylesheet" integrity="sha512-MoRNloxbStBcD8z3M/2BmnT+rg4IsMxPkXaGh2zD6LGNNFE80W3onsAhRcMAMrSoyWL9xD7Ert0men7vR8LUZg==" crossorigin="anonymous">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/mdbootstrap/4.19.1/css/mdb.min.css" integrity="sha512-RO38pBRxYH3SoOprtPTD86JFOclM51/XTIdEPh5j8sj4tp8jmQIx26twG52UaLi//hQldfrh7e51WzP9wuP32Q==" crossorigin="anonymous" />
<!-- Fonts & Icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.14.0/css/all.min.css" integrity="sha512-1PKOgIY59xJ8Co8+NE6FZ+LOAZKjy+KY8iq0G4B3CyeY6wYHN3yt9PW0XpSriVlkMXe40PTKnXrLnZ9+fkDaog==" crossorigin="anonymous">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/academicons/1.9.0/css/academicons.min.css" integrity="sha512-W4yqoT1+8NLkinBLBZko+dFB2ZbHsYLDdr50VElllRcNt2Q4/GSs6u71UHKxB7S6JEMCp5Ve4xjh3eGQl/HRvg==" crossorigin="anonymous">
<link
rel="stylesheet"
type="text/css"
href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700|Roboto+Slab:100,300,400,500,700|Material+Icons&display=swap"
>
<!-- Code Syntax Highlighting -->
<link rel="stylesheet" href="/assets/css/jekyll-pygments-themes/github.css">
<!-- Styles -->
<link rel="stylesheet" href="/assets/css/main.css">
<link rel="canonical" href="/">
</head>
<body class="fixed-top-nav ">
<!-- Header -->
<header>
<!-- Nav Bar -->
<nav id="navbar" class="navbar navbar-light navbar-expand-sm fixed-top">
<div class="container">
<!-- Social Icons -->
<div class="navbar-brand social">
<a href="https://scholar.google.com/citations?user=-WUyWpMAAAAJ&hl=en" title="Google Scholar" target="_blank" rel="noopener noreferrer"><i class="ai ai-google-scholar"></i></a>
<a href="https://github.com/XiaobuL" title="GitHub" target="_blank" rel="noopener noreferrer"><i class="fab fa-github"></i></a>
</div>
<!-- Navbar Toggle -->
<button class="navbar-toggler collapsed ml-auto" type="button" data-toggle="collapse" data-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar top-bar"></span>
<span class="icon-bar middle-bar"></span>
<span class="icon-bar bottom-bar"></span>
</button>
<div class="collapse navbar-collapse text-right" id="navbarNav">
<ul class="navbar-nav ml-auto flex-nowrap">
<!-- About -->
<!-- Other pages -->
</ul>
</div>
</div>
</nav>
</header>
<!-- Content -->
<div class="container mt-5">
<div class="post">
<header class="post-header">
<h1 class="post-title">
Mushui Liu
</h1>
<p class="desc">Zhejiang University</p>
</header>
<article>
<div class="profile float-right">
<img class="img-fluid z-depth-1 rounded" src="" srcset="/assets/img/mushuiliu.jpg 120w">
</div>
<div class="clearfix">
<p>I'm a final-year Ph.D. student at <a href="https://en.wikipedia.org/wiki/Zhejiang_University" target="_blank" rel="noopener noreferrer">Zhejiang University</a>, where I am supervised by Prof. Yu. My research focuses on AIGC, Multimodal Large Language Models (MLLMs), Parameter-Efficient Fine-tuning (PEFT), Few-shot Learning (FSL), and Self-supervised Learning (SSL).
</p>
<p>
💬 Feel free to drop me an email (lms@zju.edu.cn) if you are interested in any of the topics above; remote collaborations are welcome.
</p>
</div>
<div class="news">
<h2>🔥 News</h2>
<div class="table-responsive" style="height: 250px; overflow-y:scroll">
<table class="table table-sm table-borderless" style="width: 100%">
<colgroup>
<col span="1" style="width: 15%;">
<col span="1" style="width: 85%;">
</colgroup>
<!-- Put <thead>, <tbody>, and <tr>'s here! -->
<tbody>
<tr>
<th scope="row">Dec 20, 2024</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>Neural Networks</strong>.
</td>
</tr>
<tr>
<th scope="row">Dec 12, 2024</th>
<td>
🎉🎉🎉 <strong>Four</strong> papers are accepted to <strong>AAAI-2025</strong>.
</td>
</tr>
<tr>
<th scope="row">July 15, 2024</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>ACM MM-2024</strong>.
</td>
</tr>
<tr>
<th scope="row">July 12, 2024</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>ECAI-2024</strong>.
</td>
</tr>
<tr>
<th scope="row">July 01, 2024</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>ECCV-2024</strong>.
</td>
</tr>
<tr>
<th scope="row">Feb 03, 2024</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>Neural Networks</strong>.
</td>
</tr>
<tr>
<th scope="row">Oct 23, 2022</th>
<td>
🎉🎉🎉 <strong>One</strong> paper is accepted to <strong>Neurocomputing</strong>.
</td>
</tr>
</tbody>
</table>
</div>
</div>
<div class="publications">
<h2>📝 Selected Publications</h2>
<p> A full publication list can be found on <a href="https://scholar.google.com/citations?user=-WUyWpMAAAAJ&hl=en" target="_blank" rel="noopener noreferrer">Google Scholar</a>. <br>
<sup>*</sup>equal contribution; <sup>#</sup>corresponding author. </p>
<h2 class="year">2024</h2>
<ol class="bibliography">
<li>
<div class="row">
<div class="col-md-3">
<div class="img-fluid rounded">
<img src="/assets/teaser/mars-aaai.jpg" alt="Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis" style="width: 100%;">
</div>
</div>
<div id="mars-aaai-2025" class="col-md-9">
<div class="title">
<span>Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis</span>
<!-- <span style="color:red;">(Oral)</span> -->
</div>
<div class="author">
Wanggui He<sup>*</sup>, Siming Fu<sup>*</sup>, <em>Mushui Liu<sup>*</sup></em>, Xierui Wang, Wenyi Xiao, Fangxun Shu, Yi Wang, Lei Zhang, Zhelun Yu, Haoyuan Li, Ziwei Huang, LeiLei Gan, Hao Jiang
</div>
<div class="periodical">
<em>In AAAI Conference on Artificial Intelligence (AAAI)</em>
2025
</div>
<div class="links">
<a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://arxiv.org/pdf/2407.07614" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">PDF</a>
<!-- <a href="https://xdimlab.github.io/TeFF/" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">Website</a> -->
</div>
<!-- Hidden abstract block -->
<div class="abstract hidden">
<p>Auto-regressive models have made significant progress in the realm of text-to-image synthesis, yet devising an appropriate model architecture and training strategy to achieve a satisfactory level remains an important avenue of exploration. In this work, we introduce MARS, a novel framework for T2I generation that incorporates a specially designed Semantic Vision-Language Integration Expert (SemVIE). This innovative component integrates pre-trained LLMs by independently processing linguistic and visual information—freezing the textual component while fine-tuning the visual component. This methodology preserves the NLP capabilities of LLMs while imbuing them with exceptional visual understanding. Building upon the powerful base of the pre-trained Qwen-7B, MARS stands out with its bilingual generative capabilities corresponding to both English and Chinese language prompts and the capacity for joint image and text generation. The flexibility of this framework lends itself to migration towards <strong>any-to-any</strong> task adaptability. Furthermore, MARS employs a multi-stage training strategy that first establishes robust image-text alignment through complementary bidirectional tasks and subsequently concentrates on refining the T2I generation process, significantly augmenting text-image synchrony and the granularity of image details. Notably, MARS requires only 9% of the GPU days needed by SD1.5, yet it achieves remarkable results across a variety of benchmarks, illustrating the training efficiency and the potential for swift deployment in various applications.
</p>
</div>
<!-- Hidden bibtex block -->
</div>
</div>
</li>
<li>
<div class="row">
<div class="col-md-3">
<div class="img-fluid rounded">
<img src="/assets/teaser/llm4gen.jpg" alt="Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis" style="width: 100%;">
</div>
</div>
<div id="llm4gen-aaai-2025" class="col-md-9">
<div class="title">
<span>LLM4GEN: Leveraging Semantic Representation of LLMs for Text-to-Image Generation</span>
<!-- <span style="color:red;">(Oral)</span> -->
</div>
<div class="author">
<em>Mushui Liu<sup>*</sup></em>, Yuhang Ma<sup>*</sup>, Zhen Yang, Jun Dan, Yunlong Yu, Zeng Zhao, Bai Liu, Changjie Fan, Zhipeng Hu
</div>
<div class="periodical">
<em>In AAAI Conference on Artificial Intelligence (AAAI)</em>
2025
</div>
<div class="links">
<a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://arxiv.org/pdf/2407.00737" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">PDF</a>
<!-- <a href="https://xdimlab.github.io/TeFF/" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">Website</a> -->
</div>
<!-- Hidden abstract block -->
<div class="abstract hidden">
<p>Diffusion models have exhibited substantial success in text-to-image generation. However, they often encounter challenges when dealing with complex and dense prompts involving multiple objects, attribute binding, and long descriptions. In this paper, we propose a novel framework called LLM4GEN, which enhances the semantic understanding of text-to-image diffusion models by leveraging the representation of Large Language Models (LLMs). It can be seamlessly incorporated into various diffusion models as a plug-and-play component. A specially designed Cross-Adapter Module (CAM) integrates the original text features of text-to-image models with LLM features, thereby enhancing text-to-image generation. Additionally, to facilitate and correct entity-attribute relationships in text prompts, we develop an entity-guided regularization loss to further improve generation performance. We also introduce DensePrompts, which contains 7,000 dense prompts to provide a comprehensive evaluation for the text-to-image generation task. Experiments indicate that LLM4GEN significantly improves the semantic alignment of SD1.5 and SDXL, demonstrating increases of 9.69% and 12.90% in color on T2I-CompBench, respectively. Moreover, it surpasses existing models in terms of sample quality, image-text alignment, and human evaluation.
</p>
</div>
<!-- Hidden bibtex block -->
</div>
</div>
</li>
<li>
<div class="row">
<div class="col-md-3">
<div class="img-fluid rounded">
<img src="/assets/teaser/ecer.jpg" alt="Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis" style="width: 100%;">
</div>
</div>
<div id="ecer-fsl-aaai-2025" class="col-md-9">
<div class="title">
<span>Envisioning Class Entity Reasoning by Large Language Models for Few-shot Learning</span>
<!-- <span style="color:red;">(Oral)</span> -->
</div>
<div class="author">
<em>Mushui Liu</em>, Fangtai Wu, Bozheng Li, Ziqian Lu, Yunlong Yu, Xi Li
</div>
<div class="periodical">
<em>In AAAI Conference on Artificial Intelligence (AAAI)</em>
2025
</div>
<div class="links">
<a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://arxiv.org/pdf/2408.12469" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">PDF</a>
<!-- <a href="https://xdimlab.github.io/TeFF/" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">Website</a> -->
</div>
<!-- Hidden abstract block -->
<div class="abstract hidden">
<p>Few-shot learning (FSL) aims to recognize new concepts using a limited number of visual samples. Existing approaches attempt to incorporate semantic information into the limited visual data for category understanding. However, these methods often enrich class-level feature representations with abstract category names, failing to capture the nuanced features essential for effective generalization. To address this issue, we propose a novel framework for FSL, which incorporates both the abstract class semantics and the concrete class entities extracted from Large Language Models (LLMs), to enhance the representation of the class prototypes. Specifically, our framework composes a Semantic-guided Visual Pattern Extraction (SVPE) module and a Prototype-Calibration (PC) module, where the SVPE meticulously extracts semantic-aware visual patterns across diverse scales, while the PC module seamlessly integrates these patterns to refine the visual prototype, enhancing its representativeness. Extensive experiments on four few-shot classification benchmarks and the BSCD-FSL cross-domain benchmarks showcase remarkable advancements over the current state-of-the-art methods. Notably, for the challenging one-shot setting, our approach, utilizing the ResNet-12 backbone, achieves an impressive average improvement of 1.95% over the second-best competitor.
</p>
</div>
<!-- Hidden bibtex block -->
</div>
</div>
</li>
<li>
<div class="row">
<div class="col-md-3">
<div class="img-fluid rounded">
<img src="/assets/teaser/tsam.jpg" alt="Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis" style="width: 100%;">
</div>
</div>
<div id="ecer-fsl-aaai-2025" class="col-md-9">
<div class="title">
<span>Frame Order Matters: A Temporal Sequence-Aware Model for Few-Shot Action Recognition</span>
<!-- <span style="color:red;">(Oral)</span> -->
</div>
<div class="author">
Bozheng Li<sup>*</sup>, <em>Mushui Liu<sup>*</sup></em>, Gaoang Wang, Yunlong Yu
</div>
<div class="periodical">
<em>In AAAI Conference on Artificial Intelligence (AAAI)</em>
2025
</div>
<div class="links">
<a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://arxiv.org/pdf/2408.12475" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">PDF</a>
<!-- <a href="https://xdimlab.github.io/TeFF/" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">Website</a> -->
</div>
<!-- Hidden abstract block -->
<div class="abstract hidden">
<p>In this paper, we propose a novel Temporal Sequence-Aware Model (TSAM) for few-shot action recognition (FSAR), which incorporates a sequential perceiver adapter into the pre-training framework, to integrate both the spatial information and the sequential temporal dynamics into the feature embeddings. Different from the existing fine-tuning approaches that capture temporal information by exploring the relationships among all the frames, our perceiver-based adapter recurrently captures the sequential dynamics alongside the timeline, which could perceive the order change. To obtain the discriminative representations for each class, we extend a textual corpus for each class derived from the large language models (LLMs) and enrich the visual prototypes by integrating the contextual semantic information. Besides, we introduce an unbalanced optimal transport strategy for feature matching that mitigates the impact of class-unrelated features, thereby facilitating more effective decision-making. Experimental results on five FSAR datasets demonstrate that our method sets a new benchmark, beating the second-best competitors with large margins.
</p>
</div>
<!-- Hidden bibtex block -->
</div>
</div>
</li>
<li>
<div class="row">
<div class="col-md-3">
<div class="img-fluid rounded">
<img src="/assets/teaser/omniclip.jpg" alt="Mars: Mixture of Auto-Regressive Models for Fine-grained Text-to-Image Synthesis" style="width: 100%;">
</div>
</div>
<div id="ecer-fsl-aaai-2025" class="col-md-9">
<div class="title">
<span>OmniCLIP: Adapting CLIP for Video Recognition with Spatial-Temporal Omni-Scale Feature Learning</span>
<!-- <span style="color:red;">(Oral)</span> -->
</div>
<div class="author">
<em>Mushui Liu</em>, Bozheng Li, Yunlong Yu
</div>
<div class="periodical">
<em>In European Conference on Artificial Intelligence (ECAI)</em>
2024
</div>
<div class="links">
<a class="abstract btn btn-sm z-depth-0" role="button">Abs</a>
<a href="https://ebooks.iospress.nl/pdf/doi/10.3233/FAIA240499" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">PDF</a>
<!-- <a href="https://xdimlab.github.io/TeFF/" class="btn btn-sm z-depth-0" role="button" target="_blank" rel="noopener noreferrer">Website</a> -->
</div>
<!-- Hidden abstract block -->
<div class="abstract hidden">
<p>Recent Vision-Language Models (VLMs), e.g., CLIP, have made great progress in video recognition. Despite the improvement brought by the strong visual backbone in extracting spatial features, CLIP still falls short in capturing and integrating spatial-temporal features, which are essential for video recognition. In this paper, we propose OmniCLIP, a framework that adapts CLIP for video recognition by focusing on learning comprehensive features encompassing spatial, temporal, and dynamic spatial-temporal scales, which we refer to as omni-scale features. This is achieved through the design of spatial-temporal blocks that include parallel temporal adapters (PTA), enabling efficient temporal modeling. Additionally, we introduce a self-prompt generator (SPG) module to capture dynamic object spatial features. The synergy between PTA and SPG allows OmniCLIP to discern varying spatial information across frames and assess object scales over time. We have conducted extensive experiments in supervised video recognition, few-shot video recognition, and zero-shot recognition tasks. The results demonstrate the effectiveness of our method, especially with OmniCLIP achieving a top-1 accuracy of 74.30% on HMDB51 in a 16-shot setting, surpassing the recent MotionPrompt approach even with full training data.
</p>
</div>
<!-- Hidden bibtex block -->
</div>
</div>
</li>
</ol>
</div>
</article>
</div>
</div>
<!-- Footer -->
<footer class="fixed-bottom">
<div class="container mt-0">
© 2024 Mushui Liu.
Powered by <a href="http://jekyllrb.com/" target="_blank" rel="noopener noreferrer">Jekyll</a> with <a href="https://github.com/alshedivat/al-folio" target="_blank" rel="noopener noreferrer">al-folio</a> theme. Hosted by <a href="https://pages.github.com/" target="_blank" rel="noopener noreferrer">GitHub Pages</a>.
Last updated: December 21, 2024.
</div>
</footer>
<!-- jQuery -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.min.js" integrity="sha512-bLT0Qm9VnAYZDflyKcBaQ2gg0hSYNQrJ8RilYldYQ1FxQYoCLtUjuuRuZo+fjqhx/qtq/1itJ0C2ejDxltZVFg==" crossorigin="anonymous"></script>
<!-- Bootstrap & MDB scripts -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/2.4.4/umd/popper.min.js" integrity="sha512-eUQ9hGdLjBjY3F41CScH3UX+4JDSI9zXeroz7hJ+RteoCaY+GP/LDoM8AO+Pt+DRFw3nXqsjh9Zsts8hnYv8/A==" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js" integrity="sha512-M5KW3ztuIICmVIhjSqXe01oV2bpe248gOxqmlcYrEzAvws7Pw3z6BK0iGbrwvdrUQUhi3eXgtxp5I8PDo9YfjQ==" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mdbootstrap/4.19.1/js/mdb.min.js" integrity="sha512-Mug9KHKmroQFMLm93zGrjhibM2z2Obg9l6qFG2qKjXEXkMp/VDkI4uju9m4QKPjWSwQ6O2qzZEnJDEeCw0Blcw==" crossorigin="anonymous"></script>
<!-- Masonry & imagesLoaded -->
<script defer src="https://unpkg.com/masonry-layout@4/dist/masonry.pkgd.min.js"></script>
<script defer src="https://unpkg.com/imagesloaded@4/imagesloaded.pkgd.min.js"></script>
<script defer src="/assets/js/mansory.js" type="text/javascript"></script>
<!-- Medium Zoom JS -->
<script src="https://cdn.jsdelivr.net/npm/medium-zoom@1.0.6/dist/medium-zoom.min.js" integrity="sha256-EdPgYcPk/IIrw7FYeuJQexva49pVRZNmt3LculEr7zM=" crossorigin="anonymous"></script>
<script src="/assets/js/zoom.js"></script>
<!-- Load Common JS -->
<script src="/assets/js/common.js"></script>
<!-- MathJax -->
<script type="text/javascript">
window.MathJax = {
tex: {
tags: 'ams'
}
};
</script>
<script defer type="text/javascript" id="MathJax-script" src="https://cdn.jsdelivr.net/npm/mathjax@3.2.0/es5/tex-mml-chtml.js"></script>
<script defer src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
</body>
</html>