From 4262e9249674faae669f826f34d7b3279b0df54f Mon Sep 17 00:00:00 2001
From: MVIG-RHOS
Date: Tue, 2 Jul 2024 13:04:27 +0000
Subject: [PATCH] Update from https://github.com/MVIG-RHOS/rhos_homepage/commit/87c6fb41329b820ce321f28dbfcd73e60a75bd57

---
 404.html                                     | 4 ++--
 AT-field.html                                | 2 +-
 .../pages/joint_learning-119e8c39cc0d4da1.js | 1 +
 .../pages/joint_learning-a02d2b846e830056.js | 1 -
 _next/static/media/teaser.8e9f33ea.png       | Bin 248364 -> 0 bytes
 .../o-LgHNh2HbOpLPCCYhOGd/_buildManifest.js  | 1 +
 .../_ssgManifest.js                          | 0
 .../vnX9a1kfv0D9gOXNWJ53r/_buildManifest.js  | 1 -
 ego_pca.html                                 | 4 ++--
 index.html                                   | 2 +-
 joint_learning.html                          | 4 ++--
 ocl.html                                     | 4 ++--
 pangea.html                                  | 4 ++--
 recruit.html                                 | 2 +-
 symbol_llm.html                              | 4 ++--
 video-distill.html                           | 4 ++--
 16 files changed, 19 insertions(+), 19 deletions(-)
 create mode 100644 _next/static/chunks/pages/joint_learning-119e8c39cc0d4da1.js
 delete mode 100644 _next/static/chunks/pages/joint_learning-a02d2b846e830056.js
 delete mode 100644 _next/static/media/teaser.8e9f33ea.png
 create mode 100644 _next/static/o-LgHNh2HbOpLPCCYhOGd/_buildManifest.js
 rename _next/static/{vnX9a1kfv0D9gOXNWJ53r => o-LgHNh2HbOpLPCCYhOGd}/_ssgManifest.js (100%)
 delete mode 100644 _next/static/vnX9a1kfv0D9gOXNWJ53r/_buildManifest.js

diff --git a/404.html b/404.html
index 9c8ddb8..b885be7 100644
--- a/404.html
+++ b/404.html
@@ -1,4 +1,4 @@
-404: This page could not be found

404

This page could not be found.

\ No newline at end of file
+}

404

This page could not be found.

\ No newline at end of file
diff --git a/AT-field.html b/AT-field.html
index 43082b5..2334b77 100644
--- a/AT-field.html
+++ b/AT-field.html
@@ -1 +1 @@
-Shadow-Teleop

Shadow Hand Teleop System

MVIG-RHOS, SJTU

About

  Recently, with the emergence of GPT, robot planning and reasoning systems built on large language foundation models have developed rapidly, but current SOTA systems such as Robotic Transformer 2 (RT-2) and RoboAgent still suffer from a number of shortcomings:
  (a) Insufficient robustness to external disturbances: mainstream models perform poorly under external disturbances and struggle to re-plan strategies in real time in dynamic environments; they often require manual intervention or cannot adapt to new situations automatically, and are therefore inefficient when the environment changes.
  (b) Data inefficiency: many SOTA systems require very large amounts of training data; improving their data efficiency is an important open challenge.
  (c) Insufficient scalability and transferability: current SOTA systems usually perform well only on specific tasks or domains and struggle to handle a wider range of tasks; even across very similar environments, policy transfer is difficult.
  Specifically, for dexterous-hand reasoning and planning: mainstream models execute strategies well in static environments, but still lack robust and effective re-planning capabilities under external disturbances. Our research goal is to address this problem with a new planning approach: a dual-layer planner whose strategies and goals can be replaced at both coarse and fine granularity to cope with changing environments. At the same time, we introduce a planner and an executor that can generate or substitute new strategies when planning errors are detected on either the vision side or the execution side, ensuring that the target task is accomplished.

Demo

Resources

Our code is available on GitHub

© Copyright 2022 MVIG-RHOS • Based on tbakerx
\ No newline at end of file
+Shadow-Teleop

Shadow Hand Teleop System

MVIG-RHOS, SJTU

About

  Recently, with the emergence of GPT, robot planning and reasoning systems built on large language foundation models have developed rapidly, but current SOTA systems such as Robotic Transformer 2 (RT-2) and RoboAgent still suffer from a number of shortcomings:
  (a) Insufficient robustness to external disturbances: mainstream models perform poorly under external disturbances and struggle to re-plan strategies in real time in dynamic environments; they often require manual intervention or cannot adapt to new situations automatically, and are therefore inefficient when the environment changes.
  (b) Data inefficiency: many SOTA systems require very large amounts of training data; improving their data efficiency is an important open challenge.
  (c) Insufficient scalability and transferability: current SOTA systems usually perform well only on specific tasks or domains and struggle to handle a wider range of tasks; even across very similar environments, policy transfer is difficult.
  Specifically, for dexterous-hand reasoning and planning: mainstream models execute strategies well in static environments, but still lack robust and effective re-planning capabilities under external disturbances. Our research goal is to address this problem with a new planning approach: a dual-layer planner whose strategies and goals can be replaced at both coarse and fine granularity to cope with changing environments. At the same time, we introduce a planner and an executor that can generate or substitute new strategies when planning errors are detected on either the vision side or the execution side, ensuring that the target task is accomplished.
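
  As a concrete illustration of the dual-layer design just described, the sketch below shows one way such a coarse/fine re-planning loop could be organized. It is a minimal, hypothetical sketch written from this description alone; the names coarsePlanner, finePlanner, executor, and detectError are assumptions, not part of any released code.

// Hypothetical sketch of a dual-layer planner with replaceable strategies
// and goals at coarse and fine granularity. Illustrative only.
function runTask(goal, env, { coarsePlanner, finePlanner, executor, detectError }) {
  // Coarse layer: decompose the task into sub-goals that can be swapped out.
  let subGoals = coarsePlanner.plan(goal, env.observe());
  for (let i = 0; i < subGoals.length; i++) {
    // Fine layer: choose a concrete strategy for the current sub-goal.
    let strategy = finePlanner.plan(subGoals[i], env.observe());
    while (!executor.reached(subGoals[i], env.observe())) {
      const feedback = executor.step(strategy, env);
      // Errors may be flagged on the vision side or by the executor itself.
      if (detectError(feedback)) {
        if (feedback.recoverable) {
          // Fine-grained recovery: replace only the current strategy.
          strategy = finePlanner.replan(subGoals[i], env.observe());
        } else {
          // Coarse-grained recovery: re-plan the sub-goal sequence and restart.
          subGoals = coarsePlanner.plan(goal, env.observe());
          i = -1; // the for-loop increment returns to the first sub-goal
          break;
        }
      }
    }
  }
}

  The point of the two layers is that recovery happens at two granularities: a cheap strategy swap is tried first, and a full re-plan of the sub-goal sequence is reserved for errors the fine layer cannot absorb.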

Demo

Resources

Our code is available on GitHub

© Copyright 2022 MVIG-RHOS • Based on tbakerx
\ No newline at end of file diff --git a/_next/static/chunks/pages/joint_learning-119e8c39cc0d4da1.js b/_next/static/chunks/pages/joint_learning-119e8c39cc0d4da1.js new file mode 100644 index 0000000..1f45670 --- /dev/null +++ b/_next/static/chunks/pages/joint_learning-119e8c39cc0d4da1.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[315],{3661:function(e,n,t){(window.__NEXT_P=window.__NEXT_P||[]).push(["/joint_learning",function(){return t(8354)}])},7568:function(e,n,t){"use strict";t.r(n),n.default={src:"/_next/static/media/real_exp.a0ac2359.png",height:581,width:1899,blurDataURL:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAACCAIAAADq9gq6AAAAPUlEQVR42gEyAM3/AMvLycHFvqWlocC+vbSzrMTDvry8t8nIwgC8vLW5vLeur6u/vr2em5KxtKq9trS5tK5oNiJbAnMZEQAAAABJRU5ErkJggg==",blurWidth:8,blurHeight:2}},3189:function(e,n,t){"use strict";t.r(n),n.default={src:"/_next/static/media/sharedcontrol.a824b4b2.png",height:296,width:627,blurDataURL:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAECAIAAAA8r+mnAAAAYklEQVR42g3EyxJEMBAF0Pz/103NYoYFRUqhSeIVHTdBcxZHAembDwcvMUxgJyfsRgFexbCb8u8X4rn2Uy0SurEdTa/Wzc9N6Yos3YID5vd5B6JiZqKh0do6e6ZEurruS0QemNtaJE7oIO4AAAAASUVORK5CYII=",blurWidth:8,blurHeight:4}},2817:function(e,n,t){"use strict";t.r(n),n.default={src:"/_next/static/media/teaser-1.a50f8766.png",height:1026,width:4058,blurDataURL:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAACCAIAAADq9gq6AAAAPUlEQVR42gUAQQqAIMz//yzoIqMagRAbypoIMsWDXsSlmMx6a0bEHLOWcQHcGJyIIL5zzV/Vw8NSz8N/gTY4zSq4et82YAAAAABJRU5ErkJggg==",blurWidth:8,blurHeight:2}},5434:function(e,n,t){"use strict";t.r(n),n.default={src:"/_next/static/media/pentagon.d7cd4fb8.png",height:512,width:1171,blurDataURL:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAADCAYAAACuyE5IAAAAbklEQVR4nAFjAJz/AdnZlwSY02g6D/j3DkcYfsXzB1QDOR0EPwP9+tmCz/LWAajC+ibH+9Rl3PUAMxPzKHahSt4D9djkdQQIAPryIB5tAcfK9wy38OlT5/IKGpvQFZLzY8YbAPkCUAIJBN4G/fysmyQxREB6mb8AAAAASUVORK5CYII=",blurWidth:8,blurHeight:3}},7782:function(e,n,t){"use strict";t.d(n,{jE:function(){return m},yb:function(){return f},zM:function(){return h}});var i=t(5893),r=t(4829),a=t(728),s=t(1561),o=t(4184),l=t.n(o),c=t(1664),u=t.n(c),d=t(7294),f="headerNav",m=(0,d.memo)(function(e){var n=e.navSections,t="-m-1.5 p-1.5 rounded-md font-bold first-letter:uppercase hover:transition-colors hover:duration-300 focus:outline-none focus-visible:ring-2 focus-visible:ring-orange-500 sm:hover:text-orange-500 text-neutral-100",r=l()(t,"text-orange-500"),a=l()(t,"text-neutral-100");return(0,i.jsx)("header",{className:"fixed top-0 z-50 hidden w-full bg-neutral-900/50 p-4 backdrop-blur sm:block",id:f,children:(0,i.jsxs)("nav",{className:"flex justify-center gap-x-8",children:[(0,i.jsx)(u(),{href:"/",passHref:!0,children:(0,i.jsx)("a",{className:a,children:"Home"},"home")}),n.map(function(e){return(0,i.jsx)(g,{activeClass:r,current:!1,inactiveClass:a,section:e},e)})]})})}),h=(0,d.memo)(function(e){var n=e.navSections,t=(0,d.useState)(!1),o=t[0],c=t[1],f=(0,d.useCallback)(function(){c(!o)},[o]),m="p-2 rounded-md first-letter:uppercase transition-colors duration-300 focus:outline-none focus-visible:ring-2 focus-visible:ring-orange-500",h=l()(m,"bg-neutral-900 text-white font-bold"),x=l()(m,"text-neutral-200 font-medium");return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsxs)("button",{"aria-label":"Menu Button",className:"fixed top-2 right-2 z-40 rounded-md bg-orange-500 p-2 ring-offset-gray-800/60 hover:bg-orange-400 focus:outline-none focus:ring-0 focus-visible:ring-2 focus-visible:ring-orange-500 
focus-visible:ring-offset-2 sm:hidden",onClick:f,children:[(0,i.jsx)(s.Z,{className:"h-8 w-8 text-white"}),(0,i.jsx)("span",{className:"sr-only",children:"Open sidebar"})]}),(0,i.jsx)(r.u.Root,{as:d.Fragment,show:o,children:(0,i.jsxs)(a.V,{as:"div",className:"fixed inset-0 z-40 flex sm:hidden",onClose:f,children:[(0,i.jsx)(r.u.Child,{as:d.Fragment,enter:"transition-opacity ease-linear duration-300",enterFrom:"opacity-0",enterTo:"opacity-100",leave:"transition-opacity ease-linear duration-300",leaveFrom:"opacity-100",leaveTo:"opacity-0",children:(0,i.jsx)(a.V.Overlay,{className:"fixed inset-0 bg-stone-900 bg-opacity-75"})}),(0,i.jsx)(r.u.Child,{as:d.Fragment,enter:"transition ease-in-out duration-300 transform",enterFrom:"-translate-x-full",enterTo:"translate-x-0",leave:"transition ease-in-out duration-300 transform",leaveFrom:"translate-x-0",leaveTo:"-translate-x-full",children:(0,i.jsx)("div",{className:"relative w-4/5 bg-stone-800",children:(0,i.jsxs)("nav",{className:"mt-5 flex flex-col gap-y-2 px-2",children:[(0,i.jsx)(u(),{href:"/",passHref:!0,children:(0,i.jsx)("a",{className:x,onClick:f,children:"Home"},"home")}),n.map(function(e){return(0,i.jsx)(g,{activeClass:h,current:!1,inactiveClass:x,onClick:f,section:e},e)})]})})})]})})]})}),g=(0,d.memo)(function(e){var n=e.section,t=e.current,r=e.inactiveClass,a=e.activeClass,s=e.onClick;return(0,i.jsx)(u(),{href:"#".concat(n),passHref:!0,children:(0,i.jsx)("a",{className:l()(t?a:r),onClick:s,children:n},n)})})},2535:function(e,n,t){"use strict";t.d(n,{e:function(){return a}});var i=t(7294),r=t(7782),a=function(e,n){(0,i.useEffect)(function(){var t=document.querySelectorAll(e),i=Array.from(t),a=document.getElementById(r.yb),s=new IntersectionObserver(function(e){e.forEach(function(e){var t=e.boundingClientRect.y,r=e.target.getAttribute("id");if(a){var s,o={id:r,currentIndex:i.findIndex(function(e){return e.getAttribute("id")===r}),isIntersecting:e.isIntersecting,currentRatio:e.intersectionRatio,aboveToc:t0&&o.belowToc&&n(null===(s=i[o.currentIndex-1])||void 0===s?void 0:s.getAttribute("id"))}})},{root:null,threshold:.1,rootMargin:"0px 0px -70% 0px"});return t.forEach(function(e){s.observe(e)}),function(){s.disconnect()}},[])}},8354:function(e,n,t){"use strict";t.r(n),t.d(n,{HeaderSectionIdList:function(){return g},SectionId:function(){return h}});var i=t(603),r=t(5893),a=t(7294),s=t(9770),o=t(6571),l=t(3031),c=t(11),u=t(2535),d=t(7782),f=t(5675),m=t.n(f),h={About:"about",Demo:"demo",News:"news",Download:"download",Disclaimer:"disclaimer",Publications:"publications"},g=[h.About,h.Demo,h.News,h.Download,h.Publications,],x=[["2023.4",(0,r.jsxs)(r.Fragment,{children:["Our paper is available on ",(0,r.jsx)("a",{className:"underline text-sky-600",href:"https://arxiv.org/pdf/2304.00553.pdf",children:"arXiv"}),"."]}),],["2023.3",(0,r.jsx)(r.Fragment,{children:"Trail run"}),],],A=(0,a.memo)(function(){return(0,r.jsxs)(s.Z,{description:"HRJL",title:"Human Robot Joint Learning",children:[(0,r.jsx)(p,{}),(0,r.jsx)("div",{className:"relative flex h-screen-no w-screen items-center justify-center bg-neutral-100",children:(0,r.jsxs)("div",{className:"flex flex-col z-10 w-full max-w-screen-lg p-4 lg:px-0 items-center text-center ",children:[(0,r.jsx)("div",{className:"h-20"}),(0,r.jsx)("h1",{className:"text-3xl font-bold text-gray-800 sm:text-4xl lg:text-5xl p-4",children:"Human-Agent Joint Learning for Efficient Robot Manipulation Skill Acquisition"}),(0,r.jsx)("p",{className:"text-gray-600 text-2xl",children:"MVIG-RHOS, 
SJTU"})]})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.About,children:(0,r.jsxs)("div",{className:"flex flex-col",children:[(0,r.jsx)("div",{className:"grid justify-items-center pb-8",children:(0,r.jsxs)("div",{className:"w-3/4",children:[(0,r.jsx)(m(),{alt:"demo",src:t(2817),className:"place-self-center"}),(0,r.jsx)(m(),{alt:"demo",src:t(7568),className:"place-self-center"}),(0,r.jsx)(m(),{alt:"demo",src:t(3189),className:"place-self-center"})]})}),(0,r.jsx)("div",{children:"Employing a teleoperation system for gathering demonstrations offers the potential for more efficient learning of robot manipulation. However, teleoperating a robot arm equipped with a dexterous hand or gripper, via a teleoperation system poses significant challenges due to its high dimensionality, complex motions, and differences in physiological structure. In this study, we introduce a novel system for joint learning between human operators and robots, that enables human operators to share control of a robot end-effector with a learned assistive agent, facilitating simultaneous human demonstration collection and robot manipulation teaching. In this setup, as data accumulates, the assistive agent gradually learns. Consequently, less human effort and attention are required, enhancing the efficiency of the data collection process. It also allows the human operator to adjust the control ratio to achieve a trade-off between manual and automated control. We conducted experiments in both simulated environments and physical real-world settings. Through user studies and quantitative evaluations, it is evident that the proposed system could enhance data collection efficiency and reduce the need for human adaptation while ensuring the collected data is of sufficient quality for downstream tasks."})]})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.Demo,children:(0,r.jsx)(c.Z,{title:"Demo",children:(0,r.jsx)("div",{className:"flex flex-col",children:(0,r.jsx)("video",{autoPlay:!0,loop:!0,controls:!0,children:(0,r.jsx)("source",{src:"/media/HAJL.mov",type:"video/mp4"})})})})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.News,children:(0,r.jsx)(c.Z,{title:"News and Olds",children:(0,r.jsx)("div",{className:"flex flex-col",children:x.map(function(e,n){var t=(0,i.Z)(e,2),a=t[0],s=t[1];return(0,r.jsxs)("div",{className:"pb-2",children:[(0,r.jsxs)("span",{className:"flex-1 font-bold sm:flex-none",children:["[",a,"] "]}),(0,r.jsx)("span",{className:"flex-1 sm:flex-none",children:s})]},"".concat(a,"-").concat(n))})})})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.Demo,children:(0,r.jsx)(c.Z,{title:"Results",children:(0,r.jsx)("div",{className:"flex flex-col",children:(0,r.jsx)("div",{children:(0,r.jsx)(m(),{alt:"pentagon",src:t(5434),className:"place-self-center"})})})})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.Download,children:(0,r.jsx)(c.Z,{title:"Download",children:(0,r.jsx)("div",{className:"flex flex-col",children:(0,r.jsx)("div",{children:(0,r.jsx)("p",{children:"Our data and code will come very soon!"})})})})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.Publications,children:(0,r.jsx)(c.Z,{title:"Publications",children:(0,r.jsxs)("div",{className:"flex flex-col divide-y-2",children:[(0,r.jsx)("div",{children:"Before using our data and code in your project, please cite:"}),(0,r.jsx)("div",{className:"text-sm bg-neutral-300 p-2",children:(0,r.jsx)("pre",{children:(0,r.jsx)("code",{children:"@misc{luo2024humanagentjointlearningefficient,\n title={Human-Agent Joint 
Learning for Efficient Robot Manipulation Skill Acquisition}, \n author={Shengcheng Luo and Quanquan Peng and Jun Lv and Kaiwen Hong and Katherine Rose Driggs-Campbell and Cewu Lu and Yong-Lu Li},\n year={2024},\n eprint={2407.00299},\n archivePrefix={arXiv},\n primaryClass={cs.RO},\n}"})})})]})})}),(0,r.jsx)(l.Z,{className:"bg-neutral-100",sectionId:h.Disclaimer,children:(0,r.jsx)(c.Z,{title:"Disclaimer",children:(0,r.jsx)("div",{className:"flex flex-col divide-y-4",children:(0,r.jsxs)("p",{children:[(0,r.jsx)("a",{rel:"license",href:"http://creativecommons.org/licenses/by-nc/4.0/",children:(0,r.jsx)("img",{alt:"Creative Commons License",style:{borderWidth:0},src:"https://i.creativecommons.org/l/by-nc/4.0/88x31.png"})}),(0,r.jsx)("br",{}),"This work is licensed under a ",(0,r.jsx)("a",{className:"text-sky-600",rel:"license",href:"http://creativecommons.org/licenses/by-nc/4.0/",children:"Creative Commons Attribution-NonCommercial 4.0 International License"}),"."]})})})}),(0,r.jsx)(o.Z,{})]})}),p=(0,a.memo)(function(){var e=(0,a.useState)(null),n=e[0],t=e[1],i=(0,a.useMemo)(function(){return g},[]),s=(0,a.useCallback)(function(e){e&&t(e)},[]);return(0,u.e)(i.map(function(e){return"#".concat(e)}).join(","),s),(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(d.zM,{currentSection:n,navSections:i}),(0,r.jsx)(d.jE,{currentSection:n,navSections:i})]})});n.default=A},603:function(e,n,t){"use strict";function i(e,n){(null==n||n>e.length)&&(n=e.length);for(var t=0,i=Array(n);t
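
The abstract embedded in the chunk above describes a shared-control setup in which the operator can "adjust the control ratio to achieve a trade-off between manual and automated control." A common way to realize this kind of shared control is a convex blend of the human command and the assistive agent's prediction; the sketch below illustrates that idea only and is not the project's actual implementation.

// Hypothetical shared-control blend: ratio = 1 is fully manual,
// ratio = 0 is fully automated. All names here are illustrative.
function blendCommand(humanAction, agentAction, ratio) {
  return humanAction.map((h, i) => ratio * h + (1 - ratio) * agentAction[i]);
}

// Example: a 6-DoF end-effector command at 70% human control.
const cmd = blendCommand(
  [0.10, 0.00, 0.20, 0.0, 0.0, 0.5], // human teleop command
  [0.08, 0.01, 0.25, 0.0, 0.0, 0.4], // assistive agent prediction
  0.7
);

As the abstract notes, the assistive agent improves as demonstrations accumulate, so under a scheme like this the operator could lower the ratio over time to reduce human effort and attention.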