{"payload":{"header_redesign_enabled":false,"results":[{"id":"804580736","archived":false,"color":"#3572A5","followers":3,"has_funding_file":false,"hl_name":"amazon-science/mada_optimizer_search","hl_trunc_description":"Code the ICML 2024 paper: \"MADA: Meta-Adaptive Optimizers through hyper-gradient Descent\"","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":804580736,"name":"mada_optimizer_search","owner_id":70298811,"owner_login":"amazon-science","updated_at":"2024-07-03T20:37:49.071Z","has_issues":true}},"sponsorable":false,"topics":["machine-learning","deep-neural-networks","optimization","machine-learning-algorithms","optimization-algorithms","adam-optimizer","gpt-2","meta-optimizer","large-language-models"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":73,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aamazon-science%252Fmada_optimizer_search%2B%2Blanguage%253APython","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/amazon-science/mada_optimizer_search/star":{"post":"WQUfxc4EaEf3529VIhqvmclrZK3whefxiOMmry-axTQQ4-teaaBZvPcFrvyuqohdhiZlGRecVBd7_FtRku9qLg"},"/amazon-science/mada_optimizer_search/unstar":{"post":"YOaJREICyJDJsnwDAN-go6XZqtMWmnplSZ3XZe817cwFcEmHT8lYBTJ8_5NtG6_xMfAQf2DSMLItiVsFSVFqgA"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"7JeT2dlI8KjZCheKHAue7mUhbnWV8sThCxSDacLEReXrmCQ078F33548zCdNMthVPjxRmdK32xobi28AA9fQWQ"}}},"title":"Repository search results"}