{"payload":{"header_redesign_enabled":false,"results":[{"id":"638586991","archived":false,"color":"#3572A5","followers":2,"has_funding_file":false,"hl_name":"AvichalS/Speech-Emotion-Recognition","hl_trunc_description":"Developed a deep learning model using Multi-Layer Perceptron to recognize and classify speech signals into 6 distinct emotions. Extracted…","language":"Python","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":638586991,"name":"Speech-Emotion-Recognition","owner_id":89925042,"owner_login":"AvichalS","updated_at":"2023-06-13T10:08:50.185Z","has_issues":true}},"sponsorable":false,"topics":["deep-learning","tensorflow","keras","librosa","audio-feature-extraction","streamlit"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":93,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253AAvichalS%252FSpeech-Emotion-Recognition%2B%2Blanguage%253APython","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/AvichalS/Speech-Emotion-Recognition/star":{"post":"VR7K4MvbQY0nAfl2geHiRCc1k1h-5leJelg2tZq8yc51jK5SNwger1IUYNB2BSL3q8eYi_vHyHE1oGftf3mxqQ"},"/AvichalS/Speech-Emotion-Recognition/unstar":{"post":"6RwzTWA7Ck0CnvOF-a3pGmdITt6QUbF3S0ksjUMi-iqdGzFPb8qYggGBwEPuFtQ4OC0u04PlgOwOIgz4KuK9jg"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"egAlJbhxlhHF0XSgBgohCRpfbtxB5odwGUWgSVF8eMtssp_cdsGJz3M_-lLrqbZI9X_fLrvMqvUguquSrwZ8tg"}}},"title":"Repository search results"}