Skip to content

Commit

Permalink
version 1.0.3
Browse files Browse the repository at this point in the history
  • Loading branch information
Kevin Zhang committed Dec 26, 2013
1 parent 0738265 commit 423a761
Show file tree
Hide file tree
Showing 3 changed files with 42 additions and 22 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
1.0.3 - 12/25/2013

* Improve logging
* Fix over-recording bug
* Unescape all URLs to reduce risk of duplicate segment downloads (e.g. China Central TV streams)

1.0.2 - 12/25/2013

* Bypass URL parsing for absolute media segment URLs
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
gohls - HTTP Live Streaming (HLS) downloader written in Golang


* Current version: **1.0.2**
* Current version: **1.0.3**
* Author: Kevin Zhang
* License: [GNU GPL version 3](http://www.gnu.org/licenses/gpl-3.0.txt)

Expand Down
56 changes: 35 additions & 21 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import "lru" // https://github.com/golang/groupcache/blob/master/lru/lru.go
import "strings"
import "github.com/grafov/m3u8"

const VERSION = "1.0.2"
const VERSION = "1.0.3"

var USER_AGENT string

Expand All @@ -41,14 +41,19 @@ func doRequest(c *http.Client, req *http.Request) (*http.Response, error) {
return resp, err
}

func downloadSegment(fn string, feed chan string) {
// Download describes one media segment queued for fetching: the segment's
// URI and the cumulative amount of stream time recorded once this segment
// is included (logged against the requested recording duration).
type Download struct {
URI string // segment URL fetched via HTTP GET in downloadSegment
totalDuration time.Duration // running total of recorded time up to and including this segment
}

func downloadSegment(fn string, dlc chan *Download, recTime time.Duration) {
out, err := os.Create(fn)
if err != nil {
log.Fatal(err)
}
defer out.Close()
for v := range feed {
req, err := http.NewRequest("GET", v, nil)
for v := range dlc {
req, err := http.NewRequest("GET", v.URI, nil)
if err != nil {
log.Fatal(err)
}
Expand All @@ -62,13 +67,14 @@ func downloadSegment(fn string, feed chan string) {
log.Fatal(err)
}
resp.Body.Close()
log.Printf("Downloaded %v\n", v)
log.Printf("Downloaded %v\n", v.URI)
log.Printf("Recorded %v of %v\n", v.totalDuration, recTime)
}
}

func getPlaylist(urlStr string, duration time.Duration, useLocalTime bool, feed chan string) {
func getPlaylist(urlStr string, recTime time.Duration, useLocalTime bool, dlc chan *Download) {
startTime := time.Now()
var recTime time.Duration
var recDuration time.Duration = 0
cache := lru.New(64)
playlistUrl, err := url.Parse(urlStr)
if err != nil {
Expand All @@ -95,35 +101,39 @@ func getPlaylist(urlStr string, duration time.Duration, useLocalTime bool, feed
if v != nil {
var msURI string
if strings.HasPrefix(v.URI, "http") {
msURI = v.URI
msURI, err = url.QueryUnescape(v.URI)
if err != nil {
log.Fatal(err)
}
} else {
msUrl, err := playlistUrl.Parse(v.URI)
if err != nil {
log.Print(err)
continue
}
msURI = msUrl.String()
msURI, err = url.QueryUnescape(msUrl.String())
if err != nil {
log.Fatal(err)
}
}
_, hit := cache.Get(msURI)
if !hit {
feed <- msURI
cache.Add(msURI, nil)
log.Printf("Queued %v\n", msURI)
if useLocalTime {
recTime = time.Now().Sub(startTime)
recDuration = time.Now().Sub(startTime)
} else {
recTime += time.Duration(int64(v.Duration * 1000000000))
recDuration += time.Duration(int64(v.Duration * 1000000000))
}
log.Printf("Recorded %v of %v\n", recTime, duration)
dlc <- &Download{msURI, recDuration}
}
if recDuration != 0 && recDuration >= recTime {
close(dlc)
return
}
}
if duration != 0 && recTime > duration {
close(feed)
return
}
}
if mpl.Closed {
close(feed)
close(dlc)
return
} else {
time.Sleep(time.Duration(int64(mpl.TargetDuration * 1000000000)))
Expand All @@ -150,7 +160,11 @@ func main() {
os.Exit(2)
}

msChan := make(chan string, 1024)
if !strings.HasPrefix(flag.Arg(0), "http") {
log.Fatal("Media playlist url must begin with http/https")
}

msChan := make(chan *Download, 1024)
go getPlaylist(flag.Arg(0), *duration, *useLocalTime, msChan)
downloadSegment(flag.Arg(1), msChan)
downloadSegment(flag.Arg(1), msChan, *duration)
}

0 comments on commit 423a761

Please sign in to comment.