diff --git a/queue/README.md b/queue/README.md index 890bb9e..1710dc8 100644 --- a/queue/README.md +++ b/queue/README.md @@ -75,7 +75,17 @@ func dequeue() (int, error) { ## Complexity -Enqueue and dequeue operations both perform in O(1) times. +Enqueue and dequeue operations both perform in O(1) time in the linked list implementation. In traditional languages such as C, the linked list approach is considered to be faster than the slice approach because both enqueue and dequeue operations are O(n) due to the slice size change. In Go, however, slices are managed intelligently behind the scenes and perform very well for just about all purposes. + +We can use Go's built-in benchmarking tooling to see which implementation is faster. This is done in [slice_vs_linked_list_bench_test.go](./slice_vs_linked_list_bench_test.go). It can be executed by running `go test -bench=. -test.benchmem` in this directory. The output shows that the slice implementation is almost seven times faster. + +```Shell +pkg: github.com/spring1843/go-dsa/queue +BenchmarkLinkedListQueue-8 17956176 83.73 ns/op 56 B/op 1 allocs/op +BenchmarkSliceQueue-8 100000000 11.79 ns/op 45 B/op 0 allocs/op +PASS +ok github.com/spring1843/go-dsa/queue 3.775s +``` ## Application diff --git a/queue/slice_vs_linked_list_bench_test.go b/queue/slice_vs_linked_list_bench_test.go new file mode 100644 index 0000000..e9bae0b --- /dev/null +++ b/queue/slice_vs_linked_list_bench_test.go @@ -0,0 +1,63 @@ +package queue + +import ( + "container/list" + "errors" + "testing" +) + +type ( + linkedListQueue struct { + items *list.List + } + + sliceQueue []int +) + +func BenchmarkLinkedListQueue(b *testing.B) { + q := newLinkedListQueue() + for n := 0; n < b.N; n++ { + q.enqueue(n) + } + for n := 0; n < b.N; n++ { + q.dequeue() + } +} + +func BenchmarkSliceQueue(b *testing.B) { + q := sliceQueue{} + for n := 0; n < b.N; n++ { + q.enqueue(n) + } + for n := 0; n < b.N; n++ { + q.dequeue() + } +} + +func (q *sliceQueue) enqueue(val 
int) { + *q = append(*q, val) +} + +func (q *sliceQueue) dequeue() (int, error) { + if len(*q) == 0 { + return 0, errors.New("queue is empty") + } + value := (*q)[0] + *q = (*q)[1:] + return value, nil +} + +func newLinkedListQueue() *linkedListQueue { + return &linkedListQueue{items: list.New()} +} + +func (q *linkedListQueue) enqueue(val int) { + q.items.PushBack(val) +} + +func (q *linkedListQueue) dequeue() (int, error) { + if q.items.Len() == 0 { + return 0, errors.New("queue is empty") + } + return q.items.Remove(q.items.Front()).(int), nil +} diff --git a/stack/README.md b/stack/README.md index 4512933..4c1274d 100644 --- a/stack/README.md +++ b/stack/README.md @@ -75,6 +75,8 @@ func pop() (int, error) { Push and pop operations in stacks are considered O(1) operations, making them highly efficient. Additionally, many machines have built-in stack instruction sets, further increasing their performance. Stacks' unique efficiency and usefulness have solidified their place as one of the most fundamental data structures, second only to [arrays](../array). +Resizing the slice and item shifting may be necessary in the slice implementation, hence traditionally this implementation is seen as O(n). As shown in the complexity section of [queue](../queue/README.md), because of the intelligent way Go resizes slices, this is not a problem and the slice implementation of both stack and queue will perform better than the linked list implementation. + ## Application Stacks are helpful when LIFO operations are desired. Many [graph](../graph) problems are solved with stacks.