/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_tools.h"
#include "ipz_pt_fn.h"
#include "ehca_classes.h"

#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)

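/*
 * Worked example (illustrative): with 4K kernel pages and 4K eHCA pages
 * (EHCA_PAGESHIFT == 12), PAGES_PER_KPAGE is 1; with the 64K kernel
 * pages commonly configured on POWER it is 64K >> 12 = 16, so a single
 * kernel page backs 16 eHCA queue pages.
 */
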
struct kmem_cache *small_qp_cache;

/*
 * Return a pointer to the current queue page and advance the iterator
 * by one page; returns NULL once the end of the queue is reached.
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
        void *ret = ipz_qeit_get(queue);
        queue->current_q_offset += queue->pagesize;
        if (queue->current_q_offset > queue->queue_length) {
                queue->current_q_offset -= queue->pagesize;
                ret = NULL;
        }
        if (((u64)ret) % queue->pagesize) {
                ehca_gen_err("ERROR!! not at page boundary");
                return NULL;
        }
        return ret;
}
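
/*
 * Illustrative usage sketch (not part of the driver): a caller that
 * needs to hand every queue page to the HCA could iterate like this,
 * assuming current_q_offset has been reset to 0 beforehand.
 * register_page_with_hca() is a hypothetical helper.
 *
 *        void *vpage;
 *
 *        while ((vpage = ipz_qpageit_get_inc(queue)))
 *                register_page_with_hca(queue, vpage);
 */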

/*
 * Return the current event queue entry and advance the iterator; wraps
 * back to offset 0 and flips toggle_state when the end of the queue is
 * passed.
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
        void *ret = ipz_qeit_get(queue);
        u64 last_entry_in_q = queue->queue_length - queue->qe_size;

        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset > last_entry_in_q) {
                queue->current_q_offset = 0;
                queue->toggle_state = (~queue->toggle_state) & 1;
        }

        return ret;
}
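
/*
 * Sketch (illustrative): the toggle bit lets a consumer tell whether an
 * entry was written during the current pass over the ring or is stale
 * from the previous pass. eqe_is_valid() and process_eqe() are
 * hypothetical helpers; the first compares an entry's toggle bit with
 * queue->toggle_state.
 *
 *        void *eqe = ipz_qeit_eq_get_inc(queue);
 *
 *        if (eqe_is_valid(eqe, queue->toggle_state))
 *                process_eqe(eqe);
 */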

/*
 * Translate an absolute address into an offset within the queue;
 * returns -EINVAL if the address is not backed by any page of this
 * queue.
 */
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
        int i;
        for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
                u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
                if (addr >= page && addr < page + queue->pagesize) {
                        *q_offset = addr - page + i * queue->pagesize;
                        return 0;
                }
        }
        return -EINVAL;
}
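
/*
 * Example (illustrative): translating an absolute address reported by
 * firmware back into a queue offset. fw_addr is a hypothetical input.
 *
 *        u64 q_ofs;
 *
 *        if (ipz_queue_abs_to_offset(queue, fw_addr, &q_ofs))
 *                ehca_gen_err("address not in queue");
 */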

#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif

/*
 * Allocate pages for a queue: the outer loop allocates whole kernel
 * pages (page aligned) and the inner loop divides each kernel page into
 * smaller eHCA queue pages.
 */
static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
        int k, f = 0;
        u8 *kpage;

        while (f < nr_of_pages) {
                kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (!kpage)
                        goto out;

                for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
                        queue->queue_pages[f] = (struct ipz_page *)kpage;
                        kpage += EHCA_PAGESIZE;
                        f++;
                }
        }
        return 1;

out:
        /* free only the kernel page starts, i.e. every PAGES_PER_KPAGE-th
         * entry that was actually allocated */
        for (f = 0; f < nr_of_pages && queue->queue_pages[f];
             f += PAGES_PER_KPAGE)
                free_page((unsigned long)(queue->queue_pages)[f]);
        return 0;
}
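
/*
 * Worked example (illustrative): with 64K kernel pages
 * (PAGES_PER_KPAGE == 16), a request for nr_of_pages == 20 allocates
 * two kernel pages; the first backs queue_pages[0..15], the second
 * queue_pages[16..19], with the inner loop stopping early once f
 * reaches nr_of_pages.
 */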

static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
        int order = ilog2(queue->pagesize) - 9;
        struct ipz_small_queue_page *page;
        unsigned long bit;

        mutex_lock(&pd->lock);

        if (!list_empty(&pd->free[order])) {
                page = list_entry(pd->free[order].next,
                                  struct ipz_small_queue_page, list);
        } else {
                page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
                if (!page)
                        goto out;

                page->page = get_zeroed_page(GFP_KERNEL);
                if (!page->page) {
                        kmem_cache_free(small_qp_cache, page);
                        goto out;
                }

                list_add(&page->list, &pd->free[order]);
        }

        bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
        __set_bit(bit, page->bitmap);
        page->fill++;

        if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
                list_move(&page->list, &pd->full[order]);

        mutex_unlock(&pd->lock);

        queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
        queue->small_page = page;
        queue->offset = bit << (order + 9);
        return 1;

out:
        /* the error gotos above are taken with pd->lock held */
        mutex_unlock(&pd->lock);
        ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
        return 0;
}
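
/*
 * Worked example (illustrative): a small queue with pagesize 512 gives
 * order = ilog2(512) - 9 = 0, so one kernel page is carved into
 * IPZ_SPAGE_PER_KPAGE chunks; pagesize 1024 gives order = 1 and half as
 * many chunks. bit << (order + 9) is then the byte offset of the
 * allocated chunk within its kernel page.
 */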

static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
        int order = ilog2(queue->pagesize) - 9;
        struct ipz_small_queue_page *page = queue->small_page;
        unsigned long bit;
        /* set when the last chunk in the kernel page is freed */
        int free_whole_page = 0;

        bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
                >> (order + 9);

        mutex_lock(&pd->lock);

        __clear_bit(bit, page->bitmap);
        page->fill--;

        if (page->fill == 0) {
                list_del(&page->list);
                free_whole_page = 1;
        }

        if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
                /* the page was full until we freed the chunk */
                list_move_tail(&page->list, &pd->free[order]);

        mutex_unlock(&pd->lock);

        if (free_whole_page) {
                free_page(page->page);
                kmem_cache_free(small_qp_cache, page);
        }
}

int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
                   const u32 nr_of_pages, const u32 pagesize,
                   const u32 qe_size, const u32 nr_of_sg,
                   int is_small)
{
        if (pagesize > PAGE_SIZE) {
                ehca_gen_err("FATAL ERROR: pagesize=%x "
                             "is greater than kernel page size", pagesize);
                return 0;
        }

        /* init queue fields */
        queue->queue_length = nr_of_pages * pagesize;
        queue->pagesize = pagesize;
        queue->qe_size = qe_size;
        queue->act_nr_of_sg = nr_of_sg;
        queue->current_q_offset = 0;
        queue->toggle_state = 1;
        queue->small_page = NULL;

        /* allocate queue page pointers */
        queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
        if (!queue->queue_pages) {
                ehca_gen_err("Couldn't allocate queue page list");
                return 0;
        }
        memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));

        /* allocate actual queue pages */
        if (is_small) {
                if (!alloc_small_queue_page(queue, pd))
                        goto ipz_queue_ctor_exit0;
        } else {
                if (!alloc_queue_pages(queue, nr_of_pages))
                        goto ipz_queue_ctor_exit0;
        }

        return 1;

ipz_queue_ctor_exit0:
        ehca_gen_err("Couldn't alloc pages queue=%p nr_of_pages=%x",
                     queue, nr_of_pages);
        vfree(queue->queue_pages);

        return 0;
}

int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
        int i, nr_pages;

        if (!queue || !queue->queue_pages) {
                ehca_gen_dbg("queue or queue_pages is NULL");
                return 0;
        }

        if (queue->small_page) {
                free_small_queue_page(queue, pd);
        } else {
                nr_pages = queue->queue_length / queue->pagesize;
                for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
                        free_page((unsigned long)queue->queue_pages[i]);
        }

        vfree(queue->queue_pages);

        return 1;
}
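
/*
 * Usage sketch (illustrative, variable names hypothetical): queues are
 * built with ipz_queue_ctor() and torn down with ipz_queue_dtor()
 * against the same protection domain. Note that, unlike most kernel
 * APIs, both return 1 on success and 0 on failure.
 *
 *        if (!ipz_queue_ctor(pd, &queue, nr_pages, EHCA_PAGESIZE,
 *                            qe_size, 0, 0))
 *                return -ENOMEM;
 *        ...
 *        ipz_queue_dtor(pd, &queue);
 */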

int ehca_init_small_qp_cache(void)
{
        small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
                                           sizeof(struct ipz_small_queue_page),
                                           0, SLAB_HWCACHE_ALIGN, NULL);
        if (!small_qp_cache)
                return -ENOMEM;

        return 0;
}

void ehca_cleanup_small_qp_cache(void)
{
        kmem_cache_destroy(small_qp_cache);
}
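
/*
 * Sketch (illustrative): unlike the queue ctor/dtor above, the cache
 * init follows the usual 0/-errno convention, so module init code can
 * propagate its result directly; ehca_cleanup_small_qp_cache() belongs
 * in the matching exit path.
 *
 *        ret = ehca_init_small_qp_cache();
 *        if (ret)
 *                return ret;
 */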