/[gxemul]/upstream/0.4.1/src/cpus/cpu_arm_instr_loadstore.c
Revision 29, Mon Oct 8 16:20:32 2007 UTC, committed by dpavlin. Log message: 0.4.1

/*
 *  Copyright (C) 2005-2006 Anders Gavare. All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: cpu_arm_instr_loadstore.c,v 1.20 2006/02/16 19:49:04 debug Exp $
 *
 *
 *  TODO: Many things...
 *
 *  o)  Big-endian ARM loads/stores.
 *
 *  o)  Alignment checks!
 *
 *  o)  Native load/store if the endianness is the same as the host's
 *      (only implemented for little endian, so far, and it assumes that
 *      alignment is correct!)
 *
 *  o)  "Base Updated Abort Model", which updates the base register
 *      even if the memory access failed.
 *
 *  o)  Some ARM implementations use pc+8, some use pc+12 for stores?
 *  o)  Not all load/store variants involving the PC register are really
 *      valid. (E.g. a byte load into the PC register: what should that
 *      accomplish?)
 *
 *  o)  Perhaps an optimization for the case when offset = 0, because
 *      that's quite common, and also when the Reg expression is just
 *      a simple, non-rotated register (0..14).
 */

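/*
 *  A quick overview of the template macros, as they are used below. The
 *  actual combinations are chosen by the file that #includes this one; the
 *  includer sketch and the function name in it are only illustrative:
 *
 *      A__NAME      name of the generated instruction function
 *      A__L         load (otherwise: store)
 *      A__B, A__H   byte resp. halfword access (otherwise: 32-bit word)
 *      A__SIGNED    sign-extend the loaded value
 *      A__U         add the offset to the base (otherwise: subtract it)
 *      A__P         pre-indexed addressing (offset applied before the access)
 *      A__W         with A__P: write the updated address back to the base
 *                   register; with post-indexing (no A__P), writeback always
 *                   happens and A__W instead selects user-mode ("T") access
 *      A__REG       ic->arg[1] is a function that computes the offset
 *                   (otherwise ic->arg[1] is an immediate offset)
 *
 *      ic->arg[0] points to the base register, and ic->arg[2] to the
 *      register being loaded or stored. A__LDRD/A__STRD (doubleword access)
 *      are derived just below. An includer might do something like:
 *
 *          #define A__NAME  arm_instr_load_byte_example   (illustrative name)
 *          #define A__L
 *          #define A__B
 *          #define A__U
 *          #define A__P
 *          #include "cpu_arm_instr_loadstore.c"
 */
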
#if defined(A__SIGNED) && !defined(A__H) && !defined(A__L)
#define A__LDRD
#endif
#if defined(A__SIGNED) && defined(A__H) && !defined(A__L)
#define A__STRD
#endif


/*
 *  General load/store, by using memory_rw(). If at all possible, memory_rw()
 *  then inserts the page into the translation array, so that the fast
 *  load/store routine below can be used for further accesses.
 */
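/*
 *  Note: the !A__P && A__W combination is the post-indexed "translate" form
 *  (the LDRT/STRT style of access), which is why it selects
 *  MEMORY_USER_ACCESS here, and why the fast routine further down checks
 *  the is_userpage array for it.
 */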
void A__NAME__general(struct cpu *cpu, struct arm_instr_call *ic)
{
#if !defined(A__P) && defined(A__W)
        const int memory_rw_flags = CACHE_DATA | MEMORY_USER_ACCESS;
#else
        const int memory_rw_flags = CACHE_DATA;
#endif

#ifdef A__REG
        uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
            = (void *)(size_t)ic->arg[1];
#endif

#if defined(A__STRD) || defined(A__LDRD)
        unsigned char data[8];
        const int datalen = 8;
#else
#ifdef A__B
        unsigned char data[1];
        const int datalen = 1;
#else
#ifdef A__H
        unsigned char data[2];
        const int datalen = 2;
#else
        const int datalen = 4;
#ifdef HOST_LITTLE_ENDIAN
        unsigned char *data = (unsigned char *) ic->arg[2];
#else
        unsigned char data[4];
#endif
#endif
#endif
#endif

        uint32_t addr, low_pc, offset =
#ifndef A__U
            -
#endif
#ifdef A__REG
            reg_func(cpu, ic);
#else
            ic->arg[1];
#endif

        low_pc = ((size_t)ic - (size_t)cpu->cd.arm.
            cur_ic_page) / sizeof(struct arm_instr_call);
        cpu->pc &= ~((ARM_IC_ENTRIES_PER_PAGE-1)
            << ARM_INSTR_ALIGNMENT_SHIFT);
        cpu->pc += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);

        addr = reg(ic->arg[0])
#ifdef A__P
            + offset
#endif
            ;


#if defined(A__L) || defined(A__LDRD)
        /* Load: */
        if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen,
            MEM_READ, memory_rw_flags)) {
                /* load failed, an exception was generated */
                return;
        }
#if defined(A__B) && !defined(A__LDRD)
        reg(ic->arg[2]) =
#ifdef A__SIGNED
            (int32_t)(int8_t)
#endif
            data[0];
#else
#if defined(A__H) && !defined(A__LDRD)
        reg(ic->arg[2]) =
#ifdef A__SIGNED
            (int32_t)(int16_t)
#endif
            (data[0] + (data[1] << 8));
#else
#ifndef A__LDRD
#ifdef HOST_LITTLE_ENDIAN
        /* Nothing. */
#else
        reg(ic->arg[2]) = data[0] + (data[1] << 8) +
            (data[2] << 16) + (data[3] << 24);
#endif
#else
        reg(ic->arg[2]) = data[0] + (data[1] << 8) +
            (data[2] << 16) + (data[3] << 24);
        reg(((uint32_t *)ic->arg[2]) + 1) = data[4] + (data[5] << 8) +
            (data[6] << 16) + (data[7] << 24);
#endif
#endif
#endif
#else
        /* Store: */
#if !defined(A__B) && !defined(A__H) && defined(HOST_LITTLE_ENDIAN)
#ifdef A__STRD
        *(uint32_t *)data = reg(ic->arg[2]);
        *(uint32_t *)(data + 4) = reg(ic->arg[2] + 4);
#endif
#else
        data[0] = reg(ic->arg[2]);
#ifndef A__B
        data[1] = reg(ic->arg[2]) >> 8;
#if !defined(A__H) || defined(A__STRD)
        data[2] = reg(ic->arg[2]) >> 16;
        data[3] = reg(ic->arg[2]) >> 24;
#ifdef A__STRD
        data[4] = reg(ic->arg[2] + 4);
        data[5] = reg(ic->arg[2] + 4) >> 8;
        data[6] = reg(ic->arg[2] + 4) >> 16;
        data[7] = reg(ic->arg[2] + 4) >> 24;
#endif
#endif
#endif
#endif
        if (!cpu->memory_rw(cpu, cpu->mem, addr, data, datalen,
            MEM_WRITE, memory_rw_flags)) {
                /* store failed, an exception was generated */
                return;
        }
#endif

#ifdef A__P
#ifdef A__W
        reg(ic->arg[0]) = addr;
#endif
#else   /* post-index writeback */
        reg(ic->arg[0]) = addr + offset;
#endif
}


/*
 *  Fast load/store, if the page is in the translation array.
 */
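/*
 *  (The host_load and host_store arrays map each 4 KB page of the emulated
 *  address space, indexed by addr >> 12, to a direct host pointer when the
 *  page has been translated; the access then becomes a plain host read or
 *  write of page[addr & 0xfff]. A NULL entry means the page is not in the
 *  translation array, so we fall back to A__NAME__general() above.)
 */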
void A__NAME(struct cpu *cpu, struct arm_instr_call *ic)
{
#if defined(A__LDRD) || defined(A__STRD)
        /* Chicken out, let's do this unoptimized for now: */
        A__NAME__general(cpu, ic);
#else
#ifdef A__REG
        uint32_t (*reg_func)(struct cpu *, struct arm_instr_call *)
            = (void *)(size_t)ic->arg[1];
#endif
        uint32_t offset =
#ifndef A__U
            -
#endif
#ifdef A__REG
            reg_func(cpu, ic);
#else
            ic->arg[1];
#endif
        uint32_t addr = reg(ic->arg[0])
#ifdef A__P
            + offset
#endif
            ;
        unsigned char *page = cpu->cd.arm.
#ifdef A__L
            host_load
#else
            host_store
#endif
            [addr >> 12];


#if !defined(A__P) && defined(A__W)
        /*
         *  T-bit: userland access: check the corresponding bit in the
         *  is_userpage array. If it is set, then we're ok. Otherwise: use the
         *  generic function.
         */
        uint32_t x = cpu->cd.arm.is_userpage[addr >> 17];
        if (!(x & (1 << ((addr >> 12) & 31))))
                A__NAME__general(cpu, ic);
        else
#endif


        if (page == NULL) {
                A__NAME__general(cpu, ic);
        } else {
#ifdef A__L
#ifdef A__B
                reg(ic->arg[2]) =
#ifdef A__SIGNED
                    (int32_t)(int8_t)
#endif
                    page[addr & 0xfff];
#else
#ifdef A__H
                reg(ic->arg[2]) =
#ifdef A__SIGNED
                    (int32_t)(int16_t)
#endif
                    (page[addr & 0xfff] + (page[(addr & 0xfff) + 1] << 8));
#else
#ifdef HOST_LITTLE_ENDIAN
                reg(ic->arg[2]) = *(uint32_t *)(page + (addr & 0xffc));
#else
                reg(ic->arg[2]) = page[addr & 0xfff] +
                    (page[(addr & 0xfff) + 1] << 8) +
                    (page[(addr & 0xfff) + 2] << 16) +
                    (page[(addr & 0xfff) + 3] << 24);
#endif
#endif
#endif
#else
#ifdef A__B
                page[addr & 0xfff] = reg(ic->arg[2]);
#else
#ifdef A__H
                page[addr & 0xfff] = reg(ic->arg[2]);
                page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
#else
#ifdef HOST_LITTLE_ENDIAN
                *(uint32_t *)(page + (addr & 0xffc)) = reg(ic->arg[2]);
#else
                page[addr & 0xfff] = reg(ic->arg[2]);
                page[(addr & 0xfff)+1] = reg(ic->arg[2]) >> 8;
                page[(addr & 0xfff)+2] = reg(ic->arg[2]) >> 16;
                page[(addr & 0xfff)+3] = reg(ic->arg[2]) >> 24;
#endif
#endif
#endif
#endif

                /* Index Write-back: */
#ifdef A__P
#ifdef A__W
                reg(ic->arg[0]) = addr;
#endif
#else
                /* post-index writeback */
                reg(ic->arg[0]) = addr + offset;
#endif
        }
#endif  /* not LDRD/STRD */
}


/*
 *  Special case when loading or storing the ARM's PC register, or when the PC
 *  register is used as the base address register.
 *
 *  o)  Loads into the PC register cause a branch. If an exception occurred
 *      during the load, then the PC register should already point to the
 *      exception handler, in which case we simply recalculate the pointers a
 *      second time (no harm is done by doing that).
 *
 *      TODO: A tiny performance optimization would be to separate the two
 *      cases: a load where arg[0] = PC, and the case where arg[2] = PC.
 *
 *  o)  Stores store "PC of the current instruction + 12". The solution I have
 *      chosen is to calculate this value and place it into a temporary
 *      variable (tmp_pc), which is then used for the store.
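 *
 *      For example: with this model, an instruction at address 0x8000 that
 *      stores the PC writes the value 0x800c to memory, while an instruction
 *      at 0x8000 that uses the PC as its base address register uses 0x8008
 *      as the base.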
 */
void A__NAME_PC(struct cpu *cpu, struct arm_instr_call *ic)
{
#ifdef A__L
        /* Load: */
        if (ic->arg[0] == (size_t)(&cpu->cd.arm.tmp_pc)) {
                /* tmp_pc = current PC + 8: */
                uint32_t low_pc, tmp;
                low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
                    sizeof(struct arm_instr_call);
                tmp = cpu->pc & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
                    ARM_INSTR_ALIGNMENT_SHIFT);
                tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
                cpu->cd.arm.tmp_pc = tmp + 8;
        }
        A__NAME(cpu, ic);
        if (ic->arg[2] == (size_t)(&cpu->cd.arm.r[ARM_PC])) {
                cpu->pc = cpu->cd.arm.r[ARM_PC];
                quick_pc_to_pointers(cpu);
                if (cpu->machine->show_trace_tree)
                        cpu_functioncall_trace(cpu, cpu->pc);
        }
#else
        /* Store: */
        uint32_t low_pc, tmp;
        /* Calculate tmp from this instruction's PC + 12 */
        low_pc = ((size_t)ic - (size_t) cpu->cd.arm.cur_ic_page) /
            sizeof(struct arm_instr_call);
        tmp = cpu->pc & ~((ARM_IC_ENTRIES_PER_PAGE-1) <<
            ARM_INSTR_ALIGNMENT_SHIFT);
        tmp += (low_pc << ARM_INSTR_ALIGNMENT_SHIFT);
        cpu->cd.arm.tmp_pc = tmp + 12;
        A__NAME(cpu, ic);
#endif
}


#ifndef A__NOCONDITIONS
/* Load/stores with all registers except the PC register: */
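/*
 *  (For reference, the condition codes test the flags as follows: eq: Z set;
 *  ne: Z clear; cs: C set; cc: C clear; mi: N set; pl: N clear; vs: V set;
 *  vc: V clear; hi: C set and Z clear; ls: C clear or Z set; ge: N == V;
 *  lt: N != V; gt: N == V and Z clear; le: N != V or Z set.)
 */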
void A__NAME__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_Z) A__NAME(cpu, ic); }
void A__NAME__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); }
void A__NAME__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_C) A__NAME(cpu, ic); }
void A__NAME__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_C)) A__NAME(cpu, ic); }
void A__NAME__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_N) A__NAME(cpu, ic); }
void A__NAME__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_N)) A__NAME(cpu, ic); }
void A__NAME__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_V) A__NAME(cpu, ic); }
void A__NAME__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_V)) A__NAME(cpu, ic); }

void A__NAME__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_C &&
    !(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); }
void A__NAME__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_Z ||
    !(cpu->cd.arm.flags & ARM_F_C)) A__NAME(cpu, ic); }
void A__NAME__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) ==
    ((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) !=
    ((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME(cpu, ic); }
void A__NAME__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) ==
    ((cpu->cd.arm.flags & ARM_F_V)?1:0) &&
    !(cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); }
void A__NAME__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) !=
    ((cpu->cd.arm.flags & ARM_F_V)?1:0) ||
    (cpu->cd.arm.flags & ARM_F_Z)) A__NAME(cpu, ic); }


/* Load/stores with the PC register: */
void A__NAME_PC__eq(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_Z) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ne(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_C) A__NAME_PC(cpu, ic); }
void A__NAME_PC__cc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__mi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_N) A__NAME_PC(cpu, ic); }
void A__NAME_PC__pl(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_N)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vs(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_V) A__NAME_PC(cpu, ic); }
void A__NAME_PC__vc(struct cpu *cpu, struct arm_instr_call *ic)
{ if (!(cpu->cd.arm.flags & ARM_F_V)) A__NAME_PC(cpu, ic); }

void A__NAME_PC__hi(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_C &&
    !(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ls(struct cpu *cpu, struct arm_instr_call *ic)
{ if (cpu->cd.arm.flags & ARM_F_Z ||
    !(cpu->cd.arm.flags & ARM_F_C)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__ge(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) ==
    ((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__lt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) !=
    ((cpu->cd.arm.flags & ARM_F_V)?1:0)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__gt(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) ==
    ((cpu->cd.arm.flags & ARM_F_V)?1:0) &&
    !(cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); }
void A__NAME_PC__le(struct cpu *cpu, struct arm_instr_call *ic)
{ if (((cpu->cd.arm.flags & ARM_F_N)?1:0) !=
    ((cpu->cd.arm.flags & ARM_F_V)?1:0) ||
    (cpu->cd.arm.flags & ARM_F_Z)) A__NAME_PC(cpu, ic); }
#endif


#ifdef A__LDRD
#undef A__LDRD
#endif

#ifdef A__STRD
#undef A__STRD
#endif
