1 ;;- Machine description for HP PA-RISC architecture for GCC compiler
2 ;; Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 ;; 2002, 2003 Free Software Foundation, Inc.
4 ;; Contributed by the Center for Software Science at the University
5 ;; of Utah.
6
7 ;; This file is part of GCC.
8
9 ;; GCC is free software; you can redistribute it and/or modify
10 ;; it under the terms of the GNU General Public License as published by
11 ;; the Free Software Foundation; either version 2, or (at your option)
12 ;; any later version.
13
14 ;; GCC is distributed in the hope that it will be useful,
15 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
16 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 ;; GNU General Public License for more details.
18
19 ;; You should have received a copy of the GNU General Public License
20 ;; along with GCC; see the file COPYING. If not, write to
21 ;; the Free Software Foundation, 59 Temple Place - Suite 330,
22 ;; Boston, MA 02111-1307, USA.
23
24 ;; This gcc Version 2 machine description is inspired by sparc.md and
25 ;; mips.md.
26
27 ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
28
29 ;; Insn type. Used to default other attribute values.
30
31 ;; type "unary" insns have one input operand (1) and one output operand (0)
32 ;; type "binary" insns have two input operands (1,2) and one output (0)
33
34 (define_attr "type"
35 "move,unary,binary,shift,nullshift,compare,load,store,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,fpload,fpstore,fpalu,fpcc,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,multi,milli,parallel_branch"
36 (const_string "binary"))
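
;; As an illustrative sketch only (not a pattern used by this port), an
;; individual define_insn overrides the "binary" default by setting the
;; attribute explicitly, e.g.:
;;
;;   (define_insn "..."
;;     [(set (match_operand:SI 0 "register_operand" "=r")
;;           (match_operand:SI 1 "register_operand" "r"))]
;;     ""
;;     "copy %1,%0"
;;     [(set_attr "type" "move")
;;      (set_attr "length" "4")])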
37
38 (define_attr "pa_combine_type"
39 "fmpy,faddsub,uncond_branch,addmove,none"
40 (const_string "none"))
41
42 ;; Processor type (for scheduling, not code generation) -- this attribute
43 ;; must exactly match the processor_type enumeration in pa.h.
44 ;;
45 ;; FIXME: Add 800 scheduling for completeness?
46
47 (define_attr "cpu" "700,7100,7100LC,7200,7300,8000" (const (symbol_ref "pa_cpu_attr")))
48
49 ;; Length (in # of bytes).
50 (define_attr "length" ""
51 (cond [(eq_attr "type" "load,fpload")
52 (if_then_else (match_operand 1 "symbolic_memory_operand" "")
53 (const_int 8) (const_int 4))
54
55 (eq_attr "type" "store,fpstore")
56 (if_then_else (match_operand 0 "symbolic_memory_operand" "")
57 (const_int 8) (const_int 4))
58
59 (eq_attr "type" "binary,shift,nullshift")
60 (if_then_else (match_operand 2 "arith_operand" "")
61 (const_int 4) (const_int 12))
62
63 (eq_attr "type" "move,unary,shift,nullshift")
64 (if_then_else (match_operand 1 "arith_operand" "")
65 (const_int 4) (const_int 8))]
66
67 (const_int 4)))
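
;; For example (an illustrative sketch, not a guarantee about the exact
;; sequences pa.c emits): a load whose address is a symbolic_memory_operand
;; is assumed to need a two-instruction sequence, roughly
;;
;;   addil LR'sym,%r27
;;   ldw RR'sym(%r1),%r19
;;
;; hence the 8-byte length above, while a simple register+displacement
;; load is a single 4-byte ldw.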
68
69 (define_asm_attributes
70 [(set_attr "length" "4")
71 (set_attr "type" "multi")])
72
73 ;; Attributes for instruction and branch scheduling
74
75 ;; For conditional branches.
76 (define_attr "in_branch_delay" "false,true"
77 (if_then_else (and (eq_attr "type" "!uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch")
78 (eq_attr "length" "4"))
79 (const_string "true")
80 (const_string "false")))
81
82 ;; Disallow instructions which use the FPU since they will tie up the FPU
83 ;; even if the instruction is nullified.
84 (define_attr "in_nullified_branch_delay" "false,true"
85 (if_then_else (and (eq_attr "type" "!uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,parallel_branch")
86 (eq_attr "length" "4"))
87 (const_string "true")
88 (const_string "false")))
89
90 ;; For calls and millicode calls. Allow unconditional branches in the
91 ;; delay slot.
92 (define_attr "in_call_delay" "false,true"
93 (cond [(and (eq_attr "type" "!uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch")
94 (eq_attr "length" "4"))
95 (const_string "true")
96 (eq_attr "type" "uncond_branch")
97 (if_then_else (ne (symbol_ref "TARGET_JUMP_IN_DELAY")
98 (const_int 0))
99 (const_string "true")
100 (const_string "false"))]
101 (const_string "false")))
102
103
104 ;; Call delay slot description.
105 (define_delay (eq_attr "type" "call")
106 [(eq_attr "in_call_delay" "true") (nil) (nil)])
107
108 ;; Millicode call delay slot description.
109 (define_delay (eq_attr "type" "milli")
110 [(eq_attr "in_call_delay" "true") (nil) (nil)])
111
112 ;; Return and other similar instructions.
113 (define_delay (eq_attr "type" "btable_branch,branch,parallel_branch")
114 [(eq_attr "in_branch_delay" "true") (nil) (nil)])
115
116 ;; Floating point conditional branch delay slot description.
117 (define_delay (eq_attr "type" "fbranch")
118 [(eq_attr "in_branch_delay" "true")
119 (eq_attr "in_nullified_branch_delay" "true")
120 (nil)])
121
122 ;; Integer conditional branch delay slot description.
123 ;; Nullification of conditional branches on the PA depends on the branch
124 ;; direction. Forward branches can only annul the delay slot when the
125 ;; branch is taken (annul-if-true); backward branches only when it is not
126 ;; taken (annul-if-false). If the direction is unknown, nullification is not allowed.
127 (define_delay (eq_attr "type" "cbranch")
128 [(eq_attr "in_branch_delay" "true")
129 (and (eq_attr "in_nullified_branch_delay" "true")
130 (attr_flag "forward"))
131 (and (eq_attr "in_nullified_branch_delay" "true")
132 (attr_flag "backward"))])
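
;; As a concrete illustration (a sketch only; the actual output is produced
;; by output_cbranch in pa.c), a forward conditional branch with an annulled
;; delay slot might look like
;;
;;   comb,=,n %r26,%r25,L$0010   ; delay insn nullified when the branch is taken
;;   ldo 4(%r19),%r19            ; executed only on the fall-through path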
133
134 (define_delay (and (eq_attr "type" "uncond_branch")
135 (eq (symbol_ref "following_call (insn)")
136 (const_int 0)))
137 [(eq_attr "in_branch_delay" "true") (nil) (nil)])
138
139 ;; Memory. Disregarding cache misses, the Mustang memory times are:
140 ;; load: 2, fpload: 3
141 ;; store, fpstore: 3, during which no D-cache operations should be scheduled.
142
143 ;; The Timex (aka 700) has two floating-point units: ALU, and MUL/DIV/SQRT.
144 ;; Timings:
145 ;; Instruction Time Unit Minimum Distance (unit contention)
146 ;; fcpy 3 ALU 2
147 ;; fabs 3 ALU 2
148 ;; fadd 3 ALU 2
149 ;; fsub 3 ALU 2
150 ;; fcmp 3 ALU 2
151 ;; fcnv 3 ALU 2
152 ;; fmpyadd 3 ALU,MPY 2
153 ;; fmpysub 3 ALU,MPY 2
154 ;; fmpycfxt 3 ALU,MPY 2
155 ;; fmpy 3 MPY 2
156 ;; fmpyi 3 MPY 2
157 ;; fdiv,sgl 10 MPY 10
158 ;; fdiv,dbl 12 MPY 12
159 ;; fsqrt,sgl 14 MPY 14
160 ;; fsqrt,dbl 18 MPY 18
161 ;;
162 ;; We don't model fmpyadd/fmpysub properly as those instructions
163 ;; keep both the FP ALU and MPY units busy. Given that these
164 ;; processors are obsolete, I'm not going to spend the time to
165 ;; model those instructions correctly.
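;;
;; In the reservations that follow, the latency argument of each
;; define_insn_reservation corresponds to the "Time" column above, while
;; the reservation string models the "Minimum Distance" column; for
;; instance, "fpmpy_700*10" in the single precision divide reservation
;; keeps the multiply/divide unit busy for 10 consecutive cycles.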
166
167 (define_automaton "pa700")
168 (define_cpu_unit "dummy_700,mem_700,fpalu_700,fpmpy_700" "pa700")
169
170 (define_insn_reservation "W0" 4
171 (and (eq_attr "type" "fpcc")
172 (eq_attr "cpu" "700"))
173 "fpalu_700*2")
174
175 (define_insn_reservation "W1" 3
176 (and (eq_attr "type" "fpalu")
177 (eq_attr "cpu" "700"))
178 "fpalu_700*2")
179
180 (define_insn_reservation "W2" 3
181 (and (eq_attr "type" "fpmulsgl,fpmuldbl")
182 (eq_attr "cpu" "700"))
183 "fpmpy_700*2")
184
185 (define_insn_reservation "W3" 10
186 (and (eq_attr "type" "fpdivsgl")
187 (eq_attr "cpu" "700"))
188 "fpmpy_700*10")
189
190 (define_insn_reservation "W4" 12
191 (and (eq_attr "type" "fpdivdbl")
192 (eq_attr "cpu" "700"))
193 "fpmpy_700*12")
194
195 (define_insn_reservation "W5" 14
196 (and (eq_attr "type" "fpsqrtsgl")
197 (eq_attr "cpu" "700"))
198 "fpmpy_700*14")
199
200 (define_insn_reservation "W6" 18
201 (and (eq_attr "type" "fpsqrtdbl")
202 (eq_attr "cpu" "700"))
203 "fpmpy_700*18")
204
205 (define_insn_reservation "W7" 2
206 (and (eq_attr "type" "load")
207 (eq_attr "cpu" "700"))
208 "mem_700")
209
210 (define_insn_reservation "W8" 2
211 (and (eq_attr "type" "fpload")
212 (eq_attr "cpu" "700"))
213 "mem_700")
214
215 (define_insn_reservation "W9" 3
216 (and (eq_attr "type" "store")
217 (eq_attr "cpu" "700"))
218 "mem_700*3")
219
220 (define_insn_reservation "W10" 3
221 (and (eq_attr "type" "fpstore")
222 (eq_attr "cpu" "700"))
223 "mem_700*3")
224
225 (define_insn_reservation "W11" 1
226 (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,load,fpload,store,fpstore")
227 (eq_attr "cpu" "700"))
228 "dummy_700")
229
230 ;; We have a bypass for all computations in the FP unit which feed an
231 ;; FP store as long as the sizes are the same.
232 (define_bypass 2 "W1,W2" "W10" "hppa_fpstore_bypass_p")
233 (define_bypass 9 "W3" "W10" "hppa_fpstore_bypass_p")
234 (define_bypass 11 "W4" "W10" "hppa_fpstore_bypass_p")
235 (define_bypass 13 "W5" "W10" "hppa_fpstore_bypass_p")
236 (define_bypass 17 "W6" "W10" "hppa_fpstore_bypass_p")
237
238 ;; We have an "anti-bypass" for FP loads which feed an FP store.
239 (define_bypass 4 "W8" "W10" "hppa_fpstore_bypass_p")
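
;; As a worked example of the numbers above: an fpalu result (reservation
;; W1, normal latency 3) that feeds an fpstore of the same operand size is
;; treated as ready after only 2 cycles, because the bypass from "W1" to
;; "W10" is guarded by hppa_fpstore_bypass_p and overrides the normal
;; latency; conversely, the fpload->fpstore anti-bypass raises the latency
;; of W8 from 2 to 4.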
240
241 ;; Function units for the 7100 and 7150. The 7100/7150 can dual-issue
242 ;; floating point computations with non-floating point computations (fp loads
243 ;; and stores are not fp computations).
244 ;;
245 ;; Memory. Disregarding cache misses, memory loads take two cycles; stores also
246 ;; take two cycles, during which no D-cache operations should be scheduled.
247 ;; Any special cases are handled in pa_adjust_cost. The 7100, 7150 and 7100LC
248 ;; all have the same memory characteristics if one disregards cache misses.
249 ;;
250 ;; The 7100/7150 has three floating-point units: ALU, MUL, and DIV.
251 ;; There's no value in modeling the ALU and MUL separately though
252 ;; since there can never be a functional unit conflict given the
253 ;; latency and issue rates for those units.
254 ;;
255 ;; Timings:
256 ;; Instruction Time Unit Minimum Distance (unit contention)
257 ;; fcpy 2 ALU 1
258 ;; fabs 2 ALU 1
259 ;; fadd 2 ALU 1
260 ;; fsub 2 ALU 1
261 ;; fcmp 2 ALU 1
262 ;; fcnv 2 ALU 1
263 ;; fmpyadd 2 ALU,MPY 1
264 ;; fmpysub 2 ALU,MPY 1
265 ;; fmpycfxt 2 ALU,MPY 1
266 ;; fmpy 2 MPY 1
267 ;; fmpyi 2 MPY 1
268 ;; fdiv,sgl 8 DIV 8
269 ;; fdiv,dbl 15 DIV 15
270 ;; fsqrt,sgl 8 DIV 8
271 ;; fsqrt,dbl 15 DIV 15
272
273 (define_automaton "pa7100")
274 (define_cpu_unit "i_7100, f_7100,fpmac_7100,fpdivsqrt_7100,mem_7100" "pa7100")
275
276 (define_insn_reservation "X0" 2
277 (and (eq_attr "type" "fpcc,fpalu,fpmulsgl,fpmuldbl")
278 (eq_attr "cpu" "7100"))
279 "f_7100,fpmac_7100")
280
281 (define_insn_reservation "X1" 8
282 (and (eq_attr "type" "fpdivsgl,fpsqrtsgl")
283 (eq_attr "cpu" "7100"))
284 "f_7100+fpdivsqrt_7100,fpdivsqrt_7100*7")
285
286 (define_insn_reservation "X2" 15
287 (and (eq_attr "type" "fpdivdbl,fpsqrtdbl")
288 (eq_attr "cpu" "7100"))
289 "f_7100+fpdivsqrt_7100,fpdivsqrt_7100*14")
290
291 (define_insn_reservation "X3" 2
292 (and (eq_attr "type" "load")
293 (eq_attr "cpu" "7100"))
294 "i_7100+mem_7100")
295
296 (define_insn_reservation "X4" 2
297 (and (eq_attr "type" "fpload")
298 (eq_attr "cpu" "7100"))
299 "i_7100+mem_7100")
300
301 (define_insn_reservation "X5" 2
302 (and (eq_attr "type" "store")
303 (eq_attr "cpu" "7100"))
304 "i_7100+mem_7100,mem_7100")
305
306 (define_insn_reservation "X6" 2
307 (and (eq_attr "type" "fpstore")
308 (eq_attr "cpu" "7100"))
309 "i_7100+mem_7100,mem_7100")
310
311 (define_insn_reservation "X7" 1
312 (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore")
313 (eq_attr "cpu" "7100"))
314 "i_7100")
315
316 ;; We have a bypass for all computations in the FP unit which feed an
317 ;; FP store as long as the sizes are the same.
318 (define_bypass 1 "X0" "X6" "hppa_fpstore_bypass_p")
319 (define_bypass 7 "X1" "X6" "hppa_fpstore_bypass_p")
320 (define_bypass 14 "X2" "X6" "hppa_fpstore_bypass_p")
321
322 ;; We have an "anti-bypass" for FP loads which feed an FP store.
323 (define_bypass 3 "X4" "X6" "hppa_fpstore_bypass_p")
324
325 ;; The 7100LC has three floating-point units: ALU, MUL, and DIV.
326 ;; There's no value in modeling the ALU and MUL separately though
327 ;; since there can never be a functional unit conflict that
328 ;; can be avoided given the latency, issue rates and mandatory
329 ;; one cycle cpu-wide lock for a double precision fp multiply.
330 ;;
331 ;; Timings:
332 ;; Instruction Time Unit Minimum Distance (unit contention)
333 ;; fcpy 2 ALU 1
334 ;; fabs 2 ALU 1
335 ;; fadd 2 ALU 1
336 ;; fsub 2 ALU 1
337 ;; fcmp 2 ALU 1
338 ;; fcnv 2 ALU 1
339 ;; fmpyadd,sgl 2 ALU,MPY 1
340 ;; fmpyadd,dbl 3 ALU,MPY 2
341 ;; fmpysub,sgl 2 ALU,MPY 1
342 ;; fmpysub,dbl 3 ALU,MPY 2
343 ;; fmpycfxt,sgl 2 ALU,MPY 1
344 ;; fmpycfxt,dbl 3 ALU,MPY 2
345 ;; fmpy,sgl 2 MPY 1
346 ;; fmpy,dbl 3 MPY 2
347 ;; fmpyi 3 MPY 2
348 ;; fdiv,sgl 8 DIV 8
349 ;; fdiv,dbl 15 DIV 15
350 ;; fsqrt,sgl 8 DIV 8
351 ;; fsqrt,dbl 15 DIV 15
352 ;;
353 ;; The PA7200 is just like the PA7100LC except that there is
354 ;; no store-store penalty.
355 ;;
356 ;; The PA7300 is just like the PA7200 except that there is
357 ;; no store-load penalty.
358 ;;
359 ;; Note there are some aspects of the 7100LC we are not modeling
360 ;; at the moment. I'll be reviewing the 7100LC scheduling info
361 ;; shortly and updating this description.
362 ;;
363 ;; load-load pairs
364 ;; store-store pairs
365 ;; other issue modeling
366
367 (define_automaton "pa7100lc")
368 (define_cpu_unit "i0_7100lc, i1_7100lc, f_7100lc" "pa7100lc")
369 (define_cpu_unit "fpmac_7100lc" "pa7100lc")
370 (define_cpu_unit "mem_7100lc" "pa7100lc")
371
372 ;; Double precision multiplies lock the entire CPU for one
373 ;; cycle. There is no way to avoid this lock, and trying to
374 ;; schedule around it is pointless, so there is no value in
375 ;; trying to model it.
376 ;;
377 ;; Not modeling the lock allows us to treat fp multiplies just
378 ;; like any other FP alu instruction. It allows for a smaller
379 ;; DFA and may reduce register pressure.
380 (define_insn_reservation "Y0" 2
381 (and (eq_attr "type" "fpcc,fpalu,fpmulsgl,fpmuldbl")
382 (eq_attr "cpu" "7100LC,7200,7300"))
383 "f_7100lc,fpmac_7100lc")
384
385 ;; fp division and sqrt instructions lock the entire CPU for
386 ;; 7 cycles (single precision) or 14 cycles (double precision).
387 ;; There is no way to avoid this lock, and trying to schedule
388 ;; around it is pointless, so there is no value in trying to
389 ;; model it. Not modeling the lock allows for a smaller DFA
390 ;; and may reduce register pressure.
391 (define_insn_reservation "Y1" 1
392 (and (eq_attr "type" "fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl")
393 (eq_attr "cpu" "7100LC,7200,7300"))
394 "f_7100lc")
395
396 (define_insn_reservation "Y2" 2
397 (and (eq_attr "type" "load")
398 (eq_attr "cpu" "7100LC,7200,7300"))
399 "i1_7100lc+mem_7100lc")
400
401 (define_insn_reservation "Y3" 2
402 (and (eq_attr "type" "fpload")
403 (eq_attr "cpu" "7100LC,7200,7300"))
404 "i1_7100lc+mem_7100lc")
405
406 (define_insn_reservation "Y4" 2
407 (and (eq_attr "type" "store")
408 (eq_attr "cpu" "7100LC"))
409 "i1_7100lc+mem_7100lc,mem_7100lc")
410
411 (define_insn_reservation "Y5" 2
412 (and (eq_attr "type" "fpstore")
413 (eq_attr "cpu" "7100LC"))
414 "i1_7100lc+mem_7100lc,mem_7100lc")
415
416 (define_insn_reservation "Y6" 1
417 (and (eq_attr "type" "shift,nullshift")
418 (eq_attr "cpu" "7100LC,7200,7300"))
419 "i1_7100lc")
420
421 (define_insn_reservation "Y7" 1
422 (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore,shift,nullshift")
423 (eq_attr "cpu" "7100LC,7200,7300"))
424 "(i0_7100lc|i1_7100lc)")
425
426 ;; The 7200 has a store-load penalty
427 (define_insn_reservation "Y8" 2
428 (and (eq_attr "type" "store")
429 (eq_attr "cpu" "7200"))
430 "i1_7100lc,mem_7100lc")
431
432 (define_insn_reservation "Y9" 2
433 (and (eq_attr "type" "fpstore")
434 (eq_attr "cpu" "7200"))
435 "i1_7100lc,mem_7100lc")
436
437 ;; The 7300 has no penalty for store-store or store-load
438 (define_insn_reservation "Y10" 2
439 (and (eq_attr "type" "store")
440 (eq_attr "cpu" "7300"))
441 "i1_7100lc")
442
443 (define_insn_reservation "Y11" 2
444 (and (eq_attr "type" "fpstore")
445 (eq_attr "cpu" "7300"))
446 "i1_7100lc")
447
448 ;; We have an "anti-bypass" for FP loads which feed an FP store.
449 (define_bypass 3 "Y3" "Y5,Y9,Y11" "hppa_fpstore_bypass_p")
450
451 ;; Scheduling for the PA8000 is somewhat different from scheduling for a
452 ;; traditional architecture.
453 ;;
454 ;; The PA8000 has a large (56) entry reorder buffer that is split between
455 ;; memory and non-memory operations.
456 ;;
457 ;; The PA8000 can issue two memory and two non-memory operations per cycle to
458 ;; the function units, with the exception of branches and multi-output
459 ;; instructions. The PA8000 can retire two non-memory operations per cycle
460 ;; and two memory operations per cycle, only one of which may be a store.
461 ;;
462 ;; Given the large reorder buffer, the processor can hide most latencies.
463 ;; According to HP, the best results are obtained by scheduling for retirement
464 ;; bandwidth with limited latency scheduling for floating point operations.
465 ;; Latency for integer operations and memory references is ignored.
466 ;;
467 ;;
468 ;; We claim floating point operations have a 2 cycle latency and are
469 ;; fully pipelined, except for div and sqrt which are not pipelined and
470 ;; take from 17 to 31 cycles to complete.
471 ;;
472 ;; It's worth noting that there is no way to saturate all the functional
473 ;; units on the PA8000 as there is not enough issue bandwidth.
474
475 (define_automaton "pa8000")
476 (define_cpu_unit "inm0_8000, inm1_8000, im0_8000, im1_8000" "pa8000")
477 (define_cpu_unit "rnm0_8000, rnm1_8000, rm0_8000, rm1_8000" "pa8000")
478 (define_cpu_unit "store_8000" "pa8000")
479 (define_cpu_unit "f0_8000, f1_8000" "pa8000")
480 (define_cpu_unit "fdivsqrt0_8000, fdivsqrt1_8000" "pa8000")
481 (define_reservation "inm_8000" "inm0_8000 | inm1_8000")
482 (define_reservation "im_8000" "im0_8000 | im1_8000")
483 (define_reservation "rnm_8000" "rnm0_8000 | rnm1_8000")
484 (define_reservation "rm_8000" "rm0_8000 | rm1_8000")
485 (define_reservation "f_8000" "f0_8000 | f1_8000")
486 (define_reservation "fdivsqrt_8000" "fdivsqrt0_8000 | fdivsqrt1_8000")
487
488 ;; We can issue any two memops per cycle, but we can only retire
489 ;; one memory store per cycle. We assume that the reorder buffer
490 ;; will hide any memory latencies per HP's recommendation.
491 (define_insn_reservation "Z0" 0
492 (and
493 (eq_attr "type" "load,fpload")
494 (eq_attr "cpu" "8000"))
495 "im_8000,rm_8000")
496
497 (define_insn_reservation "Z1" 0
498 (and
499 (eq_attr "type" "store,fpstore")
500 (eq_attr "cpu" "8000"))
501 "im_8000,rm_8000+store_8000")
502
503 ;; We can issue and retire two non-memory operations per cycle with
504 ;; a few exceptions (branches). This group catches those we want
505 ;; to assume have zero latency.
506 (define_insn_reservation "Z2" 0
507 (and
508 (eq_attr "type" "!load,fpload,store,fpstore,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch,fpcc,fpalu,fpmulsgl,fpmuldbl,fpsqrtsgl,fpsqrtdbl,fpdivsgl,fpdivdbl")
509 (eq_attr "cpu" "8000"))
510 "inm_8000,rnm_8000")
511
512 ;; Branches use both slots in the non-memory issue and
513 ;; retirement unit.
514 (define_insn_reservation "Z3" 0
515 (and
516 (eq_attr "type" "uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch")
517 (eq_attr "cpu" "8000"))
518 "inm0_8000+inm1_8000,rnm0_8000+rnm1_8000")
519
520 ;; We partially latency-schedule the floating point units.
521 ;; They can issue/retire two at a time in the non-memory
522 ;; units. We fix their latency at 2 cycles and they
523 ;; are fully pipelined.
524 (define_insn_reservation "Z4" 1
525 (and
526 (eq_attr "type" "fpcc,fpalu,fpmulsgl,fpmuldbl")
527 (eq_attr "cpu" "8000"))
528 "inm_8000,f_8000,rnm_8000")
529
530 ;; The fdivsqrt units are not pipelined and have a very long latency.
531 ;; To keep the DFA from exploding, we do not show all the
532 ;; reservations for the divsqrt unit.
533 (define_insn_reservation "Z5" 17
534 (and
535 (eq_attr "type" "fpdivsgl,fpsqrtsgl")
536 (eq_attr "cpu" "8000"))
537 "inm_8000,fdivsqrt_8000*6,rnm_8000")
538
539 (define_insn_reservation "Z6" 31
540 (and
541 (eq_attr "type" "fpdivdbl,fpsqrtdbl")
542 (eq_attr "cpu" "8000"))
543 "inm_8000,fdivsqrt_8000*6,rnm_8000")
544
545
546 \f
547 ;; Compare instructions.
548 ;; This controls RTL generation and register allocation.
549
550 ;; We generate RTL for comparisons and branches by having the cmpxx
551 ;; patterns store away the operands. Then, the scc and bcc patterns
552 ;; emit RTL for both the compare and the branch.
553 ;;
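;; For example (an illustrative sketch of the expansion flow, not generated
;; output): a source-level "if (a < b)" on 32-bit integers first expands
;; through "cmpsi", which only records the operands in hppa_compare_op0 and
;; hppa_compare_op1 and sets hppa_branch_type, and then through "blt", which
;; emits the real compare-and-branch RTL; that RTL is ultimately matched by
;; one of the cbranch patterns below and output by output_cbranch as a
;; single comb/cmpb instruction when the target is in range.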
554
555 (define_expand "cmpdi"
556 [(set (reg:CC 0)
557 (compare:CC (match_operand:DI 0 "reg_or_0_operand" "")
558 (match_operand:DI 1 "register_operand" "")))]
559 "TARGET_64BIT"
560
561 "
562 {
563 hppa_compare_op0 = operands[0];
564 hppa_compare_op1 = operands[1];
565 hppa_branch_type = CMP_SI;
566 DONE;
567 }")
568
569 (define_expand "cmpsi"
570 [(set (reg:CC 0)
571 (compare:CC (match_operand:SI 0 "reg_or_0_operand" "")
572 (match_operand:SI 1 "arith5_operand" "")))]
573 ""
574 "
575 {
576 hppa_compare_op0 = operands[0];
577 hppa_compare_op1 = operands[1];
578 hppa_branch_type = CMP_SI;
579 DONE;
580 }")
581
582 (define_expand "cmpsf"
583 [(set (reg:CCFP 0)
584 (compare:CCFP (match_operand:SF 0 "reg_or_0_operand" "")
585 (match_operand:SF 1 "reg_or_0_operand" "")))]
586 "! TARGET_SOFT_FLOAT"
587 "
588 {
589 hppa_compare_op0 = operands[0];
590 hppa_compare_op1 = operands[1];
591 hppa_branch_type = CMP_SF;
592 DONE;
593 }")
594
595 (define_expand "cmpdf"
596 [(set (reg:CCFP 0)
597 (compare:CCFP (match_operand:DF 0 "reg_or_0_operand" "")
598 (match_operand:DF 1 "reg_or_0_operand" "")))]
599 "! TARGET_SOFT_FLOAT"
600 "
601 {
602 hppa_compare_op0 = operands[0];
603 hppa_compare_op1 = operands[1];
604 hppa_branch_type = CMP_DF;
605 DONE;
606 }")
607
608 (define_insn ""
609 [(set (reg:CCFP 0)
610 (match_operator:CCFP 2 "comparison_operator"
611 [(match_operand:SF 0 "reg_or_0_operand" "fG")
612 (match_operand:SF 1 "reg_or_0_operand" "fG")]))]
613 "! TARGET_SOFT_FLOAT"
614 "fcmp,sgl,%Y2 %f0,%f1"
615 [(set_attr "length" "4")
616 (set_attr "type" "fpcc")])
617
618 (define_insn ""
619 [(set (reg:CCFP 0)
620 (match_operator:CCFP 2 "comparison_operator"
621 [(match_operand:DF 0 "reg_or_0_operand" "fG")
622 (match_operand:DF 1 "reg_or_0_operand" "fG")]))]
623 "! TARGET_SOFT_FLOAT"
624 "fcmp,dbl,%Y2 %f0,%f1"
625 [(set_attr "length" "4")
626 (set_attr "type" "fpcc")])
627
628 ;; Provide a means to emit the movccfp0 and movccfp1 optimization
629 ;; placeholders. This is necessary in rare situations when a
630 ;; placeholder is re-emitted (see PR 8705).
631
632 (define_expand "movccfp"
633 [(set (reg:CCFP 0)
634 (match_operand 0 "const_int_operand" ""))]
635 "! TARGET_SOFT_FLOAT"
636 "
637 {
638 if ((unsigned HOST_WIDE_INT) INTVAL (operands[0]) > 1)
639 FAIL;
640 }")
641
642 ;; The following patterns are optimization placeholders. In almost
643 ;; all cases, the user of the condition code will be simplified and the
644 ;; original condition code setting insn should be eliminated.
645
646 (define_insn "*movccfp0"
647 [(set (reg:CCFP 0)
648 (const_int 0))]
649 "! TARGET_SOFT_FLOAT"
650 "fcmp,dbl,= %%fr0,%%fr0"
651 [(set_attr "length" "4")
652 (set_attr "type" "fpcc")])
653
654 (define_insn "*movccfp1"
655 [(set (reg:CCFP 0)
656 (const_int 1))]
657 "! TARGET_SOFT_FLOAT"
658 "fcmp,dbl,!= %%fr0,%%fr0"
659 [(set_attr "length" "4")
660 (set_attr "type" "fpcc")])
661
662 ;; scc insns.
663
664 (define_expand "seq"
665 [(set (match_operand:SI 0 "register_operand" "")
666 (eq:SI (match_dup 1)
667 (match_dup 2)))]
668 "!TARGET_64BIT"
669 "
670 {
671 /* fp scc patterns rarely match, and are not a win on the PA. */
672 if (hppa_branch_type != CMP_SI)
673 FAIL;
674 /* set up operands from compare. */
675 operands[1] = hppa_compare_op0;
676 operands[2] = hppa_compare_op1;
677 /* fall through and generate default code */
678 }")
679
680 (define_expand "sne"
681 [(set (match_operand:SI 0 "register_operand" "")
682 (ne:SI (match_dup 1)
683 (match_dup 2)))]
684 "!TARGET_64BIT"
685 "
686 {
687 /* fp scc patterns rarely match, and are not a win on the PA. */
688 if (hppa_branch_type != CMP_SI)
689 FAIL;
690 operands[1] = hppa_compare_op0;
691 operands[2] = hppa_compare_op1;
692 }")
693
694 (define_expand "slt"
695 [(set (match_operand:SI 0 "register_operand" "")
696 (lt:SI (match_dup 1)
697 (match_dup 2)))]
698 "!TARGET_64BIT"
699 "
700 {
701 /* fp scc patterns rarely match, and are not a win on the PA. */
702 if (hppa_branch_type != CMP_SI)
703 FAIL;
704 operands[1] = hppa_compare_op0;
705 operands[2] = hppa_compare_op1;
706 }")
707
708 (define_expand "sgt"
709 [(set (match_operand:SI 0 "register_operand" "")
710 (gt:SI (match_dup 1)
711 (match_dup 2)))]
712 "!TARGET_64BIT"
713 "
714 {
715 /* fp scc patterns rarely match, and are not a win on the PA. */
716 if (hppa_branch_type != CMP_SI)
717 FAIL;
718 operands[1] = hppa_compare_op0;
719 operands[2] = hppa_compare_op1;
720 }")
721
722 (define_expand "sle"
723 [(set (match_operand:SI 0 "register_operand" "")
724 (le:SI (match_dup 1)
725 (match_dup 2)))]
726 "!TARGET_64BIT"
727 "
728 {
729 /* fp scc patterns rarely match, and are not a win on the PA. */
730 if (hppa_branch_type != CMP_SI)
731 FAIL;
732 operands[1] = hppa_compare_op0;
733 operands[2] = hppa_compare_op1;
734 }")
735
736 (define_expand "sge"
737 [(set (match_operand:SI 0 "register_operand" "")
738 (ge:SI (match_dup 1)
739 (match_dup 2)))]
740 "!TARGET_64BIT"
741 "
742 {
743 /* fp scc patterns rarely match, and are not a win on the PA. */
744 if (hppa_branch_type != CMP_SI)
745 FAIL;
746 operands[1] = hppa_compare_op0;
747 operands[2] = hppa_compare_op1;
748 }")
749
750 (define_expand "sltu"
751 [(set (match_operand:SI 0 "register_operand" "")
752 (ltu:SI (match_dup 1)
753 (match_dup 2)))]
754 "!TARGET_64BIT"
755 "
756 {
757 if (hppa_branch_type != CMP_SI)
758 FAIL;
759 operands[1] = hppa_compare_op0;
760 operands[2] = hppa_compare_op1;
761 }")
762
763 (define_expand "sgtu"
764 [(set (match_operand:SI 0 "register_operand" "")
765 (gtu:SI (match_dup 1)
766 (match_dup 2)))]
767 "!TARGET_64BIT"
768 "
769 {
770 if (hppa_branch_type != CMP_SI)
771 FAIL;
772 operands[1] = hppa_compare_op0;
773 operands[2] = hppa_compare_op1;
774 }")
775
776 (define_expand "sleu"
777 [(set (match_operand:SI 0 "register_operand" "")
778 (leu:SI (match_dup 1)
779 (match_dup 2)))]
780 "!TARGET_64BIT"
781 "
782 {
783 if (hppa_branch_type != CMP_SI)
784 FAIL;
785 operands[1] = hppa_compare_op0;
786 operands[2] = hppa_compare_op1;
787 }")
788
789 (define_expand "sgeu"
790 [(set (match_operand:SI 0 "register_operand" "")
791 (geu:SI (match_dup 1)
792 (match_dup 2)))]
793 "!TARGET_64BIT"
794 "
795 {
796 if (hppa_branch_type != CMP_SI)
797 FAIL;
798 operands[1] = hppa_compare_op0;
799 operands[2] = hppa_compare_op1;
800 }")
801
802 ;; Instruction canonicalization puts immediate operands second, which
803 ;; is the reverse of what we want.
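;; This is why the output templates below print operand 2 before operand 1
;; and rely on the %B and %S output modifiers (handled by print_operand in
;; pa.c) to emit a condition completer adjusted for that operand order.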
804
805 (define_insn "scc"
806 [(set (match_operand:SI 0 "register_operand" "=r")
807 (match_operator:SI 3 "comparison_operator"
808 [(match_operand:SI 1 "register_operand" "r")
809 (match_operand:SI 2 "arith11_operand" "rI")]))]
810 ""
811 "{com%I2clr|cmp%I2clr},%B3 %2,%1,%0\;ldi 1,%0"
812 [(set_attr "type" "binary")
813 (set_attr "length" "8")])
814
815 (define_insn ""
816 [(set (match_operand:DI 0 "register_operand" "=r")
817 (match_operator:DI 3 "comparison_operator"
818 [(match_operand:DI 1 "register_operand" "r")
819 (match_operand:DI 2 "arith11_operand" "rI")]))]
820 "TARGET_64BIT"
821 "cmp%I2clr,*%B3 %2,%1,%0\;ldi 1,%0"
822 [(set_attr "type" "binary")
823 (set_attr "length" "8")])
824
825 (define_insn "iorscc"
826 [(set (match_operand:SI 0 "register_operand" "=r")
827 (ior:SI (match_operator:SI 3 "comparison_operator"
828 [(match_operand:SI 1 "register_operand" "r")
829 (match_operand:SI 2 "arith11_operand" "rI")])
830 (match_operator:SI 6 "comparison_operator"
831 [(match_operand:SI 4 "register_operand" "r")
832 (match_operand:SI 5 "arith11_operand" "rI")])))]
833 ""
834 "{com%I2clr|cmp%I2clr},%S3 %2,%1,%%r0\;{com%I5clr|cmp%I5clr},%B6 %5,%4,%0\;ldi 1,%0"
835 [(set_attr "type" "binary")
836 (set_attr "length" "12")])
837
838 (define_insn ""
839 [(set (match_operand:DI 0 "register_operand" "=r")
840 (ior:DI (match_operator:DI 3 "comparison_operator"
841 [(match_operand:DI 1 "register_operand" "r")
842 (match_operand:DI 2 "arith11_operand" "rI")])
843 (match_operator:DI 6 "comparison_operator"
844 [(match_operand:DI 4 "register_operand" "r")
845 (match_operand:DI 5 "arith11_operand" "rI")])))]
846 "TARGET_64BIT"
847 "cmp%I2clr,*%S3 %2,%1,%%r0\;cmp%I5clr,*%B6 %5,%4,%0\;ldi 1,%0"
848 [(set_attr "type" "binary")
849 (set_attr "length" "12")])
850
851 ;; Combiner patterns for common operations performed with the output
852 ;; from an scc insn (negscc and incscc).
853 (define_insn "negscc"
854 [(set (match_operand:SI 0 "register_operand" "=r")
855 (neg:SI (match_operator:SI 3 "comparison_operator"
856 [(match_operand:SI 1 "register_operand" "r")
857 (match_operand:SI 2 "arith11_operand" "rI")])))]
858 ""
859 "{com%I2clr|cmp%I2clr},%B3 %2,%1,%0\;ldi -1,%0"
860 [(set_attr "type" "binary")
861 (set_attr "length" "8")])
862
863 (define_insn ""
864 [(set (match_operand:DI 0 "register_operand" "=r")
865 (neg:DI (match_operator:DI 3 "comparison_operator"
866 [(match_operand:DI 1 "register_operand" "r")
867 (match_operand:DI 2 "arith11_operand" "rI")])))]
868 "TARGET_64BIT"
869 "cmp%I2clr,*%B3 %2,%1,%0\;ldi -1,%0"
870 [(set_attr "type" "binary")
871 (set_attr "length" "8")])
872
873 ;; Patterns for adding/subtracting the result of a boolean expression from
874 ;; a register. First we have special patterns that make use of the carry
875 ;; bit, and output only two instructions. For the cases that cannot in
876 ;; general be done in two instructions, the incscc pattern at the end outputs
877 ;; two or three instructions.
878
879 (define_insn ""
880 [(set (match_operand:SI 0 "register_operand" "=r")
881 (plus:SI (leu:SI (match_operand:SI 2 "register_operand" "r")
882 (match_operand:SI 3 "arith11_operand" "rI"))
883 (match_operand:SI 1 "register_operand" "r")))]
884 ""
885 "sub%I3 %3,%2,%%r0\;{addc|add,c} %%r0,%1,%0"
886 [(set_attr "type" "binary")
887 (set_attr "length" "8")])
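
;; A worked reading of the pattern above (assuming the usual PA carry
;; convention, where a subtraction leaves the carry bit set when no borrow
;; occurs): "sub %3,%2,%r0" computes %3 - %2 and sets carry exactly when
;; %2 <= %3 (unsigned), and "add,c %r0,%1,%0" then adds that carry to %1,
;; giving %1 + (%2 <=u %3) in two instructions without materializing the
;; scc value in a register.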
888
889 (define_insn ""
890 [(set (match_operand:DI 0 "register_operand" "=r")
891 (plus:DI (leu:DI (match_operand:DI 2 "register_operand" "r")
892 (match_operand:DI 3 "arith11_operand" "rI"))
893 (match_operand:DI 1 "register_operand" "r")))]
894 "TARGET_64BIT"
895 "sub%I3 %3,%2,%%r0\;add,dc %%r0,%1,%0"
896 [(set_attr "type" "binary")
897 (set_attr "length" "8")])
898
899 ; This need only accept registers for op3, since canonicalization
900 ; replaces geu with gtu when op3 is an integer.
901 (define_insn ""
902 [(set (match_operand:SI 0 "register_operand" "=r")
903 (plus:SI (geu:SI (match_operand:SI 2 "register_operand" "r")
904 (match_operand:SI 3 "register_operand" "r"))
905 (match_operand:SI 1 "register_operand" "r")))]
906 ""
907 "sub %2,%3,%%r0\;{addc|add,c} %%r0,%1,%0"
908 [(set_attr "type" "binary")
909 (set_attr "length" "8")])
910
911 (define_insn ""
912 [(set (match_operand:DI 0 "register_operand" "=r")
913 (plus:DI (geu:DI (match_operand:DI 2 "register_operand" "r")
914 (match_operand:DI 3 "register_operand" "r"))
915 (match_operand:DI 1 "register_operand" "r")))]
916 "TARGET_64BIT"
917 "sub %2,%3,%%r0\;add,dc %%r0,%1,%0"
918 [(set_attr "type" "binary")
919 (set_attr "length" "8")])
920
921 ; Match only integers for op3 here. This is used as the canonical form of the
922 ; geu pattern when op3 is an integer. Don't match registers since we can't
923 ; make better code than the general incscc pattern.
924 (define_insn ""
925 [(set (match_operand:SI 0 "register_operand" "=r")
926 (plus:SI (gtu:SI (match_operand:SI 2 "register_operand" "r")
927 (match_operand:SI 3 "int11_operand" "I"))
928 (match_operand:SI 1 "register_operand" "r")))]
929 ""
930 "addi %k3,%2,%%r0\;{addc|add,c} %%r0,%1,%0"
931 [(set_attr "type" "binary")
932 (set_attr "length" "8")])
933
934 (define_insn ""
935 [(set (match_operand:DI 0 "register_operand" "=r")
936 (plus:DI (gtu:DI (match_operand:DI 2 "register_operand" "r")
937 (match_operand:DI 3 "int11_operand" "I"))
938 (match_operand:DI 1 "register_operand" "r")))]
939 "TARGET_64BIT"
940 "addi %k3,%2,%%r0\;add,dc %%r0,%1,%0"
941 [(set_attr "type" "binary")
942 (set_attr "length" "8")])
943
944 (define_insn "incscc"
945 [(set (match_operand:SI 0 "register_operand" "=r,r")
946 (plus:SI (match_operator:SI 4 "comparison_operator"
947 [(match_operand:SI 2 "register_operand" "r,r")
948 (match_operand:SI 3 "arith11_operand" "rI,rI")])
949 (match_operand:SI 1 "register_operand" "0,?r")))]
950 ""
951 "@
952 {com%I3clr|cmp%I3clr},%B4 %3,%2,%%r0\;addi 1,%0,%0
953 {com%I3clr|cmp%I3clr},%B4 %3,%2,%%r0\;addi,tr 1,%1,%0\;copy %1,%0"
954 [(set_attr "type" "binary,binary")
955 (set_attr "length" "8,12")])
956
957 (define_insn ""
958 [(set (match_operand:DI 0 "register_operand" "=r,r")
959 (plus:DI (match_operator:DI 4 "comparison_operator"
960 [(match_operand:DI 2 "register_operand" "r,r")
961 (match_operand:DI 3 "arith11_operand" "rI,rI")])
962 (match_operand:DI 1 "register_operand" "0,?r")))]
963 "TARGET_64BIT"
964 "@
965 cmp%I3clr,*%B4 %3,%2,%%r0\;addi 1,%0,%0
966 cmp%I3clr,*%B4 %3,%2,%%r0\;addi,tr 1,%1,%0\;copy %1,%0"
967 [(set_attr "type" "binary,binary")
968 (set_attr "length" "8,12")])
969
970 (define_insn ""
971 [(set (match_operand:SI 0 "register_operand" "=r")
972 (minus:SI (match_operand:SI 1 "register_operand" "r")
973 (gtu:SI (match_operand:SI 2 "register_operand" "r")
974 (match_operand:SI 3 "arith11_operand" "rI"))))]
975 ""
976 "sub%I3 %3,%2,%%r0\;{subb|sub,b} %1,%%r0,%0"
977 [(set_attr "type" "binary")
978 (set_attr "length" "8")])
979
980 (define_insn ""
981 [(set (match_operand:DI 0 "register_operand" "=r")
982 (minus:DI (match_operand:DI 1 "register_operand" "r")
983 (gtu:DI (match_operand:DI 2 "register_operand" "r")
984 (match_operand:DI 3 "arith11_operand" "rI"))))]
985 "TARGET_64BIT"
986 "sub%I3 %3,%2,%%r0\;sub,db %1,%%r0,%0"
987 [(set_attr "type" "binary")
988 (set_attr "length" "8")])
989
990 (define_insn ""
991 [(set (match_operand:SI 0 "register_operand" "=r")
992 (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
993 (gtu:SI (match_operand:SI 2 "register_operand" "r")
994 (match_operand:SI 3 "arith11_operand" "rI")))
995 (match_operand:SI 4 "register_operand" "r")))]
996 ""
997 "sub%I3 %3,%2,%%r0\;{subb|sub,b} %1,%4,%0"
998 [(set_attr "type" "binary")
999 (set_attr "length" "8")])
1000
1001 (define_insn ""
1002 [(set (match_operand:DI 0 "register_operand" "=r")
1003 (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
1004 (gtu:DI (match_operand:DI 2 "register_operand" "r")
1005 (match_operand:DI 3 "arith11_operand" "rI")))
1006 (match_operand:DI 4 "register_operand" "r")))]
1007 "TARGET_64BIT"
1008 "sub%I3 %3,%2,%%r0\;sub,db %1,%4,%0"
1009 [(set_attr "type" "binary")
1010 (set_attr "length" "8")])
1011
1012 ; This need only accept registers for op3, since canonicalization
1013 ; replaces ltu with leu when op3 is an integer.
1014 (define_insn ""
1015 [(set (match_operand:SI 0 "register_operand" "=r")
1016 (minus:SI (match_operand:SI 1 "register_operand" "r")
1017 (ltu:SI (match_operand:SI 2 "register_operand" "r")
1018 (match_operand:SI 3 "register_operand" "r"))))]
1019 ""
1020 "sub %2,%3,%%r0\;{subb|sub,b} %1,%%r0,%0"
1021 [(set_attr "type" "binary")
1022 (set_attr "length" "8")])
1023
1024 (define_insn ""
1025 [(set (match_operand:DI 0 "register_operand" "=r")
1026 (minus:DI (match_operand:DI 1 "register_operand" "r")
1027 (ltu:DI (match_operand:DI 2 "register_operand" "r")
1028 (match_operand:DI 3 "register_operand" "r"))))]
1029 "TARGET_64BIT"
1030 "sub %2,%3,%%r0\;sub,db %1,%%r0,%0"
1031 [(set_attr "type" "binary")
1032 (set_attr "length" "8")])
1033
1034 (define_insn ""
1035 [(set (match_operand:SI 0 "register_operand" "=r")
1036 (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
1037 (ltu:SI (match_operand:SI 2 "register_operand" "r")
1038 (match_operand:SI 3 "register_operand" "r")))
1039 (match_operand:SI 4 "register_operand" "r")))]
1040 ""
1041 "sub %2,%3,%%r0\;{subb|sub,b} %1,%4,%0"
1042 [(set_attr "type" "binary")
1043 (set_attr "length" "8")])
1044
1045 (define_insn ""
1046 [(set (match_operand:DI 0 "register_operand" "=r")
1047 (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
1048 (ltu:DI (match_operand:DI 2 "register_operand" "r")
1049 (match_operand:DI 3 "register_operand" "r")))
1050 (match_operand:DI 4 "register_operand" "r")))]
1051 "TARGET_64BIT"
1052 "sub %2,%3,%%r0\;sub,db %1,%4,%0"
1053 [(set_attr "type" "binary")
1054 (set_attr "length" "8")])
1055
1056 ; Match only integers for op3 here. This is used as the canonical form of the
1057 ; ltu pattern when op3 is an integer. Don't match registers since we can't
1058 ; make better code than the general decscc pattern.
1059 (define_insn ""
1060 [(set (match_operand:SI 0 "register_operand" "=r")
1061 (minus:SI (match_operand:SI 1 "register_operand" "r")
1062 (leu:SI (match_operand:SI 2 "register_operand" "r")
1063 (match_operand:SI 3 "int11_operand" "I"))))]
1064 ""
1065 "addi %k3,%2,%%r0\;{subb|sub,b} %1,%%r0,%0"
1066 [(set_attr "type" "binary")
1067 (set_attr "length" "8")])
1068
1069 (define_insn ""
1070 [(set (match_operand:DI 0 "register_operand" "=r")
1071 (minus:DI (match_operand:DI 1 "register_operand" "r")
1072 (leu:DI (match_operand:DI 2 "register_operand" "r")
1073 (match_operand:DI 3 "int11_operand" "I"))))]
1074 "TARGET_64BIT"
1075 "addi %k3,%2,%%r0\;sub,db %1,%%r0,%0"
1076 [(set_attr "type" "binary")
1077 (set_attr "length" "8")])
1078
1079 (define_insn ""
1080 [(set (match_operand:SI 0 "register_operand" "=r")
1081 (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
1082 (leu:SI (match_operand:SI 2 "register_operand" "r")
1083 (match_operand:SI 3 "int11_operand" "I")))
1084 (match_operand:SI 4 "register_operand" "r")))]
1085 ""
1086 "addi %k3,%2,%%r0\;{subb|sub,b} %1,%4,%0"
1087 [(set_attr "type" "binary")
1088 (set_attr "length" "8")])
1089
1090 (define_insn ""
1091 [(set (match_operand:DI 0 "register_operand" "=r")
1092 (minus:DI (minus:DI (match_operand:DI 1 "register_operand" "r")
1093 (leu:DI (match_operand:DI 2 "register_operand" "r")
1094 (match_operand:DI 3 "int11_operand" "I")))
1095 (match_operand:DI 4 "register_operand" "r")))]
1096 "TARGET_64BIT"
1097 "addi %k3,%2,%%r0\;sub,db %1,%4,%0"
1098 [(set_attr "type" "binary")
1099 (set_attr "length" "8")])
1100
1101 (define_insn "decscc"
1102 [(set (match_operand:SI 0 "register_operand" "=r,r")
1103 (minus:SI (match_operand:SI 1 "register_operand" "0,?r")
1104 (match_operator:SI 4 "comparison_operator"
1105 [(match_operand:SI 2 "register_operand" "r,r")
1106 (match_operand:SI 3 "arith11_operand" "rI,rI")])))]
1107 ""
1108 "@
1109 {com%I3clr|cmp%I3clr},%B4 %3,%2,%%r0\;addi -1,%0,%0
1110 {com%I3clr|cmp%I3clr},%B4 %3,%2,%%r0\;addi,tr -1,%1,%0\;copy %1,%0"
1111 [(set_attr "type" "binary,binary")
1112 (set_attr "length" "8,12")])
1113
1114 (define_insn ""
1115 [(set (match_operand:DI 0 "register_operand" "=r,r")
1116 (minus:DI (match_operand:DI 1 "register_operand" "0,?r")
1117 (match_operator:DI 4 "comparison_operator"
1118 [(match_operand:DI 2 "register_operand" "r,r")
1119 (match_operand:DI 3 "arith11_operand" "rI,rI")])))]
1120 "TARGET_64BIT"
1121 "@
1122 cmp%I3clr,*%B4 %3,%2,%%r0\;addi -1,%0,%0
1123 cmp%I3clr,*%B4 %3,%2,%%r0\;addi,tr -1,%1,%0\;copy %1,%0"
1124 [(set_attr "type" "binary,binary")
1125 (set_attr "length" "8,12")])
1126
1127 ; Patterns for max and min. (There is no need for an earlyclobber in the
1128 ; last alternative since the middle alternative will match if op0 == op1.)
1129
1130 (define_insn "sminsi3"
1131 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1132 (smin:SI (match_operand:SI 1 "register_operand" "%0,0,r")
1133 (match_operand:SI 2 "arith11_operand" "r,I,M")))]
1134 ""
1135 "@
1136 {comclr|cmpclr},> %2,%0,%%r0\;copy %2,%0
1137 {comiclr|cmpiclr},> %2,%0,%%r0\;ldi %2,%0
1138 {comclr|cmpclr},> %1,%r2,%0\;copy %1,%0"
1139 [(set_attr "type" "multi,multi,multi")
1140 (set_attr "length" "8,8,8")])
1141
1142 (define_insn "smindi3"
1143 [(set (match_operand:DI 0 "register_operand" "=r,r,r")
1144 (smin:DI (match_operand:DI 1 "register_operand" "%0,0,r")
1145 (match_operand:DI 2 "arith11_operand" "r,I,M")))]
1146 "TARGET_64BIT"
1147 "@
1148 cmpclr,*> %2,%0,%%r0\;copy %2,%0
1149 cmpiclr,*> %2,%0,%%r0\;ldi %2,%0
1150 cmpclr,*> %1,%r2,%0\;copy %1,%0"
1151 [(set_attr "type" "multi,multi,multi")
1152 (set_attr "length" "8,8,8")])
1153
1154 (define_insn "uminsi3"
1155 [(set (match_operand:SI 0 "register_operand" "=r,r")
1156 (umin:SI (match_operand:SI 1 "register_operand" "%0,0")
1157 (match_operand:SI 2 "arith11_operand" "r,I")))]
1158 ""
1159 "@
1160 {comclr|cmpclr},>> %2,%0,%%r0\;copy %2,%0
1161 {comiclr|cmpiclr},>> %2,%0,%%r0\;ldi %2,%0"
1162 [(set_attr "type" "multi,multi")
1163 (set_attr "length" "8,8")])
1164
1165 (define_insn "umindi3"
1166 [(set (match_operand:DI 0 "register_operand" "=r,r")
1167 (umin:DI (match_operand:DI 1 "register_operand" "%0,0")
1168 (match_operand:DI 2 "arith11_operand" "r,I")))]
1169 "TARGET_64BIT"
1170 "@
1171 cmpclr,*>> %2,%0,%%r0\;copy %2,%0
1172 cmpiclr,*>> %2,%0,%%r0\;ldi %2,%0"
1173 [(set_attr "type" "multi,multi")
1174 (set_attr "length" "8,8")])
1175
1176 (define_insn "smaxsi3"
1177 [(set (match_operand:SI 0 "register_operand" "=r,r,r")
1178 (smax:SI (match_operand:SI 1 "register_operand" "%0,0,r")
1179 (match_operand:SI 2 "arith11_operand" "r,I,M")))]
1180 ""
1181 "@
1182 {comclr|cmpclr},< %2,%0,%%r0\;copy %2,%0
1183 {comiclr|cmpiclr},< %2,%0,%%r0\;ldi %2,%0
1184 {comclr|cmpclr},< %1,%r2,%0\;copy %1,%0"
1185 [(set_attr "type" "multi,multi,multi")
1186 (set_attr "length" "8,8,8")])
1187
1188 (define_insn "smaxdi3"
1189 [(set (match_operand:DI 0 "register_operand" "=r,r,r")
1190 (smax:DI (match_operand:DI 1 "register_operand" "%0,0,r")
1191 (match_operand:DI 2 "arith11_operand" "r,I,M")))]
1192 "TARGET_64BIT"
1193 "@
1194 cmpclr,*< %2,%0,%%r0\;copy %2,%0
1195 cmpiclr,*< %2,%0,%%r0\;ldi %2,%0
1196 cmpclr,*< %1,%r2,%0\;copy %1,%0"
1197 [(set_attr "type" "multi,multi,multi")
1198 (set_attr "length" "8,8,8")])
1199
1200 (define_insn "umaxsi3"
1201 [(set (match_operand:SI 0 "register_operand" "=r,r")
1202 (umax:SI (match_operand:SI 1 "register_operand" "%0,0")
1203 (match_operand:SI 2 "arith11_operand" "r,I")))]
1204 ""
1205 "@
1206 {comclr|cmpclr},<< %2,%0,%%r0\;copy %2,%0
1207 {comiclr|cmpiclr},<< %2,%0,%%r0\;ldi %2,%0"
1208 [(set_attr "type" "multi,multi")
1209 (set_attr "length" "8,8")])
1210
1211 (define_insn "umaxdi3"
1212 [(set (match_operand:DI 0 "register_operand" "=r,r")
1213 (umax:DI (match_operand:DI 1 "register_operand" "%0,0")
1214 (match_operand:DI 2 "arith11_operand" "r,I")))]
1215 "TARGET_64BIT"
1216 "@
1217 cmpclr,*<< %2,%0,%%r0\;copy %2,%0
1218 cmpiclr,*<< %2,%0,%%r0\;ldi %2,%0"
1219 [(set_attr "type" "multi,multi")
1220 (set_attr "length" "8,8")])
1221
1222 (define_insn "abssi2"
1223 [(set (match_operand:SI 0 "register_operand" "=r")
1224 (abs:SI (match_operand:SI 1 "register_operand" "r")))]
1225 ""
1226 "or,>= %%r0,%1,%0\;subi 0,%0,%0"
1227 [(set_attr "type" "multi")
1228 (set_attr "length" "8")])
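
;; A reading of the abssi2 sequence above (a sketch; the ",>=" completer
;; nullifies the following instruction when the condition holds on the
;; result): "or,>= %r0,%1,%0" copies %1 into %0 and skips the negation
;; when the value is already non-negative, while "subi 0,%0,%0" negates
;; negative values, yielding the absolute value in two instructions.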
1229
1230 (define_insn "absdi2"
1231 [(set (match_operand:DI 0 "register_operand" "=r")
1232 (abs:DI (match_operand:DI 1 "register_operand" "r")))]
1233 "TARGET_64BIT"
1234 "or,*>= %%r0,%1,%0\;subi 0,%0,%0"
1235 [(set_attr "type" "multi")
1236 (set_attr "length" "8")])
1237
1238 ;;; Experimental conditional move patterns
1239
1240 (define_expand "movsicc"
1241 [(set (match_operand:SI 0 "register_operand" "")
1242 (if_then_else:SI
1243 (match_operator 1 "comparison_operator"
1244 [(match_dup 4)
1245 (match_dup 5)])
1246 (match_operand:SI 2 "reg_or_cint_move_operand" "")
1247 (match_operand:SI 3 "reg_or_cint_move_operand" "")))]
1248 ""
1249 "
1250 {
1251 enum rtx_code code = GET_CODE (operands[1]);
1252
1253 if (hppa_branch_type != CMP_SI)
1254 FAIL;
1255
1256 if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
1257 || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
1258 FAIL;
1259
1260 /* operands[1] is currently the result of compare_from_rtx. We want to
1261 emit a compare of the original operands. */
1262 operands[1] = gen_rtx_fmt_ee (code, SImode, hppa_compare_op0, hppa_compare_op1);
1263 operands[4] = hppa_compare_op0;
1264 operands[5] = hppa_compare_op1;
1265 }")
1266
1267 ;; We used to accept any register for op1.
1268 ;;
1269 ;; However, it loses sometimes because the compiler will end up using
1270 ;; different registers for op0 and op1 in some critical cases. local-alloc
1271 ;; will not tie op0 and op1 because op0 is used in multiple basic blocks.
1272 ;;
1273 ;; If/when global register allocation supports tying we should allow any
1274 ;; register for op1 again.
1275 (define_insn ""
1276 [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
1277 (if_then_else:SI
1278 (match_operator 2 "comparison_operator"
1279 [(match_operand:SI 3 "register_operand" "r,r,r,r")
1280 (match_operand:SI 4 "arith11_operand" "rI,rI,rI,rI")])
1281 (match_operand:SI 1 "reg_or_cint_move_operand" "0,J,N,K")
1282 (const_int 0)))]
1283 ""
1284 "@
1285 {com%I4clr|cmp%I4clr},%S2 %4,%3,%%r0\;ldi 0,%0
1286 {com%I4clr|cmp%I4clr},%B2 %4,%3,%0\;ldi %1,%0
1287 {com%I4clr|cmp%I4clr},%B2 %4,%3,%0\;ldil L'%1,%0
1288 {com%I4clr|cmp%I4clr},%B2 %4,%3,%0\;{zdepi|depwi,z} %Z1,%0"
1289 [(set_attr "type" "multi,multi,multi,nullshift")
1290 (set_attr "length" "8,8,8,8")])
1291
1292 (define_insn ""
1293 [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r")
1294 (if_then_else:SI
1295 (match_operator 5 "comparison_operator"
1296 [(match_operand:SI 3 "register_operand" "r,r,r,r,r,r,r,r")
1297 (match_operand:SI 4 "arith11_operand" "rI,rI,rI,rI,rI,rI,rI,rI")])
1298 (match_operand:SI 1 "reg_or_cint_move_operand" "0,0,0,0,r,J,N,K")
1299 (match_operand:SI 2 "reg_or_cint_move_operand" "r,J,N,K,0,0,0,0")))]
1300 ""
1301 "@
1302 {com%I4clr|cmp%I4clr},%S5 %4,%3,%%r0\;copy %2,%0
1303 {com%I4clr|cmp%I4clr},%S5 %4,%3,%%r0\;ldi %2,%0
1304 {com%I4clr|cmp%I4clr},%S5 %4,%3,%%r0\;ldil L'%2,%0
1305 {com%I4clr|cmp%I4clr},%S5 %4,%3,%%r0\;{zdepi|depwi,z} %Z2,%0
1306 {com%I4clr|cmp%I4clr},%B5 %4,%3,%%r0\;copy %1,%0
1307 {com%I4clr|cmp%I4clr},%B5 %4,%3,%%r0\;ldi %1,%0
1308 {com%I4clr|cmp%I4clr},%B5 %4,%3,%%r0\;ldil L'%1,%0
1309 {com%I4clr|cmp%I4clr},%B5 %4,%3,%%r0\;{zdepi|depwi,z} %Z1,%0"
1310 [(set_attr "type" "multi,multi,multi,nullshift,multi,multi,multi,nullshift")
1311 (set_attr "length" "8,8,8,8,8,8,8,8")])
1312
1313 (define_expand "movdicc"
1314 [(set (match_operand:DI 0 "register_operand" "")
1315 (if_then_else:DI
1316 (match_operator 1 "comparison_operator"
1317 [(match_dup 4)
1318 (match_dup 5)])
1319 (match_operand:DI 2 "reg_or_cint_move_operand" "")
1320 (match_operand:DI 3 "reg_or_cint_move_operand" "")))]
1321 "TARGET_64BIT"
1322 "
1323 {
1324 enum rtx_code code = GET_CODE (operands[1]);
1325
1326 if (hppa_branch_type != CMP_SI)
1327 FAIL;
1328
1329 if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
1330 || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
1331 FAIL;
1332
1333 /* operands[1] is currently the result of compare_from_rtx. We want to
1334 emit a compare of the original operands. */
1335 operands[1] = gen_rtx_fmt_ee (code, DImode, hppa_compare_op0, hppa_compare_op1);
1336 operands[4] = hppa_compare_op0;
1337 operands[5] = hppa_compare_op1;
1338 }")
1339
1340 ; We need the first constraint alternative in order to avoid
1341 ; earlyclobbers on all other alternatives.
1342 (define_insn ""
1343 [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r")
1344 (if_then_else:DI
1345 (match_operator 2 "comparison_operator"
1346 [(match_operand:DI 3 "register_operand" "r,r,r,r,r")
1347 (match_operand:DI 4 "arith11_operand" "rI,rI,rI,rI,rI")])
1348 (match_operand:DI 1 "reg_or_cint_move_operand" "0,r,J,N,K")
1349 (const_int 0)))]
1350 "TARGET_64BIT"
1351 "@
1352 cmp%I4clr,*%S2 %4,%3,%%r0\;ldi 0,%0
1353 cmp%I4clr,*%B2 %4,%3,%0\;copy %1,%0
1354 cmp%I4clr,*%B2 %4,%3,%0\;ldi %1,%0
1355 cmp%I4clr,*%B2 %4,%3,%0\;ldil L'%1,%0
1356 cmp%I4clr,*%B2 %4,%3,%0\;depdi,z %z1,%0"
1357 [(set_attr "type" "multi,multi,multi,multi,nullshift")
1358 (set_attr "length" "8,8,8,8,8")])
1359
1360 (define_insn ""
1361 [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r,r")
1362 (if_then_else:DI
1363 (match_operator 5 "comparison_operator"
1364 [(match_operand:DI 3 "register_operand" "r,r,r,r,r,r,r,r")
1365 (match_operand:DI 4 "arith11_operand" "rI,rI,rI,rI,rI,rI,rI,rI")])
1366 (match_operand:DI 1 "reg_or_cint_move_operand" "0,0,0,0,r,J,N,K")
1367 (match_operand:DI 2 "reg_or_cint_move_operand" "r,J,N,K,0,0,0,0")))]
1368 "TARGET_64BIT"
1369 "@
1370 cmp%I4clr,*%S5 %4,%3,%%r0\;copy %2,%0
1371 cmp%I4clr,*%S5 %4,%3,%%r0\;ldi %2,%0
1372 cmp%I4clr,*%S5 %4,%3,%%r0\;ldil L'%2,%0
1373 cmp%I4clr,*%S5 %4,%3,%%r0\;depdi,z %z2,%0
1374 cmp%I4clr,*%B5 %4,%3,%%r0\;copy %1,%0
1375 cmp%I4clr,*%B5 %4,%3,%%r0\;ldi %1,%0
1376 cmp%I4clr,*%B5 %4,%3,%%r0\;ldil L'%1,%0
1377 cmp%I4clr,*%B5 %4,%3,%%r0\;depdi,z %z1,%0"
1378 [(set_attr "type" "multi,multi,multi,nullshift,multi,multi,multi,nullshift")
1379 (set_attr "length" "8,8,8,8,8,8,8,8")])
1380
1381 ;; Conditional Branches
1382
1383 (define_expand "beq"
1384 [(set (pc)
1385 (if_then_else (eq (match_dup 1) (match_dup 2))
1386 (label_ref (match_operand 0 "" ""))
1387 (pc)))]
1388 ""
1389 "
1390 {
1391 if (hppa_branch_type != CMP_SI)
1392 {
1393 emit_insn (gen_cmp_fp (EQ, hppa_compare_op0, hppa_compare_op1));
1394 emit_bcond_fp (NE, operands[0]);
1395 DONE;
1396 }
1397 /* set up operands from compare. */
1398 operands[1] = hppa_compare_op0;
1399 operands[2] = hppa_compare_op1;
1400 /* fall through and generate default code */
1401 }")
1402
1403 (define_expand "bne"
1404 [(set (pc)
1405 (if_then_else (ne (match_dup 1) (match_dup 2))
1406 (label_ref (match_operand 0 "" ""))
1407 (pc)))]
1408 ""
1409 "
1410 {
1411 if (hppa_branch_type != CMP_SI)
1412 {
1413 emit_insn (gen_cmp_fp (NE, hppa_compare_op0, hppa_compare_op1));
1414 emit_bcond_fp (NE, operands[0]);
1415 DONE;
1416 }
1417 operands[1] = hppa_compare_op0;
1418 operands[2] = hppa_compare_op1;
1419 }")
1420
1421 (define_expand "bgt"
1422 [(set (pc)
1423 (if_then_else (gt (match_dup 1) (match_dup 2))
1424 (label_ref (match_operand 0 "" ""))
1425 (pc)))]
1426 ""
1427 "
1428 {
1429 if (hppa_branch_type != CMP_SI)
1430 {
1431 emit_insn (gen_cmp_fp (GT, hppa_compare_op0, hppa_compare_op1));
1432 emit_bcond_fp (NE, operands[0]);
1433 DONE;
1434 }
1435 operands[1] = hppa_compare_op0;
1436 operands[2] = hppa_compare_op1;
1437 }")
1438
1439 (define_expand "blt"
1440 [(set (pc)
1441 (if_then_else (lt (match_dup 1) (match_dup 2))
1442 (label_ref (match_operand 0 "" ""))
1443 (pc)))]
1444 ""
1445 "
1446 {
1447 if (hppa_branch_type != CMP_SI)
1448 {
1449 emit_insn (gen_cmp_fp (LT, hppa_compare_op0, hppa_compare_op1));
1450 emit_bcond_fp (NE, operands[0]);
1451 DONE;
1452 }
1453 operands[1] = hppa_compare_op0;
1454 operands[2] = hppa_compare_op1;
1455 }")
1456
1457 (define_expand "bge"
1458 [(set (pc)
1459 (if_then_else (ge (match_dup 1) (match_dup 2))
1460 (label_ref (match_operand 0 "" ""))
1461 (pc)))]
1462 ""
1463 "
1464 {
1465 if (hppa_branch_type != CMP_SI)
1466 {
1467 emit_insn (gen_cmp_fp (GE, hppa_compare_op0, hppa_compare_op1));
1468 emit_bcond_fp (NE, operands[0]);
1469 DONE;
1470 }
1471 operands[1] = hppa_compare_op0;
1472 operands[2] = hppa_compare_op1;
1473 }")
1474
1475 (define_expand "ble"
1476 [(set (pc)
1477 (if_then_else (le (match_dup 1) (match_dup 2))
1478 (label_ref (match_operand 0 "" ""))
1479 (pc)))]
1480 ""
1481 "
1482 {
1483 if (hppa_branch_type != CMP_SI)
1484 {
1485 emit_insn (gen_cmp_fp (LE, hppa_compare_op0, hppa_compare_op1));
1486 emit_bcond_fp (NE, operands[0]);
1487 DONE;
1488 }
1489 operands[1] = hppa_compare_op0;
1490 operands[2] = hppa_compare_op1;
1491 }")
1492
1493 (define_expand "bgtu"
1494 [(set (pc)
1495 (if_then_else (gtu (match_dup 1) (match_dup 2))
1496 (label_ref (match_operand 0 "" ""))
1497 (pc)))]
1498 ""
1499 "
1500 {
1501 if (hppa_branch_type != CMP_SI)
1502 FAIL;
1503 operands[1] = hppa_compare_op0;
1504 operands[2] = hppa_compare_op1;
1505 }")
1506
1507 (define_expand "bltu"
1508 [(set (pc)
1509 (if_then_else (ltu (match_dup 1) (match_dup 2))
1510 (label_ref (match_operand 0 "" ""))
1511 (pc)))]
1512 ""
1513 "
1514 {
1515 if (hppa_branch_type != CMP_SI)
1516 FAIL;
1517 operands[1] = hppa_compare_op0;
1518 operands[2] = hppa_compare_op1;
1519 }")
1520
1521 (define_expand "bgeu"
1522 [(set (pc)
1523 (if_then_else (geu (match_dup 1) (match_dup 2))
1524 (label_ref (match_operand 0 "" ""))
1525 (pc)))]
1526 ""
1527 "
1528 {
1529 if (hppa_branch_type != CMP_SI)
1530 FAIL;
1531 operands[1] = hppa_compare_op0;
1532 operands[2] = hppa_compare_op1;
1533 }")
1534
1535 (define_expand "bleu"
1536 [(set (pc)
1537 (if_then_else (leu (match_dup 1) (match_dup 2))
1538 (label_ref (match_operand 0 "" ""))
1539 (pc)))]
1540 ""
1541 "
1542 {
1543 if (hppa_branch_type != CMP_SI)
1544 FAIL;
1545 operands[1] = hppa_compare_op0;
1546 operands[2] = hppa_compare_op1;
1547 }")
1548
1549 (define_expand "bltgt"
1550 [(set (pc)
1551 (if_then_else (ltgt (match_dup 1) (match_dup 2))
1552 (label_ref (match_operand 0 "" ""))
1553 (pc)))]
1554 ""
1555 "
1556 {
1557 if (hppa_branch_type == CMP_SI)
1558 FAIL;
1559 emit_insn (gen_cmp_fp (LTGT, hppa_compare_op0, hppa_compare_op1));
1560 emit_bcond_fp (NE, operands[0]);
1561 DONE;
1562 }")
1563
1564 (define_expand "bunle"
1565 [(set (pc)
1566 (if_then_else (unle (match_dup 1) (match_dup 2))
1567 (label_ref (match_operand 0 "" ""))
1568 (pc)))]
1569 ""
1570 "
1571 {
1572 if (hppa_branch_type == CMP_SI)
1573 FAIL;
1574 emit_insn (gen_cmp_fp (UNLE, hppa_compare_op0, hppa_compare_op1));
1575 emit_bcond_fp (NE, operands[0]);
1576 DONE;
1577 }")
1578
1579 (define_expand "bunlt"
1580 [(set (pc)
1581 (if_then_else (unlt (match_dup 1) (match_dup 2))
1582 (label_ref (match_operand 0 "" ""))
1583 (pc)))]
1584 ""
1585 "
1586 {
1587 if (hppa_branch_type == CMP_SI)
1588 FAIL;
1589 emit_insn (gen_cmp_fp (UNLT, hppa_compare_op0, hppa_compare_op1));
1590 emit_bcond_fp (NE, operands[0]);
1591 DONE;
1592 }")
1593
1594 (define_expand "bunge"
1595 [(set (pc)
1596 (if_then_else (unge (match_dup 1) (match_dup 2))
1597 (label_ref (match_operand 0 "" ""))
1598 (pc)))]
1599 ""
1600 "
1601 {
1602 if (hppa_branch_type == CMP_SI)
1603 FAIL;
1604 emit_insn (gen_cmp_fp (UNGE, hppa_compare_op0, hppa_compare_op1));
1605 emit_bcond_fp (NE, operands[0]);
1606 DONE;
1607 }")
1608
1609 (define_expand "bungt"
1610 [(set (pc)
1611 (if_then_else (ungt (match_dup 1) (match_dup 2))
1612 (label_ref (match_operand 0 "" ""))
1613 (pc)))]
1614 ""
1615 "
1616 {
1617 if (hppa_branch_type == CMP_SI)
1618 FAIL;
1619 emit_insn (gen_cmp_fp (UNGT, hppa_compare_op0, hppa_compare_op1));
1620 emit_bcond_fp (NE, operands[0]);
1621 DONE;
1622 }")
1623
1624 (define_expand "buneq"
1625 [(set (pc)
1626 (if_then_else (uneq (match_dup 1) (match_dup 2))
1627 (label_ref (match_operand 0 "" ""))
1628 (pc)))]
1629 ""
1630 "
1631 {
1632 if (hppa_branch_type == CMP_SI)
1633 FAIL;
1634 emit_insn (gen_cmp_fp (UNEQ, hppa_compare_op0, hppa_compare_op1));
1635 emit_bcond_fp (NE, operands[0]);
1636 DONE;
1637 }")
1638
1639 (define_expand "bunordered"
1640 [(set (pc)
1641 (if_then_else (unordered (match_dup 1) (match_dup 2))
1642 (label_ref (match_operand 0 "" ""))
1643 (pc)))]
1644 ""
1645 "
1646 {
1647 if (hppa_branch_type == CMP_SI)
1648 FAIL;
1649 emit_insn (gen_cmp_fp (UNORDERED, hppa_compare_op0, hppa_compare_op1));
1650 emit_bcond_fp (NE, operands[0]);
1651 DONE;
1652 }")
1653
1654 (define_expand "bordered"
1655 [(set (pc)
1656 (if_then_else (ordered (match_dup 1) (match_dup 2))
1657 (label_ref (match_operand 0 "" ""))
1658 (pc)))]
1659 ""
1660 "
1661 {
1662 if (hppa_branch_type == CMP_SI)
1663 FAIL;
1664 emit_insn (gen_cmp_fp (ORDERED, hppa_compare_op0, hppa_compare_op1));
1665 emit_bcond_fp (NE, operands[0]);
1666 DONE;
1667 }")
1668
1669 ;; Match the branch patterns.
1670
1671
1672 ;; Note a long backward conditional branch with an annulled delay slot
1673 ;; has a length of 12.
1674 (define_insn ""
1675 [(set (pc)
1676 (if_then_else
1677 (match_operator 3 "comparison_operator"
1678 [(match_operand:SI 1 "reg_or_0_operand" "rM")
1679 (match_operand:SI 2 "arith5_operand" "rL")])
1680 (label_ref (match_operand 0 "" ""))
1681 (pc)))]
1682 ""
1683 "*
1684 {
1685 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1686 get_attr_length (insn), 0, insn);
1687 }"
1688 [(set_attr "type" "cbranch")
1689 (set (attr "length")
1690 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1691 (const_int 8184))
1692 (const_int 4)
1693 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1694 (const_int 262100))
1695 (const_int 8)
1696 (eq (symbol_ref "flag_pic") (const_int 0))
1697 (const_int 20)]
1698 (const_int 28)))])
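;; A rough reading of the length cond above, as a C sketch (helper name and
;; signature are hypothetical, not part of the compiler): given the signed
;; byte displacement from pc+8 to the target, pick the shortest branch
;; sequence that can reach it.
;;
;;   /* Length in bytes chosen by the cond above.  */
;;   static int
;;   cbranch_length (long delta, int pic)
;;   {
;;     long d = delta < 0 ? -delta : delta;
;;     if (d < 8184)          /* short conditional branch reaches it */
;;       return 4;
;;     if (d < 262100)        /* medium-range sequence */
;;       return 8;
;;     return pic ? 28 : 20;  /* long branch; PIC needs more insns */
;;   }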
1699
1700 ;; Match the negated branch.
1701
1702 (define_insn ""
1703 [(set (pc)
1704 (if_then_else
1705 (match_operator 3 "comparison_operator"
1706 [(match_operand:SI 1 "reg_or_0_operand" "rM")
1707 (match_operand:SI 2 "arith5_operand" "rL")])
1708 (pc)
1709 (label_ref (match_operand 0 "" ""))))]
1710 ""
1711 "*
1712 {
1713 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1714 get_attr_length (insn), 1, insn);
1715 }"
1716 [(set_attr "type" "cbranch")
1717 (set (attr "length")
1718 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1719 (const_int 8184))
1720 (const_int 4)
1721 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1722 (const_int 262100))
1723 (const_int 8)
1724 (eq (symbol_ref "flag_pic") (const_int 0))
1725 (const_int 20)]
1726 (const_int 28)))])
1727
1728 (define_insn ""
1729 [(set (pc)
1730 (if_then_else
1731 (match_operator 3 "comparison_operator"
1732 [(match_operand:DI 1 "reg_or_0_operand" "rM")
1733 (match_operand:DI 2 "reg_or_0_operand" "rM")])
1734 (label_ref (match_operand 0 "" ""))
1735 (pc)))]
1736 "TARGET_64BIT"
1737 "*
1738 {
1739 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1740 get_attr_length (insn), 0, insn);
1741 }"
1742 [(set_attr "type" "cbranch")
1743 (set (attr "length")
1744 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1745 (const_int 8184))
1746 (const_int 4)
1747 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1748 (const_int 262100))
1749 (const_int 8)
1750 (eq (symbol_ref "flag_pic") (const_int 0))
1751 (const_int 20)]
1752 (const_int 28)))])
1753
1754 ;; Match the negated branch.
1755
1756 (define_insn ""
1757 [(set (pc)
1758 (if_then_else
1759 (match_operator 3 "comparison_operator"
1760 [(match_operand:DI 1 "reg_or_0_operand" "rM")
1761 (match_operand:DI 2 "reg_or_0_operand" "rM")])
1762 (pc)
1763 (label_ref (match_operand 0 "" ""))))]
1764 "TARGET_64BIT"
1765 "*
1766 {
1767 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1768 get_attr_length (insn), 1, insn);
1769 }"
1770 [(set_attr "type" "cbranch")
1771 (set (attr "length")
1772 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1773 (const_int 8184))
1774 (const_int 4)
1775 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1776 (const_int 262100))
1777 (const_int 8)
1778 (eq (symbol_ref "flag_pic") (const_int 0))
1779 (const_int 20)]
1780 (const_int 28)))])
1781 (define_insn ""
1782 [(set (pc)
1783 (if_then_else
1784 (match_operator 3 "cmpib_comparison_operator"
1785 [(match_operand:DI 1 "reg_or_0_operand" "rM")
1786 (match_operand:DI 2 "arith5_operand" "rL")])
1787 (label_ref (match_operand 0 "" ""))
1788 (pc)))]
1789 "TARGET_64BIT"
1790 "*
1791 {
1792 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1793 get_attr_length (insn), 0, insn);
1794 }"
1795 [(set_attr "type" "cbranch")
1796 (set (attr "length")
1797 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1798 (const_int 8184))
1799 (const_int 4)
1800 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1801 (const_int 262100))
1802 (const_int 8)
1803 (eq (symbol_ref "flag_pic") (const_int 0))
1804 (const_int 20)]
1805 (const_int 28)))])
1806
1807 ;; Match the negated branch.
1808
1809 (define_insn ""
1810 [(set (pc)
1811 (if_then_else
1812 (match_operator 3 "cmpib_comparison_operator"
1813 [(match_operand:DI 1 "reg_or_0_operand" "rM")
1814 (match_operand:DI 2 "arith5_operand" "rL")])
1815 (pc)
1816 (label_ref (match_operand 0 "" ""))))]
1817 "TARGET_64BIT"
1818 "*
1819 {
1820 return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
1821 get_attr_length (insn), 1, insn);
1822 }"
1823 [(set_attr "type" "cbranch")
1824 (set (attr "length")
1825 (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1826 (const_int 8184))
1827 (const_int 4)
1828 (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
1829 (const_int 262100))
1830 (const_int 8)
1831 (eq (symbol_ref "flag_pic") (const_int 0))
1832 (const_int 20)]
1833 (const_int 28)))])
1834
1835 ;; Branch on Bit patterns.
1836 (define_insn ""
1837 [(set (pc)
1838 (if_then_else
1839 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
1840 (const_int 1)
1841 (match_operand:SI 1 "uint5_operand" ""))
1842 (const_int 0))
1843 (label_ref (match_operand 2 "" ""))
1844 (pc)))]
1845 ""
1846 "*
1847 {
1848 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1849 get_attr_length (insn), 0, insn, 0);
1850 }"
1851 [(set_attr "type" "cbranch")
1852 (set (attr "length")
1853 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1854 (const_int 8184))
1855 (const_int 4)
1856 (const_int 8)))])
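;; As a C sketch of the condition tested above (helper name hypothetical,
;; assuming the usual PA bit numbering where bit 0 is the most significant
;; bit of the word):
;;
;;   /* Nonzero when bit POS, counted from the MSB, of X is set.  */
;;   static int
;;   bb_bit_set_p (unsigned int x, int pos)
;;   {
;;     return (x >> (31 - pos)) & 1;
;;   }
;;
;; The patterns that follow are the DImode, negated, and "branch on bit
;; clear" variants of the same test.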
1857
1858 (define_insn ""
1859 [(set (pc)
1860 (if_then_else
1861 (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
1862 (const_int 1)
1863 (match_operand:DI 1 "uint32_operand" ""))
1864 (const_int 0))
1865 (label_ref (match_operand 2 "" ""))
1866 (pc)))]
1867 "TARGET_64BIT"
1868 "*
1869 {
1870 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1871 get_attr_length (insn), 0, insn, 0);
1872 }"
1873 [(set_attr "type" "cbranch")
1874 (set (attr "length")
1875 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1876 (const_int 8184))
1877 (const_int 4)
1878 (const_int 8)))])
1879
1880 (define_insn ""
1881 [(set (pc)
1882 (if_then_else
1883 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
1884 (const_int 1)
1885 (match_operand:SI 1 "uint5_operand" ""))
1886 (const_int 0))
1887 (pc)
1888 (label_ref (match_operand 2 "" ""))))]
1889 ""
1890 "*
1891 {
1892 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1893 get_attr_length (insn), 1, insn, 0);
1894 }"
1895 [(set_attr "type" "cbranch")
1896 (set (attr "length")
1897 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1898 (const_int 8184))
1899 (const_int 4)
1900 (const_int 8)))])
1901
1902 (define_insn ""
1903 [(set (pc)
1904 (if_then_else
1905 (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
1906 (const_int 1)
1907 (match_operand:DI 1 "uint32_operand" ""))
1908 (const_int 0))
1909 (pc)
1910 (label_ref (match_operand 2 "" ""))))]
1911 "TARGET_64BIT"
1912 "*
1913 {
1914 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1915 get_attr_length (insn), 1, insn, 0);
1916 }"
1917 [(set_attr "type" "cbranch")
1918 (set (attr "length")
1919 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1920 (const_int 8184))
1921 (const_int 4)
1922 (const_int 8)))])
1923
1924 (define_insn ""
1925 [(set (pc)
1926 (if_then_else
1927 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
1928 (const_int 1)
1929 (match_operand:SI 1 "uint5_operand" ""))
1930 (const_int 0))
1931 (label_ref (match_operand 2 "" ""))
1932 (pc)))]
1933 ""
1934 "*
1935 {
1936 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1937 get_attr_length (insn), 0, insn, 1);
1938 }"
1939 [(set_attr "type" "cbranch")
1940 (set (attr "length")
1941 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1942 (const_int 8184))
1943 (const_int 4)
1944 (const_int 8)))])
1945
1946 (define_insn ""
1947 [(set (pc)
1948 (if_then_else
1949 (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
1950 (const_int 1)
1951 (match_operand:DI 1 "uint32_operand" ""))
1952 (const_int 0))
1953 (label_ref (match_operand 2 "" ""))
1954 (pc)))]
1955 "TARGET_64BIT"
1956 "*
1957 {
1958 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1959 get_attr_length (insn), 0, insn, 1);
1960 }"
1961 [(set_attr "type" "cbranch")
1962 (set (attr "length")
1963 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1964 (const_int 8184))
1965 (const_int 4)
1966 (const_int 8)))])
1967
1968 (define_insn ""
1969 [(set (pc)
1970 (if_then_else
1971 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
1972 (const_int 1)
1973 (match_operand:SI 1 "uint5_operand" ""))
1974 (const_int 0))
1975 (pc)
1976 (label_ref (match_operand 2 "" ""))))]
1977 ""
1978 "*
1979 {
1980 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
1981 get_attr_length (insn), 1, insn, 1);
1982 }"
1983 [(set_attr "type" "cbranch")
1984 (set (attr "length")
1985 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
1986 (const_int 8184))
1987 (const_int 4)
1988 (const_int 8)))])
1989
1990 (define_insn ""
1991 [(set (pc)
1992 (if_then_else
1993 (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
1994 (const_int 1)
1995 (match_operand:DI 1 "uint32_operand" ""))
1996 (const_int 0))
1997 (pc)
1998 (label_ref (match_operand 2 "" ""))))]
1999 "TARGET_64BIT"
2000 "*
2001 {
2002 return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
2003 get_attr_length (insn), 1, insn, 1);
2004 }"
2005 [(set_attr "type" "cbranch")
2006 (set (attr "length")
2007 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2008 (const_int 8184))
2009 (const_int 4)
2010 (const_int 8)))])
2011
2012 ;; Branch on Variable Bit patterns.
2013 (define_insn ""
2014 [(set (pc)
2015 (if_then_else
2016 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
2017 (const_int 1)
2018 (match_operand:SI 1 "register_operand" "q"))
2019 (const_int 0))
2020 (label_ref (match_operand 2 "" ""))
2021 (pc)))]
2022 ""
2023 "*
2024 {
2025 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2026 get_attr_length (insn), 0, insn, 0);
2027 }"
2028 [(set_attr "type" "cbranch")
2029 (set (attr "length")
2030 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2031 (const_int 8184))
2032 (const_int 4)
2033 (const_int 8)))])
2034
2035 (define_insn ""
2036 [(set (pc)
2037 (if_then_else
2038 (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
2039 (const_int 1)
2040 (match_operand:DI 1 "register_operand" "q"))
2041 (const_int 0))
2042 (label_ref (match_operand 2 "" ""))
2043 (pc)))]
2044 "TARGET_64BIT"
2045 "*
2046 {
2047 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2048 get_attr_length (insn), 0, insn, 0);
2049 }"
2050 [(set_attr "type" "cbranch")
2051 (set (attr "length")
2052 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2053 (const_int 8184))
2054 (const_int 4)
2055 (const_int 8)))])
2056
2057 (define_insn ""
2058 [(set (pc)
2059 (if_then_else
2060 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
2061 (const_int 1)
2062 (match_operand:SI 1 "register_operand" "q"))
2063 (const_int 0))
2064 (pc)
2065 (label_ref (match_operand 2 "" ""))))]
2066 ""
2067 "*
2068 {
2069 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2070 get_attr_length (insn), 1, insn, 0);
2071 }"
2072 [(set_attr "type" "cbranch")
2073 (set (attr "length")
2074 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2075 (const_int 8184))
2076 (const_int 4)
2077 (const_int 8)))])
2078
2079 (define_insn ""
2080 [(set (pc)
2081 (if_then_else
2082 (ne (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
2083 (const_int 1)
2084 (match_operand:DI 1 "register_operand" "q"))
2085 (const_int 0))
2086 (pc)
2087 (label_ref (match_operand 2 "" ""))))]
2088 "TARGET_64BIT"
2089 "*
2090 {
2091 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2092 get_attr_length (insn), 1, insn, 0);
2093 }"
2094 [(set_attr "type" "cbranch")
2095 (set (attr "length")
2096 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2097 (const_int 8184))
2098 (const_int 4)
2099 (const_int 8)))])
2100
2101 (define_insn ""
2102 [(set (pc)
2103 (if_then_else
2104 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
2105 (const_int 1)
2106 (match_operand:SI 1 "register_operand" "q"))
2107 (const_int 0))
2108 (label_ref (match_operand 2 "" ""))
2109 (pc)))]
2110 ""
2111 "*
2112 {
2113 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2114 get_attr_length (insn), 0, insn, 1);
2115 }"
2116 [(set_attr "type" "cbranch")
2117 (set (attr "length")
2118 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2119 (const_int 8184))
2120 (const_int 4)
2121 (const_int 8)))])
2122
2123 (define_insn ""
2124 [(set (pc)
2125 (if_then_else
2126 (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
2127 (const_int 1)
2128 (match_operand:DI 1 "register_operand" "q"))
2129 (const_int 0))
2130 (label_ref (match_operand 2 "" ""))
2131 (pc)))]
2132 "TARGET_64BIT"
2133 "*
2134 {
2135 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2136 get_attr_length (insn), 0, insn, 1);
2137 }"
2138 [(set_attr "type" "cbranch")
2139 (set (attr "length")
2140 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2141 (const_int 8184))
2142 (const_int 4)
2143 (const_int 8)))])
2144
2145 (define_insn ""
2146 [(set (pc)
2147 (if_then_else
2148 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
2149 (const_int 1)
2150 (match_operand:SI 1 "register_operand" "q"))
2151 (const_int 0))
2152 (pc)
2153 (label_ref (match_operand 2 "" ""))))]
2154 ""
2155 "*
2156 {
2157 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2158 get_attr_length (insn), 1, insn, 1);
2159 }"
2160 [(set_attr "type" "cbranch")
2161 (set (attr "length")
2162 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2163 (const_int 8184))
2164 (const_int 4)
2165 (const_int 8)))])
2166
2167 (define_insn ""
2168 [(set (pc)
2169 (if_then_else
2170 (eq (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
2171 (const_int 1)
2172 (match_operand:DI 1 "register_operand" "q"))
2173 (const_int 0))
2174 (pc)
2175 (label_ref (match_operand 2 "" ""))))]
2176 "TARGET_64BIT"
2177 "*
2178 {
2179 return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
2180 get_attr_length (insn), 1, insn, 1);
2181 }"
2182 [(set_attr "type" "cbranch")
2183 (set (attr "length")
2184 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
2185 (const_int 8184))
2186 (const_int 4)
2187 (const_int 8)))])
2188
2189 ;; Floating point branches
2190 (define_insn ""
2191 [(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
2192 (label_ref (match_operand 0 "" ""))
2193 (pc)))]
2194 "! TARGET_SOFT_FLOAT"
2195 "*
2196 {
2197 if (INSN_ANNULLED_BRANCH_P (insn))
2198 return \"ftest\;b,n %0\";
2199 else
2200 return \"ftest\;b%* %0\";
2201 }"
2202 [(set_attr "type" "fbranch")
2203 (set_attr "length" "8")])
2204
2205 (define_insn ""
2206 [(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
2207 (pc)
2208 (label_ref (match_operand 0 "" ""))))]
2209 "! TARGET_SOFT_FLOAT"
2210 "*
2211 {
2212 if (INSN_ANNULLED_BRANCH_P (insn))
2213 return \"ftest\;add,tr %%r0,%%r0,%%r0\;b,n %0\";
2214 else
2215 return \"ftest\;add,tr %%r0,%%r0,%%r0\;b%* %0\";
2216 }"
2217 [(set_attr "type" "fbranch")
2218 (set_attr "length" "12")])
2219
2220 ;; Move instructions
2221
2222 (define_expand "movsi"
2223 [(set (match_operand:SI 0 "general_operand" "")
2224 (match_operand:SI 1 "general_operand" ""))]
2225 ""
2226 "
2227 {
2228 if (emit_move_sequence (operands, SImode, 0))
2229 DONE;
2230 }")
2231
2232 ;; Reloading an SImode or DImode value requires a scratch register
2233 ;; when going into or out of the floating point registers.
2234
2235 (define_expand "reload_insi"
2236 [(set (match_operand:SI 0 "register_operand" "=Z")
2237 (match_operand:SI 1 "non_hard_reg_operand" ""))
2238 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
2239 ""
2240 "
2241 {
2242 if (emit_move_sequence (operands, SImode, operands[2]))
2243 DONE;
2244
2245 /* We don't want the clobber emitted, so handle this ourselves. */
2246 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
2247 DONE;
2248 }")
2249
2250 (define_expand "reload_outsi"
2251 [(set (match_operand:SI 0 "non_hard_reg_operand" "")
2252 (match_operand:SI 1 "register_operand" "Z"))
2253 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
2254 ""
2255 "
2256 {
2257 if (emit_move_sequence (operands, SImode, operands[2]))
2258 DONE;
2259
2260 /* We don't want the clobber emitted, so handle this ourselves. */
2261 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
2262 DONE;
2263 }")
2264
2265 (define_insn ""
2266 [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand"
2267 "=r,r,r,r,r,r,Q,!*q,!f,f,*TR")
2268 (match_operand:SI 1 "move_operand"
2269 "A,r,J,N,K,RQ,rM,!rM,!fM,*RT,f"))]
2270 "(register_operand (operands[0], SImode)
2271 || reg_or_0_operand (operands[1], SImode))
2272 && ! TARGET_SOFT_FLOAT"
2273 "@
2274 ldw RT'%A1,%0
2275 copy %1,%0
2276 ldi %1,%0
2277 ldil L'%1,%0
2278 {zdepi|depwi,z} %Z1,%0
2279 ldw%M1 %1,%0
2280 stw%M0 %r1,%0
2281 mtsar %r1
2282 fcpy,sgl %f1,%0
2283 fldw%F1 %1,%0
2284 fstw%F0 %1,%0"
2285 [(set_attr "type" "load,move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
2286 (set_attr "pa_combine_type" "addmove")
2287 (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4")])
2288
2289 (define_insn ""
2290 [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand"
2291 "=r,r,r,r,r,r,Q,!*q")
2292 (match_operand:SI 1 "move_operand"
2293 "A,r,J,N,K,RQ,rM,!rM"))]
2294 "(register_operand (operands[0], SImode)
2295 || reg_or_0_operand (operands[1], SImode))
2296 && TARGET_SOFT_FLOAT"
2297 "@
2298 ldw RT'%A1,%0
2299 copy %1,%0
2300 ldi %1,%0
2301 ldil L'%1,%0
2302 {zdepi|depwi,z} %Z1,%0
2303 ldw%M1 %1,%0
2304 stw%M0 %r1,%0
2305 mtsar %r1"
2306 [(set_attr "type" "load,move,move,move,move,load,store,move")
2307 (set_attr "pa_combine_type" "addmove")
2308 (set_attr "length" "4,4,4,4,4,4,4,4")])
2309
2310 (define_insn ""
2311 [(set (match_operand:SI 0 "register_operand" "=r")
2312 (mem:SI (plus:SI (match_operand:SI 1 "basereg_operand" "r")
2313 (match_operand:SI 2 "register_operand" "r"))))]
2314 "! TARGET_DISABLE_INDEXING"
2315 "{ldwx|ldw} %2(%1),%0"
2316 [(set_attr "type" "load")
2317 (set_attr "length" "4")])
2318
2319 (define_insn ""
2320 [(set (match_operand:SI 0 "register_operand" "=r")
2321 (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
2322 (match_operand:SI 2 "basereg_operand" "r"))))]
2323 "! TARGET_DISABLE_INDEXING"
2324 "{ldwx|ldw} %1(%2),%0"
2325 [(set_attr "type" "load")
2326 (set_attr "length" "4")])
2327
2328 ;; Load or store with base-register modification.
2329
2330 (define_expand "pre_load"
2331 [(parallel [(set (match_operand:SI 0 "register_operand" "")
2332 (mem (plus (match_operand 1 "register_operand" "")
2333 (match_operand 2 "pre_cint_operand" ""))))
2334 (set (match_dup 1)
2335 (plus (match_dup 1) (match_dup 2)))])]
2336 ""
2337 "
2338 {
2339 if (TARGET_64BIT)
2340 {
2341 emit_insn (gen_pre_ldd (operands[0], operands[1], operands[2]));
2342 DONE;
2343 }
2344 emit_insn (gen_pre_ldw (operands[0], operands[1], operands[2]));
2345 DONE;
2346 }")
2347
2348 (define_insn "pre_ldw"
2349 [(set (match_operand:SI 0 "register_operand" "=r")
2350 (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "+r")
2351 (match_operand:SI 2 "pre_cint_operand" ""))))
2352 (set (match_dup 1)
2353 (plus:SI (match_dup 1) (match_dup 2)))]
2354 ""
2355 "*
2356 {
2357 if (INTVAL (operands[2]) < 0)
2358 return \"{ldwm|ldw,mb} %2(%1),%0\";
2359 return \"{ldws|ldw},mb %2(%1),%0\";
2360 }"
2361 [(set_attr "type" "load")
2362 (set_attr "length" "4")])
2363
2364 (define_insn "pre_ldd"
2365 [(set (match_operand:DI 0 "register_operand" "=r")
2366 (mem:DI (plus:DI (match_operand:DI 1 "register_operand" "+r")
2367 (match_operand:DI 2 "pre_cint_operand" ""))))
2368 (set (match_dup 1)
2369 (plus:DI (match_dup 1) (match_dup 2)))]
2370 "TARGET_64BIT"
2371 "ldd,mb %2(%1),%0"
2372 [(set_attr "type" "load")
2373 (set_attr "length" "4")])
2374
2375 (define_insn ""
2376 [(set (mem:SI (plus:SI (match_operand:SI 0 "register_operand" "+r")
2377 (match_operand:SI 1 "pre_cint_operand" "")))
2378 (match_operand:SI 2 "reg_or_0_operand" "rM"))
2379 (set (match_dup 0)
2380 (plus:SI (match_dup 0) (match_dup 1)))]
2381 ""
2382 "*
2383 {
2384 if (INTVAL (operands[1]) < 0)
2385 return \"{stwm|stw,mb} %r2,%1(%0)\";
2386 return \"{stws|stw},mb %r2,%1(%0)\";
2387 }"
2388 [(set_attr "type" "store")
2389 (set_attr "length" "4")])
2390
2391 (define_insn ""
2392 [(set (match_operand:SI 0 "register_operand" "=r")
2393 (mem:SI (match_operand:SI 1 "register_operand" "+r")))
2394 (set (match_dup 1)
2395 (plus:SI (match_dup 1)
2396 (match_operand:SI 2 "post_cint_operand" "")))]
2397 ""
2398 "*
2399 {
2400 if (INTVAL (operands[2]) > 0)
2401 return \"{ldwm|ldw,ma} %2(%1),%0\";
2402 return \"{ldws|ldw},ma %2(%1),%0\";
2403 }"
2404 [(set_attr "type" "load")
2405 (set_attr "length" "4")])
2406
2407 (define_expand "post_store"
2408 [(parallel [(set (mem (match_operand 0 "register_operand" ""))
2409 (match_operand 1 "reg_or_0_operand" ""))
2410 (set (match_dup 0)
2411 (plus (match_dup 0)
2412 (match_operand 2 "post_cint_operand" "")))])]
2413 ""
2414 "
2415 {
2416 if (TARGET_64BIT)
2417 {
2418 emit_insn (gen_post_std (operands[0], operands[1], operands[2]));
2419 DONE;
2420 }
2421 emit_insn (gen_post_stw (operands[0], operands[1], operands[2]));
2422 DONE;
2423 }")
2424
2425 (define_insn "post_stw"
2426 [(set (mem:SI (match_operand:SI 0 "register_operand" "+r"))
2427 (match_operand:SI 1 "reg_or_0_operand" "rM"))
2428 (set (match_dup 0)
2429 (plus:SI (match_dup 0)
2430 (match_operand:SI 2 "post_cint_operand" "")))]
2431 ""
2432 "*
2433 {
2434 if (INTVAL (operands[2]) > 0)
2435 return \"{stwm|stw,ma} %r1,%2(%0)\";
2436 return \"{stws|stw},ma %r1,%2(%0)\";
2437 }"
2438 [(set_attr "type" "store")
2439 (set_attr "length" "4")])
2440
2441 (define_insn "post_std"
2442 [(set (mem:DI (match_operand:DI 0 "register_operand" "+r"))
2443 (match_operand:DI 1 "reg_or_0_operand" "rM"))
2444 (set (match_dup 0)
2445 (plus:DI (match_dup 0)
2446 (match_operand:DI 2 "post_cint_operand" "")))]
2447 "TARGET_64BIT"
2448 "std,ma %r1,%2(%0)"
2449 [(set_attr "type" "store")
2450 (set_attr "length" "4")])
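;; For reference, a C sketch of the addressing semantics implemented by the
;; pre- and post-modify patterns above (function names hypothetical; BASE is
;; the register being auto-modified, DISP the constant byte displacement;
;; the SImode case is shown, the DImode forms are analogous):
;;
;;   /* pre_ldw/pre_ldd: the load uses base + disp, and the base register
;;      is left pointing at base + disp.  */
;;   static int
;;   pre_load_word (char **base, int disp)
;;   {
;;     *base += disp;
;;     return *(int *) *base;
;;   }
;;
;;   /* post_stw/post_std: the store goes through the old base, and the
;;      base register then advances by disp.  */
;;   static void
;;   post_store_word (char **base, int disp, int val)
;;   {
;;     *(int *) *base = val;
;;     *base += disp;
;;   }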
2451
2452 ;; For loading the address of a label while generating PIC code.
2453 ;; Note that since this pattern can be created at reload time (via movsi),
2454 ;; all the same rules for movsi apply here (no new pseudos, no temporaries).
2455 (define_insn ""
2456 [(set (match_operand 0 "pmode_register_operand" "=r")
2457 (match_operand 1 "pic_label_operand" ""))]
2458 "TARGET_PA_20"
2459 "*
2460 {
2461 rtx xoperands[3];
2462 extern FILE *asm_out_file;
2463
2464 xoperands[0] = operands[0];
2465 xoperands[1] = operands[1];
2466 xoperands[2] = gen_label_rtx ();
2467
2468 (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
2469 CODE_LABEL_NUMBER (xoperands[2]));
2470 output_asm_insn (\"mfia %0\", xoperands);
2471
2472 /* If we're trying to load the address of a label that happens to be
2473 close, then we can use a shorter sequence. */
2474 if (GET_CODE (operands[1]) == LABEL_REF
2475 && INSN_ADDRESSES_SET_P ()
2476 && abs (INSN_ADDRESSES (INSN_UID (XEXP (operands[1], 0)))
2477 - INSN_ADDRESSES (INSN_UID (insn))) < 8100)
2478 output_asm_insn (\"ldo %1-%2(%0),%0\", xoperands);
2479 else
2480 {
2481 output_asm_insn (\"addil L%%%1-%2,%0\", xoperands);
2482 output_asm_insn (\"ldo R%%%1-%2(%0),%0\", xoperands);
2483 }
2484 return \"\";
2485 }"
2486 [(set_attr "type" "multi")
2487 (set_attr "length" "12")]) ; 8 or 12
2488
2489 (define_insn ""
2490 [(set (match_operand 0 "pmode_register_operand" "=a")
2491 (match_operand 1 "pic_label_operand" ""))]
2492 "!TARGET_PA_20"
2493 "*
2494 {
2495 rtx xoperands[3];
2496 extern FILE *asm_out_file;
2497
2498 xoperands[0] = operands[0];
2499 xoperands[1] = operands[1];
2500 xoperands[2] = gen_label_rtx ();
2501
2502 output_asm_insn (\"bl .+8,%0\", xoperands);
2503 output_asm_insn (\"depi 0,31,2,%0\", xoperands);
2504 (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
2505 CODE_LABEL_NUMBER (xoperands[2]));
2506
2507 /* If we're trying to load the address of a label that happens to be
2508 close, then we can use a shorter sequence. */
2509 if (GET_CODE (operands[1]) == LABEL_REF
2510 && INSN_ADDRESSES_SET_P ()
2511 && abs (INSN_ADDRESSES (INSN_UID (XEXP (operands[1], 0)))
2512 - INSN_ADDRESSES (INSN_UID (insn))) < 8100)
2513 output_asm_insn (\"ldo %1-%2(%0),%0\", xoperands);
2514 else
2515 {
2516 output_asm_insn (\"addil L%%%1-%2,%0\", xoperands);
2517 output_asm_insn (\"ldo R%%%1-%2(%0),%0\", xoperands);
2518 }
2519 return \"\";
2520 }"
2521 [(set_attr "type" "multi")
2522 (set_attr "length" "16")]) ; 12 or 16
2523
2524 (define_insn ""
2525 [(set (match_operand:SI 0 "register_operand" "=a")
2526 (plus:SI (match_operand:SI 1 "register_operand" "r")
2527 (high:SI (match_operand 2 "" ""))))]
2528 "symbolic_operand (operands[2], Pmode)
2529 && ! function_label_operand (operands[2], Pmode)
2530 && flag_pic"
2531 "addil LT'%G2,%1"
2532 [(set_attr "type" "binary")
2533 (set_attr "length" "4")])
2534
2535 (define_insn ""
2536 [(set (match_operand:DI 0 "register_operand" "=a")
2537 (plus:DI (match_operand:DI 1 "register_operand" "r")
2538 (high:DI (match_operand 2 "" ""))))]
2539 "symbolic_operand (operands[2], Pmode)
2540 && ! function_label_operand (operands[2], Pmode)
2541 && TARGET_64BIT
2542 && flag_pic"
2543 "addil LT'%G2,%1"
2544 [(set_attr "type" "binary")
2545 (set_attr "length" "4")])
2546
2547 ;; Always use addil rather than ldil;add sequences. This allows the
2548 ;; HP linker to eliminate the dp relocation if the symbolic operand
2549 ;; lives in the TEXT space.
2550 (define_insn ""
2551 [(set (match_operand:SI 0 "register_operand" "=a")
2552 (high:SI (match_operand 1 "" "")))]
2553 "symbolic_operand (operands[1], Pmode)
2554 && ! function_label_operand (operands[1], Pmode)
2555 && ! read_only_operand (operands[1], Pmode)
2556 && ! flag_pic"
2557 "*
2558 {
2559 if (TARGET_LONG_LOAD_STORE)
2560 return \"addil NLR'%H1,%%r27\;ldo N'%H1(%%r1),%%r1\";
2561 else
2562 return \"addil LR'%H1,%%r27\";
2563 }"
2564 [(set_attr "type" "binary")
2565 (set (attr "length")
2566 (if_then_else (eq (symbol_ref "TARGET_LONG_LOAD_STORE") (const_int 0))
2567 (const_int 4)
2568 (const_int 8)))])
2569
2570
2571 ;; This is for use in the prologue/epilogue code. We need it
2572 ;; to add large constants to a stack pointer or frame pointer.
2573 ;; Because of the additional %r1 pressure, we probably do not
2574 ;; want to use this in general code, so make it available
2575 ;; only after reload.
2576 (define_insn ""
2577 [(set (match_operand:SI 0 "register_operand" "=!a,*r")
2578 (plus:SI (match_operand:SI 1 "register_operand" "r,r")
2579 (high:SI (match_operand 2 "const_int_operand" ""))))]
2580 "reload_completed"
2581 "@
2582 addil L'%G2,%1
2583 ldil L'%G2,%0\;{addl|add,l} %0,%1,%0"
2584 [(set_attr "type" "binary,binary")
2585 (set_attr "length" "4,8")])
2586
2587 (define_insn ""
2588 [(set (match_operand:DI 0 "register_operand" "=!a,*r")
2589 (plus:DI (match_operand:DI 1 "register_operand" "r,r")
2590 (high:DI (match_operand 2 "const_int_operand" ""))))]
2591 "reload_completed && TARGET_64BIT"
2592 "@
2593 addil L'%G2,%1
2594 ldil L'%G2,%0\;{addl|add,l} %0,%1,%0"
2595 [(set_attr "type" "binary,binary")
2596 (set_attr "length" "4,8")])
2597
2598 (define_insn ""
2599 [(set (match_operand:SI 0 "register_operand" "=r")
2600 (high:SI (match_operand 1 "" "")))]
2601 "(!flag_pic || !symbolic_operand (operands[1], Pmode))
2602 && !is_function_label_plus_const (operands[1])"
2603 "*
2604 {
2605 if (symbolic_operand (operands[1], Pmode))
2606 return \"ldil LR'%H1,%0\";
2607 else
2608 return \"ldil L'%G1,%0\";
2609 }"
2610 [(set_attr "type" "move")
2611 (set_attr "length" "4")])
2612
2613 (define_insn ""
2614 [(set (match_operand:DI 0 "register_operand" "=r")
2615 (high:DI (match_operand 1 "const_int_operand" "")))]
2616 "TARGET_64BIT"
2617 "ldil L'%G1,%0";
2618 [(set_attr "type" "move")
2619 (set_attr "length" "4")])
2620
2621 (define_insn ""
2622 [(set (match_operand:DI 0 "register_operand" "=r")
2623 (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
2624 (match_operand:DI 2 "const_int_operand" "i")))]
2625 "TARGET_64BIT"
2626 "ldo R'%G2(%1),%0";
2627 [(set_attr "type" "move")
2628 (set_attr "length" "4")])
2629
2630 (define_insn ""
2631 [(set (match_operand:SI 0 "register_operand" "=r")
2632 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
2633 (match_operand:SI 2 "immediate_operand" "i")))]
2634 "!is_function_label_plus_const (operands[2])"
2635 "*
2636 {
2637 if (flag_pic && symbolic_operand (operands[2], Pmode))
2638 abort ();
2639 else if (symbolic_operand (operands[2], Pmode))
2640 return \"ldo RR'%G2(%1),%0\";
2641 else
2642 return \"ldo R'%G2(%1),%0\";
2643 }"
2644 [(set_attr "type" "move")
2645 (set_attr "length" "4")])
2646
2647 ;; Now that a symbolic_address plus a constant is broken up early
2648 ;; in the compilation phase (for better CSE), we need a special
2649 ;; combiner pattern to load the symbolic address plus the constant
2650 ;; in only 2 instructions, for the cases where the symbolic address
2651 ;; was not a common subexpression.
2652 (define_split
2653 [(set (match_operand:SI 0 "register_operand" "")
2654 (match_operand:SI 1 "symbolic_operand" ""))
2655 (clobber (match_operand:SI 2 "register_operand" ""))]
2656 "! (flag_pic && pic_label_operand (operands[1], SImode))"
2657 [(set (match_dup 2) (high:SI (match_dup 1)))
2658 (set (match_dup 0) (lo_sum:SI (match_dup 2) (match_dup 1)))]
2659 "")
2660
2661 ;; hppa_legitimize_address goes to a great deal of trouble to
2662 ;; create addresses which use indexing. In some cases, this
2663 ;; is a loss because there are no store instructions which
2664 ;; allow indexed addresses (with an integer register source).
2665 ;;
2666 ;; These define_splits try to turn a 3 insn store into
2667 ;; a 2 insn store with some creative RTL rewriting.
2668 (define_split
2669 [(set (mem:SI (plus:SI (mult:SI (match_operand:SI 0 "register_operand" "")
2670 (match_operand:SI 1 "shadd_operand" ""))
2671 (plus:SI (match_operand:SI 2 "register_operand" "")
2672 (match_operand:SI 3 "const_int_operand" ""))))
2673 (match_operand:SI 4 "register_operand" ""))
2674 (clobber (match_operand:SI 5 "register_operand" ""))]
2675 ""
2676 [(set (match_dup 5) (plus:SI (mult:SI (match_dup 0) (match_dup 1))
2677 (match_dup 2)))
2678 (set (mem:SI (plus:SI (match_dup 5) (match_dup 3))) (match_dup 4))]
2679 "")
2680
2681 (define_split
2682 [(set (mem:HI (plus:SI (mult:SI (match_operand:SI 0 "register_operand" "")
2683 (match_operand:SI 1 "shadd_operand" ""))
2684 (plus:SI (match_operand:SI 2 "register_operand" "")
2685 (match_operand:SI 3 "const_int_operand" ""))))
2686 (match_operand:HI 4 "register_operand" ""))
2687 (clobber (match_operand:SI 5 "register_operand" ""))]
2688 ""
2689 [(set (match_dup 5) (plus:SI (mult:SI (match_dup 0) (match_dup 1))
2690 (match_dup 2)))
2691 (set (mem:HI (plus:SI (match_dup 5) (match_dup 3))) (match_dup 4))]
2692 "")
2693
2694 (define_split
2695 [(set (mem:QI (plus:SI (mult:SI (match_operand:SI 0 "register_operand" "")
2696 (match_operand:SI 1 "shadd_operand" ""))
2697 (plus:SI (match_operand:SI 2 "register_operand" "")
2698 (match_operand:SI 3 "const_int_operand" ""))))
2699 (match_operand:QI 4 "register_operand" ""))
2700 (clobber (match_operand:SI 5 "register_operand" ""))]
2701 ""
2702 [(set (match_dup 5) (plus:SI (mult:SI (match_dup 0) (match_dup 1))
2703 (match_dup 2)))
2704 (set (mem:QI (plus:SI (match_dup 5) (match_dup 3))) (match_dup 4))]
2705 "")
2706
2707 (define_expand "movhi"
2708 [(set (match_operand:HI 0 "general_operand" "")
2709 (match_operand:HI 1 "general_operand" ""))]
2710 ""
2711 "
2712 {
2713 if (emit_move_sequence (operands, HImode, 0))
2714 DONE;
2715 }")
2716
2717 (define_insn ""
2718 [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q,!*q,!*f")
2719 (match_operand:HI 1 "move_operand" "r,J,N,K,RQ,rM,!rM,!*fM"))]
2720 "register_operand (operands[0], HImode)
2721 || reg_or_0_operand (operands[1], HImode)"
2722 "@
2723 copy %1,%0
2724 ldi %1,%0
2725 ldil L'%1,%0
2726 {zdepi|depwi,z} %Z1,%0
2727 ldh%M1 %1,%0
2728 sth%M0 %r1,%0
2729 mtsar %r1
2730 fcpy,sgl %f1,%0"
2731 [(set_attr "type" "move,move,move,shift,load,store,move,fpalu")
2732 (set_attr "pa_combine_type" "addmove")
2733 (set_attr "length" "4,4,4,4,4,4,4,4")])
2734
2735 (define_insn ""
2736 [(set (match_operand:HI 0 "register_operand" "=r")
2737 (mem:HI (plus:SI (match_operand:SI 1 "basereg_operand" "r")
2738 (match_operand:SI 2 "register_operand" "r"))))]
2739 "! TARGET_DISABLE_INDEXING"
2740 "{ldhx|ldh} %2(%1),%0"
2741 [(set_attr "type" "load")
2742 (set_attr "length" "4")])
2743
2744 (define_insn ""
2745 [(set (match_operand:HI 0 "register_operand" "=r")
2746 (mem:HI (plus:SI (match_operand:SI 1 "register_operand" "r")
2747 (match_operand:SI 2 "basereg_operand" "r"))))]
2748 "! TARGET_DISABLE_INDEXING"
2749 "{ldhx|ldh} %1(%2),%0"
2750 [(set_attr "type" "load")
2751 (set_attr "length" "4")])
2752
2753 ; Now zero extended variants.
2754 (define_insn ""
2755 [(set (match_operand:SI 0 "register_operand" "=r")
2756 (zero_extend:SI (mem:HI
2757 (plus:SI
2758 (match_operand:SI 1 "basereg_operand" "r")
2759 (match_operand:SI 2 "register_operand" "r")))))]
2760 "! TARGET_DISABLE_INDEXING"
2761 "{ldhx|ldh} %2(%1),%0"
2762 [(set_attr "type" "load")
2763 (set_attr "length" "4")])
2764
2765 (define_insn ""
2766 [(set (match_operand:SI 0 "register_operand" "=r")
2767 (zero_extend:SI (mem:HI
2768 (plus:SI
2769 (match_operand:SI 1 "register_operand" "r")
2770 (match_operand:SI 2 "basereg_operand" "r")))))]
2771 "! TARGET_DISABLE_INDEXING"
2772 "{ldhx|ldh} %1(%2),%0"
2773 [(set_attr "type" "load")
2774 (set_attr "length" "4")])
2775
2776 (define_insn ""
2777 [(set (match_operand:HI 0 "register_operand" "=r")
2778 (mem:HI (plus:SI (match_operand:SI 1 "register_operand" "+r")
2779 (match_operand:SI 2 "int5_operand" "L"))))
2780 (set (match_dup 1)
2781 (plus:SI (match_dup 1) (match_dup 2)))]
2782 ""
2783 "{ldhs|ldh},mb %2(%1),%0"
2784 [(set_attr "type" "load")
2785 (set_attr "length" "4")])
2786
2787 ; And a zero extended variant.
2788 (define_insn ""
2789 [(set (match_operand:SI 0 "register_operand" "=r")
2790 (zero_extend:SI (mem:HI
2791 (plus:SI
2792 (match_operand:SI 1 "register_operand" "+r")
2793 (match_operand:SI 2 "int5_operand" "L")))))
2794 (set (match_dup 1)
2795 (plus:SI (match_dup 1) (match_dup 2)))]
2796 ""
2797 "{ldhs|ldh},mb %2(%1),%0"
2798 [(set_attr "type" "load")
2799 (set_attr "length" "4")])
2800
2801 (define_insn ""
2802 [(set (mem:HI (plus:SI (match_operand:SI 0 "register_operand" "+r")
2803 (match_operand:SI 1 "int5_operand" "L")))
2804 (match_operand:HI 2 "reg_or_0_operand" "rM"))
2805 (set (match_dup 0)
2806 (plus:SI (match_dup 0) (match_dup 1)))]
2807 ""
2808 "{sths|sth},mb %r2,%1(%0)"
2809 [(set_attr "type" "store")
2810 (set_attr "length" "4")])
2811
2812 (define_insn ""
2813 [(set (match_operand:HI 0 "register_operand" "=r")
2814 (plus:HI (match_operand:HI 1 "register_operand" "r")
2815 (match_operand 2 "const_int_operand" "J")))]
2816 ""
2817 "ldo %2(%1),%0"
2818 [(set_attr "type" "binary")
2819 (set_attr "pa_combine_type" "addmove")
2820 (set_attr "length" "4")])
2821
2822 (define_expand "movqi"
2823 [(set (match_operand:QI 0 "general_operand" "")
2824 (match_operand:QI 1 "general_operand" ""))]
2825 ""
2826 "
2827 {
2828 if (emit_move_sequence (operands, QImode, 0))
2829 DONE;
2830 }")
2831
2832 (define_insn ""
2833 [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q,!*q,!*f")
2834 (match_operand:QI 1 "move_operand" "r,J,N,K,RQ,rM,!rM,!*fM"))]
2835 "register_operand (operands[0], QImode)
2836 || reg_or_0_operand (operands[1], QImode)"
2837 "@
2838 copy %1,%0
2839 ldi %1,%0
2840 ldil L'%1,%0
2841 {zdepi|depwi,z} %Z1,%0
2842 ldb%M1 %1,%0
2843 stb%M0 %r1,%0
2844 mtsar %r1
2845 fcpy,sgl %f1,%0"
2846 [(set_attr "type" "move,move,move,shift,load,store,move,fpalu")
2847 (set_attr "pa_combine_type" "addmove")
2848 (set_attr "length" "4,4,4,4,4,4,4,4")])
2849
2850 (define_insn ""
2851 [(set (match_operand:QI 0 "register_operand" "=r")
2852 (mem:QI (plus:SI (match_operand:SI 1 "basereg_operand" "r")
2853 (match_operand:SI 2 "register_operand" "r"))))]
2854 "! TARGET_DISABLE_INDEXING"
2855 "{ldbx|ldb} %2(%1),%0"
2856 [(set_attr "type" "load")
2857 (set_attr "length" "4")])
2858
2859 (define_insn ""
2860 [(set (match_operand:QI 0 "register_operand" "=r")
2861 (mem:QI (plus:SI (match_operand:SI 1 "register_operand" "r")
2862 (match_operand:SI 2 "basereg_operand" "r"))))]
2863 "! TARGET_DISABLE_INDEXING"
2864 "{ldbx|ldb} %1(%2),%0"
2865 [(set_attr "type" "load")
2866 (set_attr "length" "4")])
2867
2868 ; Indexed byte load with zero extension to SImode or HImode.
2869 (define_insn ""
2870 [(set (match_operand:SI 0 "register_operand" "=r")
2871 (zero_extend:SI (mem:QI
2872 (plus:SI
2873 (match_operand:SI 1 "basereg_operand" "r")
2874 (match_operand:SI 2 "register_operand" "r")))))]
2875 "! TARGET_DISABLE_INDEXING"
2876 "{ldbx|ldb} %2(%1),%0"
2877 [(set_attr "type" "load")
2878 (set_attr "length" "4")])
2879
2880 (define_insn ""
2881 [(set (match_operand:SI 0 "register_operand" "=r")
2882 (zero_extend:SI (mem:QI
2883 (plus:SI
2884 (match_operand:SI 1 "register_operand" "r")
2885 (match_operand:SI 2 "basereg_operand" "r")))))]
2886 "! TARGET_DISABLE_INDEXING"
2887 "{ldbx|ldb} %1(%2),%0"
2888 [(set_attr "type" "load")
2889 (set_attr "length" "4")])
2890
2891 (define_insn ""
2892 [(set (match_operand:HI 0 "register_operand" "=r")
2893 (zero_extend:HI (mem:QI
2894 (plus:SI
2895 (match_operand:SI 1 "basereg_operand" "r")
2896 (match_operand:SI 2 "register_operand" "r")))))]
2897 "! TARGET_DISABLE_INDEXING"
2898 "{ldbx|ldb} %2(%1),%0"
2899 [(set_attr "type" "load")
2900 (set_attr "length" "4")])
2901
2902 (define_insn ""
2903 [(set (match_operand:HI 0 "register_operand" "=r")
2904 (zero_extend:HI (mem:QI
2905 (plus:SI
2906 (match_operand:SI 1 "register_operand" "r")
2907 (match_operand:SI 2 "basereg_operand" "r")))))]
2908 "! TARGET_DISABLE_INDEXING"
2909 "{ldbx|ldb} %1(%2),%0"
2910 [(set_attr "type" "load")
2911 (set_attr "length" "4")])
2912
2913 (define_insn ""
2914 [(set (match_operand:QI 0 "register_operand" "=r")
2915 (mem:QI (plus:SI (match_operand:SI 1 "register_operand" "+r")
2916 (match_operand:SI 2 "int5_operand" "L"))))
2917 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]
2918 ""
2919 "{ldbs|ldb},mb %2(%1),%0"
2920 [(set_attr "type" "load")
2921 (set_attr "length" "4")])
2922
2923 ; Now the same thing with zero extensions.
2924 (define_insn ""
2925 [(set (match_operand:SI 0 "register_operand" "=r")
2926 (zero_extend:SI (mem:QI (plus:SI
2927 (match_operand:SI 1 "register_operand" "+r")
2928 (match_operand:SI 2 "int5_operand" "L")))))
2929 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]
2930 ""
2931 "{ldbs|ldb},mb %2(%1),%0"
2932 [(set_attr "type" "load")
2933 (set_attr "length" "4")])
2934
2935 (define_insn ""
2936 [(set (match_operand:HI 0 "register_operand" "=r")
2937 (zero_extend:HI (mem:QI (plus:SI
2938 (match_operand:SI 1 "register_operand" "+r")
2939 (match_operand:SI 2 "int5_operand" "L")))))
2940 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]
2941 ""
2942 "{ldbs|ldb},mb %2(%1),%0"
2943 [(set_attr "type" "load")
2944 (set_attr "length" "4")])
2945
2946 (define_insn ""
2947 [(set (mem:QI (plus:SI (match_operand:SI 0 "register_operand" "+r")
2948 (match_operand:SI 1 "int5_operand" "L")))
2949 (match_operand:QI 2 "reg_or_0_operand" "rM"))
2950 (set (match_dup 0)
2951 (plus:SI (match_dup 0) (match_dup 1)))]
2952 ""
2953 "{stbs|stb},mb %r2,%1(%0)"
2954 [(set_attr "type" "store")
2955 (set_attr "length" "4")])
2956
2957 ;; The definition of this insn does not really explain what it does,
2958 ;; but it should suffice that anything generated as this insn will be
2959 ;; recognized as a movstrsi operation, and that it will not successfully
2960 ;; combine with anything.
2961 (define_expand "movstrsi"
2962 [(parallel [(set (match_operand:BLK 0 "" "")
2963 (match_operand:BLK 1 "" ""))
2964 (clobber (match_dup 4))
2965 (clobber (match_dup 5))
2966 (clobber (match_dup 6))
2967 (clobber (match_dup 7))
2968 (clobber (match_dup 8))
2969 (use (match_operand:SI 2 "arith_operand" ""))
2970 (use (match_operand:SI 3 "const_int_operand" ""))])]
2971 "!TARGET_64BIT && optimize > 0"
2972 "
2973 {
2974 int size, align;
2975
2976 /* HP provides a very fast block move library routine for the PA;
2977 this routine includes:
2978
2979 4x4 byte at a time block moves,
2980 1x4 byte at a time with alignment checked at runtime and
2981 attempts to align the source and destination as needed,
2982 1x1 byte loop
2983
2984 With that in mind, here are the heuristics used to guess when
2985 the inlined block move will be better than the library block
2986 move:
2987
2988 If the size isn't constant, then always use the library routines.
2989
2990 If the size is large relative to the known alignment, then use
2991 the library routines.
2992
2993 If the size is small relative to the known alignment, then open
2994 code the copy (since that will lead to better scheduling).
2995
2996 Else use the block move pattern. */
2997
2998 /* Undetermined size, use the library routine. */
2999 if (GET_CODE (operands[2]) != CONST_INT)
3000 FAIL;
3001
3002 size = INTVAL (operands[2]);
3003 align = INTVAL (operands[3]);
3004 align = align > 4 ? 4 : align;
3005
3006 /* If size/alignment is large, then use the library routines. */
3007 if (size / align > 16)
3008 FAIL;
3009
3010 /* This does happen, but not often enough to worry much about. */
3011 if (size / align < MOVE_RATIO)
3012 FAIL;
3013
3014 /* Fall through means we're going to use our block move pattern. */
3015 operands[0]
3016 = replace_equiv_address (operands[0],
3017 copy_to_mode_reg (SImode, XEXP (operands[0], 0)));
3018 operands[1]
3019 = replace_equiv_address (operands[1],
3020 copy_to_mode_reg (SImode, XEXP (operands[1], 0)));
3021 operands[4] = gen_reg_rtx (SImode);
3022 operands[5] = gen_reg_rtx (SImode);
3023 operands[6] = gen_reg_rtx (SImode);
3024 operands[7] = gen_reg_rtx (SImode);
3025 operands[8] = gen_reg_rtx (SImode);
3026 }")
3027
3028 ;; The operand constraints are written like this to support both compile-time
3029 ;; and run-time determined byte counts. The expander and output_block_move
3030 ;; only support compile-time determined counts at this time.
3031 ;;
3032 ;; If the count is run-time determined, the register with the byte count
3033 ;; is clobbered by the copying code, and therefore it is forced to operand 2.
3034 ;;
3035 ;; We used to clobber operands 0 and 1. However, a change to regrename.c
3036 ;; broke this semantic for pseudo registers. We can't use match_scratch
3037 ;; as this requires two registers in the class R1_REGS when the MEMs for
3038 ;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
3039 ;; forced to internally copy operands 0 and 1 to operands 7 and 8,
3040 ;; respectively. We then split or peephole optimize after reload.
3041 (define_insn "movstrsi_prereload"
3042 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
3043 (mem:BLK (match_operand:SI 1 "register_operand" "r,r")))
3044 (clobber (match_operand:SI 2 "register_operand" "=r,r")) ;loop cnt/tmp
3045 (clobber (match_operand:SI 3 "register_operand" "=&r,&r")) ;item tmp1
3046 (clobber (match_operand:SI 6 "register_operand" "=&r,&r")) ;item tmp2
3047 (clobber (match_operand:SI 7 "register_operand" "=&r,&r")) ;item tmp3
3048 (clobber (match_operand:SI 8 "register_operand" "=&r,&r")) ;item tmp4
3049 (use (match_operand:SI 4 "arith_operand" "J,2")) ;byte count
3050 (use (match_operand:SI 5 "const_int_operand" "n,n"))] ;alignment
3051 "!TARGET_64BIT"
3052 "#"
3053 [(set_attr "type" "multi,multi")])
3054
3055 (define_split
3056 [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
3057 (mem:BLK (match_operand:SI 1 "register_operand" "")))
3058 (clobber (match_operand:SI 2 "register_operand" ""))
3059 (clobber (match_operand:SI 3 "register_operand" ""))
3060 (clobber (match_operand:SI 6 "register_operand" ""))
3061 (clobber (match_operand:SI 7 "register_operand" ""))
3062 (clobber (match_operand:SI 8 "register_operand" ""))
3063 (use (match_operand:SI 4 "arith_operand" ""))
3064 (use (match_operand:SI 5 "const_int_operand" ""))])]
3065 "!TARGET_64BIT && reload_completed && !flag_peephole2"
3066 [(set (match_dup 7) (match_dup 0))
3067 (set (match_dup 8) (match_dup 1))
3068 (parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
3069 (clobber (match_dup 2))
3070 (clobber (match_dup 3))
3071 (clobber (match_dup 6))
3072 (clobber (match_dup 7))
3073 (clobber (match_dup 8))
3074 (use (match_dup 4))
3075 (use (match_dup 5))
3076 (const_int 0)])]
3077 "")
3078
3079 (define_peephole2
3080 [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
3081 (mem:BLK (match_operand:SI 1 "register_operand" "")))
3082 (clobber (match_operand:SI 2 "register_operand" ""))
3083 (clobber (match_operand:SI 3 "register_operand" ""))
3084 (clobber (match_operand:SI 6 "register_operand" ""))
3085 (clobber (match_operand:SI 7 "register_operand" ""))
3086 (clobber (match_operand:SI 8 "register_operand" ""))
3087 (use (match_operand:SI 4 "arith_operand" ""))
3088 (use (match_operand:SI 5 "const_int_operand" ""))])]
3089 "!TARGET_64BIT"
3090 [(parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
3091 (clobber (match_dup 2))
3092 (clobber (match_dup 3))
3093 (clobber (match_dup 6))
3094 (clobber (match_dup 7))
3095 (clobber (match_dup 8))
3096 (use (match_dup 4))
3097 (use (match_dup 5))
3098 (const_int 0)])]
3099 "
3100 {
3101 if (dead_or_set_p (curr_insn, operands[0]))
3102 operands[7] = operands[0];
3103 else
3104 emit_insn (gen_rtx_SET (VOIDmode, operands[7], operands[0]));
3105
3106 if (dead_or_set_p (curr_insn, operands[1]))
3107 operands[8] = operands[1];
3108 else
3109 emit_insn (gen_rtx_SET (VOIDmode, operands[8], operands[1]));
3110 }")
3111
3112 (define_insn "movstrsi_postreload"
3113 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
3114 (mem:BLK (match_operand:SI 1 "register_operand" "r,r")))
3115 (clobber (match_operand:SI 2 "register_operand" "=r,r")) ;loop cnt/tmp
3116 (clobber (match_operand:SI 3 "register_operand" "=&r,&r")) ;item tmp1
3117 (clobber (match_operand:SI 6 "register_operand" "=&r,&r")) ;item tmp2
3118 (clobber (match_dup 0))
3119 (clobber (match_dup 1))
3120 (use (match_operand:SI 4 "arith_operand" "J,2")) ;byte count
3121 (use (match_operand:SI 5 "const_int_operand" "n,n")) ;alignment
3122 (const_int 0)]
3123 "!TARGET_64BIT && reload_completed"
3124 "* return output_block_move (operands, !which_alternative);"
3125 [(set_attr "type" "multi,multi")])
3126
3127 (define_expand "movstrdi"
3128 [(parallel [(set (match_operand:BLK 0 "" "")
3129 (match_operand:BLK 1 "" ""))
3130 (clobber (match_dup 4))
3131 (clobber (match_dup 5))
3132 (clobber (match_dup 6))
3133 (clobber (match_dup 7))
3134 (clobber (match_dup 8))
3135 (use (match_operand:DI 2 "arith_operand" ""))
3136 (use (match_operand:DI 3 "const_int_operand" ""))])]
3137 "TARGET_64BIT && optimize > 0"
3138 "
3139 {
3140 int size, align;
3141
3142 /* HP provides a very fast block move library routine for the PA;
3143 this routine includes:
3144
3145 4x4 byte at a time block moves,
3146 1x4 byte at a time with alignment checked at runtime and
3147 attempts to align the source and destination as needed,
3148 1x1 byte loop
3149
3150 With that in mind, here are the heuristics used to guess when
3151 the inlined block move will be better than the library block
3152 move:
3153
3154 If the size isn't constant, then always use the library routines.
3155
3156 If the size is large relative to the known alignment, then use
3157 the library routines.
3158
3159 If the size is small relative to the known alignment, then open
3160 code the copy (since that will lead to better scheduling).
3161
3162 Else use the block move pattern. */
3163
3164 /* Undetermined size, use the library routine. */
3165 if (GET_CODE (operands[2]) != CONST_INT)
3166 FAIL;
3167
3168 size = INTVAL (operands[2]);
3169 align = INTVAL (operands[3]);
3170 align = align > 8 ? 8 : align;
3171
3172 /* If size/alignment is large, then use the library routines. */
3173 if (size / align > 16)
3174 FAIL;
3175
3176 /* This does happen, but not often enough to worry much about. */
3177 if (size / align < MOVE_RATIO)
3178 FAIL;
3179
3180 /* Fall through means we're going to use our block move pattern. */
3181 operands[0]
3182 = replace_equiv_address (operands[0],
3183 copy_to_mode_reg (DImode, XEXP (operands[0], 0)));
3184 operands[1]
3185 = replace_equiv_address (operands[1],
3186 copy_to_mode_reg (DImode, XEXP (operands[1], 0)));
3187 operands[4] = gen_reg_rtx (DImode);
3188 operands[5] = gen_reg_rtx (DImode);
3189 operands[6] = gen_reg_rtx (DImode);
3190 operands[7] = gen_reg_rtx (DImode);
3191 operands[8] = gen_reg_rtx (DImode);
3192 }")
3193
3194 ;; The operand constraints are written like this to support both compile-time
3195 ;; and run-time determined byte counts. The expander and output_block_move
3196 ;; only support compile-time determined counts at this time.
3197 ;;
3198 ;; If the count is run-time determined, the register with the byte count
3199 ;; is clobbered by the copying code, and therefore it is forced to operand 2.
3200 ;;
3201 ;; We used to clobber operands 0 and 1. However, a change to regrename.c
3202 ;; broke this semantic for pseudo registers. We can't use match_scratch
3203 ;; as this requires two registers in the class R1_REGS when the MEMs for
3204 ;; operands 0 and 1 are both equivalent to symbolic MEMs. Thus, we are
3205 ;; forced to internally copy operands 0 and 1 to operands 7 and 8,
3206 ;; respectively. We then split or peephole optimize after reload.
3207 (define_insn "movstrdi_prereload"
3208 [(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
3209 (mem:BLK (match_operand:DI 1 "register_operand" "r,r")))
3210 (clobber (match_operand:DI 2 "register_operand" "=r,r")) ;loop cnt/tmp
3211 (clobber (match_operand:DI 3 "register_operand" "=&r,&r")) ;item tmp1
3212 (clobber (match_operand:DI 6 "register_operand" "=&r,&r")) ;item tmp2
3213 (clobber (match_operand:DI 7 "register_operand" "=&r,&r")) ;item tmp3
3214 (clobber (match_operand:DI 8 "register_operand" "=&r,&r")) ;item tmp4
3215 (use (match_operand:DI 4 "arith_operand" "J,2")) ;byte count
3216 (use (match_operand:DI 5 "const_int_operand" "n,n"))] ;alignment
3217 "TARGET_64BIT"
3218 "#"
3219 [(set_attr "type" "multi,multi")])
3220
3221 (define_split
3222 [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
3223 (mem:BLK (match_operand:DI 1 "register_operand" "")))
3224 (clobber (match_operand:DI 2 "register_operand" ""))
3225 (clobber (match_operand:DI 3 "register_operand" ""))
3226 (clobber (match_operand:DI 6 "register_operand" ""))
3227 (clobber (match_operand:DI 7 "register_operand" ""))
3228 (clobber (match_operand:DI 8 "register_operand" ""))
3229 (use (match_operand:DI 4 "arith_operand" ""))
3230 (use (match_operand:DI 5 "const_int_operand" ""))])]
3231 "TARGET_64BIT && reload_completed && !flag_peephole2"
3232 [(set (match_dup 7) (match_dup 0))
3233 (set (match_dup 8) (match_dup 1))
3234 (parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
3235 (clobber (match_dup 2))
3236 (clobber (match_dup 3))
3237 (clobber (match_dup 6))
3238 (clobber (match_dup 7))
3239 (clobber (match_dup 8))
3240 (use (match_dup 4))
3241 (use (match_dup 5))
3242 (const_int 0)])]
3243 "")
3244
3245 (define_peephole2
3246 [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
3247 (mem:BLK (match_operand:DI 1 "register_operand" "")))
3248 (clobber (match_operand:DI 2 "register_operand" ""))
3249 (clobber (match_operand:DI 3 "register_operand" ""))
3250 (clobber (match_operand:DI 6 "register_operand" ""))
3251 (clobber (match_operand:DI 7 "register_operand" ""))
3252 (clobber (match_operand:DI 8 "register_operand" ""))
3253 (use (match_operand:DI 4 "arith_operand" ""))
3254 (use (match_operand:DI 5 "const_int_operand" ""))])]
3255 "TARGET_64BIT"
3256 [(parallel [(set (mem:BLK (match_dup 7)) (mem:BLK (match_dup 8)))
3257 (clobber (match_dup 2))
3258 (clobber (match_dup 3))
3259 (clobber (match_dup 6))
3260 (clobber (match_dup 7))
3261 (clobber (match_dup 8))
3262 (use (match_dup 4))
3263 (use (match_dup 5))
3264 (const_int 0)])]
3265 "
3266 {
3267 if (dead_or_set_p (curr_insn, operands[0]))
3268 operands[7] = operands[0];
3269 else
3270 emit_insn (gen_rtx_SET (VOIDmode, operands[7], operands[0]));
3271
3272 if (dead_or_set_p (curr_insn, operands[1]))
3273 operands[8] = operands[1];
3274 else
3275 emit_insn (gen_rtx_SET (VOIDmode, operands[8], operands[1]));
3276 }")
3277
3278 (define_insn "movstrdi_postreload"
3279 [(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
3280 (mem:BLK (match_operand:DI 1 "register_operand" "r,r")))
3281 (clobber (match_operand:DI 2 "register_operand" "=r,r")) ;loop cnt/tmp
3282 (clobber (match_operand:DI 3 "register_operand" "=&r,&r")) ;item tmp1
3283 (clobber (match_operand:DI 6 "register_operand" "=&r,&r")) ;item tmp2
3284 (clobber (match_dup 0))
3285 (clobber (match_dup 1))
3286 (use (match_operand:DI 4 "arith_operand" "J,2")) ;byte count
3287 (use (match_operand:DI 5 "const_int_operand" "n,n")) ;alignment
3288 (const_int 0)]
3289 "TARGET_64BIT && reload_completed"
3290 "* return output_block_move (operands, !which_alternative);"
3291 [(set_attr "type" "multi,multi")])
3292
3293 (define_expand "clrstrsi"
3294 [(parallel [(set (match_operand:BLK 0 "" "")
3295 (const_int 0))
3296 (clobber (match_dup 3))
3297 (clobber (match_dup 4))
3298 (use (match_operand:SI 1 "arith_operand" ""))
3299 (use (match_operand:SI 2 "const_int_operand" ""))])]
3300 "!TARGET_64BIT && optimize > 0"
3301 "
3302 {
3303 int size, align;
3304
3305 /* Undetermined size, use the library routine. */
3306 if (GET_CODE (operands[1]) != CONST_INT)
3307 FAIL;
3308
3309 size = INTVAL (operands[1]);
3310 align = INTVAL (operands[2]);
3311 align = align > 4 ? 4 : align;
3312
3313 /* If size/alignment is large, then use the library routines. */
3314 if (size / align > 16)
3315 FAIL;
3316
3317 /* This does happen, but not often enough to worry much about. */
3318 if (size / align < MOVE_RATIO)
3319 FAIL;
3320
3321 /* Fall through means we're going to use our block clear pattern. */
3322 operands[0]
3323 = replace_equiv_address (operands[0],
3324 copy_to_mode_reg (SImode, XEXP (operands[0], 0)));
3325 operands[3] = gen_reg_rtx (SImode);
3326 operands[4] = gen_reg_rtx (SImode);
3327 }")
3328
3329 (define_insn "clrstrsi_prereload"
3330 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
3331 (const_int 0))
3332 (clobber (match_operand:SI 1 "register_operand" "=r,r")) ;loop cnt/tmp
3333 (clobber (match_operand:SI 4 "register_operand" "=&r,&r")) ;tmp1
3334 (use (match_operand:SI 2 "arith_operand" "J,1")) ;byte count
3335 (use (match_operand:SI 3 "const_int_operand" "n,n"))] ;alignment
3336 "!TARGET_64BIT"
3337 "#"
3338 [(set_attr "type" "multi,multi")])
3339
3340 (define_split
3341 [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
3342 (const_int 0))
3343 (clobber (match_operand:SI 1 "register_operand" ""))
3344 (clobber (match_operand:SI 4 "register_operand" ""))
3345 (use (match_operand:SI 2 "arith_operand" ""))
3346 (use (match_operand:SI 3 "const_int_operand" ""))])]
3347 "!TARGET_64BIT && reload_completed && !flag_peephole2"
3348 [(set (match_dup 4) (match_dup 0))
3349 (parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
3350 (clobber (match_dup 1))
3351 (clobber (match_dup 4))
3352 (use (match_dup 2))
3353 (use (match_dup 3))
3354 (const_int 0)])]
3355 "")
3356
3357 (define_peephole2
3358 [(parallel [(set (mem:BLK (match_operand:SI 0 "register_operand" ""))
3359 (const_int 0))
3360 (clobber (match_operand:SI 1 "register_operand" ""))
3361 (clobber (match_operand:SI 4 "register_operand" ""))
3362 (use (match_operand:SI 2 "arith_operand" ""))
3363 (use (match_operand:SI 3 "const_int_operand" ""))])]
3364 "!TARGET_64BIT"
3365 [(parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
3366 (clobber (match_dup 1))
3367 (clobber (match_dup 4))
3368 (use (match_dup 2))
3369 (use (match_dup 3))
3370 (const_int 0)])]
3371 "
3372 {
3373 if (dead_or_set_p (curr_insn, operands[0]))
3374 operands[4] = operands[0];
3375 else
3376 emit_insn (gen_rtx_SET (VOIDmode, operands[4], operands[0]));
3377 }")
3378
3379 (define_insn "clrstrsi_postreload"
3380 [(set (mem:BLK (match_operand:SI 0 "register_operand" "r,r"))
3381 (const_int 0))
3382 (clobber (match_operand:SI 1 "register_operand" "=r,r")) ;loop cnt/tmp
3383 (clobber (match_dup 0))
3384 (use (match_operand:SI 2 "arith_operand" "J,1")) ;byte count
3385 (use (match_operand:SI 3 "const_int_operand" "n,n")) ;alignment
3386 (const_int 0)]
3387 "!TARGET_64BIT && reload_completed"
3388 "* return output_block_clear (operands, !which_alternative);"
3389 [(set_attr "type" "multi,multi")])
3390
3391 (define_expand "clrstrdi"
3392 [(parallel [(set (match_operand:BLK 0 "" "")
3393 (const_int 0))
3394 (clobber (match_dup 3))
3395 (clobber (match_dup 4))
3396 (use (match_operand:DI 1 "arith_operand" ""))
3397 (use (match_operand:DI 2 "const_int_operand" ""))])]
3398 "TARGET_64BIT && optimize > 0"
3399 "
3400 {
3401 int size, align;
3402
3403 /* Undetermined size, use the library routine. */
3404 if (GET_CODE (operands[1]) != CONST_INT)
3405 FAIL;
3406
3407 size = INTVAL (operands[1]);
3408 align = INTVAL (operands[2]);
3409 align = align > 8 ? 8 : align;
3410
3411 /* If size/alignment is large, then use the library routines. */
3412 if (size / align > 16)
3413 FAIL;
3414
3415 /* This does happen, but not often enough to worry much about. */
3416 if (size / align < MOVE_RATIO)
3417 FAIL;
3418
3419 /* Fall through means we're going to use our block clear pattern. */
3420 operands[0]
3421 = replace_equiv_address (operands[0],
3422 copy_to_mode_reg (DImode, XEXP (operands[0], 0)));
3423 operands[3] = gen_reg_rtx (DImode);
3424 operands[4] = gen_reg_rtx (DImode);
3425 }")
3426
3427 (define_insn "clrstrdi_prereload"
3428 [(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
3429 (const_int 0))
3430 (clobber (match_operand:DI 1 "register_operand" "=r,r")) ;loop cnt/tmp
3431 (clobber (match_operand:DI 4 "register_operand" "=&r,&r")) ;tmp1
3432 (use (match_operand:DI 2 "arith_operand" "J,1")) ;byte count
3433 (use (match_operand:DI 3 "const_int_operand" "n,n"))] ;alignment
3434 "TARGET_64BIT"
3435 "#"
3436 [(set_attr "type" "multi,multi")])
3437
3438 (define_split
3439 [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
3440 (const_int 0))
3441 (clobber (match_operand:DI 1 "register_operand" ""))
3442 (clobber (match_operand:DI 4 "register_operand" ""))
3443 (use (match_operand:DI 2 "arith_operand" ""))
3444 (use (match_operand:DI 3 "const_int_operand" ""))])]
3445 "TARGET_64BIT && reload_completed && !flag_peephole2"
3446 [(set (match_dup 4) (match_dup 0))
3447 (parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
3448 (clobber (match_dup 1))
3449 (clobber (match_dup 4))
3450 (use (match_dup 2))
3451 (use (match_dup 3))
3452 (const_int 0)])]
3453 "")
3454
3455 (define_peephole2
3456 [(parallel [(set (mem:BLK (match_operand:DI 0 "register_operand" ""))
3457 (const_int 0))
3458 (clobber (match_operand:DI 1 "register_operand" ""))
3459 (clobber (match_operand:DI 4 "register_operand" ""))
3460 (use (match_operand:DI 2 "arith_operand" ""))
3461 (use (match_operand:DI 3 "const_int_operand" ""))])]
3462 "TARGET_64BIT"
3463 [(parallel [(set (mem:BLK (match_dup 4)) (const_int 0))
3464 (clobber (match_dup 1))
3465 (clobber (match_dup 4))
3466 (use (match_dup 2))
3467 (use (match_dup 3))
3468 (const_int 0)])]
3469 "
3470 {
3471 if (dead_or_set_p (curr_insn, operands[0]))
3472 operands[4] = operands[0];
3473 else
3474 emit_insn (gen_rtx_SET (VOIDmode, operands[4], operands[0]));
3475 }")
3476
3477 (define_insn "clrstrdi_postreload"
3478 [(set (mem:BLK (match_operand:DI 0 "register_operand" "r,r"))
3479 (const_int 0))
3480 (clobber (match_operand:DI 1 "register_operand" "=r,r")) ;loop cnt/tmp
3481 (clobber (match_dup 0))
3482 (use (match_operand:DI 2 "arith_operand" "J,1")) ;byte count
3483 (use (match_operand:DI 3 "const_int_operand" "n,n")) ;alignment
3484 (const_int 0)]
3485 "TARGET_64BIT && reload_completed"
3486 "* return output_block_clear (operands, !which_alternative);"
3487 [(set_attr "type" "multi,multi")])
3488 \f
3489 ;; Floating point move insns
3490
3491 ;; This pattern forces (set (reg:DF ...) (const_double ...))
3492 ;; to be reloaded by putting the constant into memory when
3493 ;; reg is a floating point register.
3494 ;;
3495 ;; For integer registers we use ldil;ldo to set the appropriate
3496 ;; value.
3497 ;;
3498 ;; This must come before the movdf pattern, and it must be present
3499 ;; to handle obscure reloading cases.
3500 (define_insn ""
3501 [(set (match_operand:DF 0 "register_operand" "=?r,f")
3502 (match_operand:DF 1 "" "?F,m"))]
3503 "GET_CODE (operands[1]) == CONST_DOUBLE
3504 && operands[1] != CONST0_RTX (DFmode)
3505 && !TARGET_64BIT
3506 && ! TARGET_SOFT_FLOAT"
3507 "* return (which_alternative == 0 ? output_move_double (operands)
3508 : \"fldd%F1 %1,%0\");"
3509 [(set_attr "type" "move,fpload")
3510 (set_attr "length" "16,4")])
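;; Illustration of the two alternatives above (mine, not from pa.c): for a
;; constant like 2.0 (bit pattern 0x4000000000000000) the FP alternative
;; loads it from the literal pool with a single "fldd", while the integer
;; register pair alternative, which only shows up in obscure reload
;; situations, is built a word at a time, roughly
;;
;;   ldil L'0x40000000,%r28    ; most significant word
;;   ldi 0,%r29                ; least significant word
;;
;; which is why its worst case length is 16 bytes.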
3511
3512 (define_expand "movdf"
3513 [(set (match_operand:DF 0 "general_operand" "")
3514 (match_operand:DF 1 "general_operand" ""))]
3515 ""
3516 "
3517 {
3518 if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
3519 operands[1] = force_const_mem (DFmode, operands[1]);
3520
3521 if (emit_move_sequence (operands, DFmode, 0))
3522 DONE;
3523 }")
3524
3525 ;; Reloading a DFmode value requires a scratch register if
3526 ;; going into or out of floating point registers.
3527
3528 (define_expand "reload_indf"
3529 [(set (match_operand:DF 0 "register_operand" "=Z")
3530 (match_operand:DF 1 "non_hard_reg_operand" ""))
3531 (clobber (match_operand:DF 2 "register_operand" "=&r"))]
3532 ""
3533 "
3534 {
3535 if (emit_move_sequence (operands, DFmode, operands[2]))
3536 DONE;
3537
3538 /* We don't want the clobber emitted, so handle this ourselves. */
3539 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3540 DONE;
3541 }")
3542
3543 (define_expand "reload_outdf"
3544 [(set (match_operand:DF 0 "non_hard_reg_operand" "")
3545 (match_operand:DF 1 "register_operand" "Z"))
3546 (clobber (match_operand:DF 2 "register_operand" "=&r"))]
3547 ""
3548 "
3549 {
3550 if (emit_move_sequence (operands, DFmode, operands[2]))
3551 DONE;
3552
3553 /* We don't want the clobber emitted, so handle this ourselves. */
3554 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3555 DONE;
3556 }")
3557
3558 (define_insn ""
3559 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
3560 "=f,*r,RQ,?o,?Q,f,*r,*r")
3561 (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
3562 "fG,*rG,f,*r,*r,RQ,o,RQ"))]
3563 "(register_operand (operands[0], DFmode)
3564 || reg_or_0_operand (operands[1], DFmode))
3565 && ! (GET_CODE (operands[1]) == CONST_DOUBLE
3566 && GET_CODE (operands[0]) == MEM)
3567 && ! TARGET_64BIT
3568 && ! TARGET_SOFT_FLOAT"
3569 "*
3570 {
3571 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1])
3572 || operands[1] == CONST0_RTX (DFmode))
3573 return output_fp_move_double (operands);
3574 return output_move_double (operands);
3575 }"
3576 [(set_attr "type" "fpalu,move,fpstore,store,store,fpload,load,load")
3577 (set_attr "length" "4,8,4,8,16,4,8,16")])
3578
3579 (define_insn ""
3580 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
3581 "=r,?o,?Q,r,r")
3582 (match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
3583 "rG,r,r,o,Q"))]
3584 "(register_operand (operands[0], DFmode)
3585 || reg_or_0_operand (operands[1], DFmode))
3586 && ! TARGET_64BIT
3587 && TARGET_SOFT_FLOAT"
3588 "*
3589 {
3590 return output_move_double (operands);
3591 }"
3592 [(set_attr "type" "move,store,store,load,load")
3593 (set_attr "length" "8,8,16,8,16")])
3594
3595 (define_insn ""
3596 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand"
3597 "=r,r,r,r,r,Q,!*q,!f,f,*TR")
3598 (match_operand:DF 1 "move_operand"
3599 "r,J,N,K,RQ,rM,!rM,!fM,*RT,f"))]
3600 "(register_operand (operands[0], DFmode)
3601 || reg_or_0_operand (operands[1], DFmode))
3602 && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
3603 "@
3604 copy %1,%0
3605 ldi %1,%0
3606 ldil L'%1,%0
3607 depdi,z %z1,%0
3608 ldd%M1 %1,%0
3609 std%M0 %r1,%0
3610 mtsar %r1
3611 fcpy,dbl %f1,%0
3612 fldd%F1 %1,%0
3613 fstd%F0 %1,%0"
3614 [(set_attr "type" "move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
3615 (set_attr "pa_combine_type" "addmove")
3616 (set_attr "length" "4,4,4,4,4,4,4,4,4,4")])
3617
3618 (define_insn ""
3619 [(set (match_operand:DF 0 "register_operand" "=fx")
3620 (mem:DF (plus:SI (match_operand:SI 1 "basereg_operand" "r")
3621 (match_operand:SI 2 "register_operand" "r"))))]
3622 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3623 "{flddx|fldd} %2(%1),%0"
3624 [(set_attr "type" "fpload")
3625 (set_attr "length" "4")])
3626
3627 (define_insn ""
3628 [(set (match_operand:DF 0 "register_operand" "=fx")
3629 (mem:DF (plus:SI (match_operand:SI 1 "register_operand" "r")
3630 (match_operand:SI 2 "basereg_operand" "r"))))]
3631 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3632 "{flddx|fldd} %1(%2),%0"
3633 [(set_attr "type" "fpload")
3634 (set_attr "length" "4")])
3635
3636 (define_insn ""
3637 [(set (mem:DF (plus:SI (match_operand:SI 1 "basereg_operand" "r")
3638 (match_operand:SI 2 "register_operand" "r")))
3639 (match_operand:DF 0 "register_operand" "fx"))]
3640 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3641 "{fstdx|fstd} %0,%2(%1)"
3642 [(set_attr "type" "fpstore")
3643 (set_attr "length" "4")])
3644
3645 (define_insn ""
3646 [(set (mem:DF (plus:SI (match_operand:SI 1 "register_operand" "r")
3647 (match_operand:SI 2 "basereg_operand" "r")))
3648 (match_operand:DF 0 "register_operand" "fx"))]
3649 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3650 "{fstdx|fstd} %0,%1(%2)"
3651 [(set_attr "type" "fpstore")
3652 (set_attr "length" "4")])
3653
3654 (define_expand "movdi"
3655 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
3656 (match_operand:DI 1 "general_operand" ""))]
3657 ""
3658 "
3659 {
3660 if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
3661 operands[1] = force_const_mem (DImode, operands[1]);
3662
3663 if (emit_move_sequence (operands, DImode, 0))
3664 DONE;
3665 }")
3666
3667 (define_expand "reload_indi"
3668 [(set (match_operand:DI 0 "register_operand" "=Z")
3669 (match_operand:DI 1 "non_hard_reg_operand" ""))
3670 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
3671 ""
3672 "
3673 {
3674 if (emit_move_sequence (operands, DImode, operands[2]))
3675 DONE;
3676
3677 /* We don't want the clobber emitted, so handle this ourselves. */
3678 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3679 DONE;
3680 }")
3681
3682 (define_expand "reload_outdi"
3683 [(set (match_operand:DI 0 "non_hard_reg_operand" "")
3684 (match_operand:DI 1 "register_operand" "Z"))
3685 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
3686 ""
3687 "
3688 {
3689 if (emit_move_sequence (operands, DImode, operands[2]))
3690 DONE;
3691
3692 /* We don't want the clobber emitted, so handle this ourselves. */
3693 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3694 DONE;
3695 }")
3696
3697 (define_insn ""
3698 [(set (match_operand:DI 0 "register_operand" "=r")
3699 (high:DI (match_operand 1 "" "")))]
3700 "!TARGET_64BIT"
3701 "*
3702 {
3703 rtx op0 = operands[0];
3704 rtx op1 = operands[1];
3705
3706 if (GET_CODE (op1) == CONST_INT)
3707 {
3708 operands[0] = operand_subword (op0, 1, 0, DImode);
3709 output_asm_insn (\"ldil L'%1,%0\", operands);
3710
3711 operands[0] = operand_subword (op0, 0, 0, DImode);
3712 if (INTVAL (op1) < 0)
3713 output_asm_insn (\"ldi -1,%0\", operands);
3714 else
3715 output_asm_insn (\"ldi 0,%0\", operands);
3716 return \"\";
3717 }
3718 else if (GET_CODE (op1) == CONST_DOUBLE)
3719 {
3720 operands[0] = operand_subword (op0, 1, 0, DImode);
3721 operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
3722 output_asm_insn (\"ldil L'%1,%0\", operands);
3723
3724 operands[0] = operand_subword (op0, 0, 0, DImode);
3725 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
3726 output_asm_insn (singlemove_string (operands), operands);
3727 return \"\";
3728 }
3729 else
3730 abort ();
3731 }"
3732 [(set_attr "type" "move")
3733 (set_attr "length" "8")])
3734
3735 (define_insn ""
3736 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
3737 "=r,o,Q,r,r,r,f,f,*TR")
3738 (match_operand:DI 1 "general_operand"
3739 "rM,r,r,o*R,Q,i,fM,*TR,f"))]
3740 "(register_operand (operands[0], DImode)
3741 || reg_or_0_operand (operands[1], DImode))
3742 && ! TARGET_64BIT
3743 && ! TARGET_SOFT_FLOAT"
3744 "*
3745 {
3746 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1])
3747 || (operands[1] == CONST0_RTX (DImode)))
3748 return output_fp_move_double (operands);
3749 return output_move_double (operands);
3750 }"
3751 [(set_attr "type" "move,store,store,load,load,multi,fpalu,fpload,fpstore")
3752 (set_attr "length" "8,8,16,8,16,16,4,4,4")])
3753
3754 (define_insn ""
3755 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
3756 "=r,r,r,r,r,r,Q,!*q,!f,f,*TR")
3757 (match_operand:DI 1 "move_operand"
3758 "A,r,J,N,K,RQ,rM,!rM,!fM,*RT,f"))]
3759 "(register_operand (operands[0], DImode)
3760 || reg_or_0_operand (operands[1], DImode))
3761 && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
3762 "@
3763 ldd RT'%A1,%0
3764 copy %1,%0
3765 ldi %1,%0
3766 ldil L'%1,%0
3767 depdi,z %z1,%0
3768 ldd%M1 %1,%0
3769 std%M0 %r1,%0
3770 mtsar %r1
3771 fcpy,dbl %f1,%0
3772 fldd%F1 %1,%0
3773 fstd%F0 %1,%0"
3774 [(set_attr "type" "load,move,move,move,shift,load,store,move,fpalu,fpload,fpstore")
3775 (set_attr "pa_combine_type" "addmove")
3776 (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4")])
3777
3778 (define_insn ""
3779 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand"
3780 "=r,o,Q,r,r,r")
3781 (match_operand:DI 1 "general_operand"
3782 "rM,r,r,o,Q,i"))]
3783 "(register_operand (operands[0], DImode)
3784 || reg_or_0_operand (operands[1], DImode))
3785 && ! TARGET_64BIT
3786 && TARGET_SOFT_FLOAT"
3787 "*
3788 {
3789 return output_move_double (operands);
3790 }"
3791 [(set_attr "type" "move,store,store,load,load,multi")
3792 (set_attr "length" "8,8,16,8,16,16")])
3793
3794 (define_insn ""
3795 [(set (match_operand:DI 0 "register_operand" "=r,&r")
3796 (lo_sum:DI (match_operand:DI 1 "register_operand" "0,r")
3797 (match_operand:DI 2 "immediate_operand" "i,i")))]
3798 "!TARGET_64BIT"
3799 "*
3800 {
3801 /* Don't output a 64 bit constant, since we can't trust the assembler to
3802 handle it correctly. */
3803 if (GET_CODE (operands[2]) == CONST_DOUBLE)
3804 operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
3805 if (which_alternative == 1)
3806 output_asm_insn (\"copy %1,%0\", operands);
3807 return \"ldo R'%G2(%R1),%R0\";
3808 }"
3809 [(set_attr "type" "move,move")
3810 (set_attr "length" "4,8")])
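;; Taken together, the high:DI pattern earlier and the lo_sum above build
;; a sign-extended 32-bit constant in a register pair (illustration only,
;; register numbers assumed):
;;
;;   ldil L'0x12345678,%r29          ; left part into the low word
;;   ldi 0,%r28                      ; sign word (ldi -1 if negative)
;;   ldo R'0x12345678(%r29),%r29     ; finish the low word
;;
;; For a CONST_DOUBLE only the low word is handled this way, per the
;; comment above about not trusting the assembler with 64-bit constants.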
3811
3812 ;; This pattern forces (set (reg:SF ...) (const_double ...))
3813 ;; to be reloaded by putting the constant into memory when
3814 ;; reg is a floating point register.
3815 ;;
3816 ;; For integer registers we use ldil;ldo to set the appropriate
3817 ;; value.
3818 ;;
3819 ;; This must come before the movsf pattern, and it must be present
3820 ;; to handle obscure reloading cases.
3821 (define_insn ""
3822 [(set (match_operand:SF 0 "register_operand" "=?r,f")
3823 (match_operand:SF 1 "" "?F,m"))]
3824 "GET_CODE (operands[1]) == CONST_DOUBLE
3825 && operands[1] != CONST0_RTX (SFmode)
3826 && ! TARGET_SOFT_FLOAT"
3827 "* return (which_alternative == 0 ? singlemove_string (operands)
3828 : \" fldw%F1 %1,%0\");"
3829 [(set_attr "type" "move,fpload")
3830 (set_attr "length" "8,4")])
3831
3832 (define_expand "movsf"
3833 [(set (match_operand:SF 0 "general_operand" "")
3834 (match_operand:SF 1 "general_operand" ""))]
3835 ""
3836 "
3837 {
3838 if (emit_move_sequence (operands, SFmode, 0))
3839 DONE;
3840 }")
3841
3842 ;; Reloading an SFmode value requires a scratch register if
3843 ;; going into or out of floating point registers.
3844
3845 (define_expand "reload_insf"
3846 [(set (match_operand:SF 0 "register_operand" "=Z")
3847 (match_operand:SF 1 "non_hard_reg_operand" ""))
3848 (clobber (match_operand:SF 2 "register_operand" "=&r"))]
3849 ""
3850 "
3851 {
3852 if (emit_move_sequence (operands, SFmode, operands[2]))
3853 DONE;
3854
3855 /* We don't want the clobber emitted, so handle this ourselves. */
3856 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3857 DONE;
3858 }")
3859
3860 (define_expand "reload_outsf"
3861 [(set (match_operand:SF 0 "non_hard_reg_operand" "")
3862 (match_operand:SF 1 "register_operand" "Z"))
3863 (clobber (match_operand:SF 2 "register_operand" "=&r"))]
3864 ""
3865 "
3866 {
3867 if (emit_move_sequence (operands, SFmode, operands[2]))
3868 DONE;
3869
3870 /* We don't want the clobber emitted, so handle this ourselves. */
3871 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
3872 DONE;
3873 }")
3874
3875 (define_insn ""
3876 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand"
3877 "=f,r,f,r,RQ,Q")
3878 (match_operand:SF 1 "reg_or_0_or_nonsymb_mem_operand"
3879 "fG,rG,RQ,RQ,f,rG"))]
3880 "(register_operand (operands[0], SFmode)
3881 || reg_or_0_operand (operands[1], SFmode))
3882 && ! TARGET_SOFT_FLOAT"
3883 "@
3884 fcpy,sgl %f1,%0
3885 copy %r1,%0
3886 fldw%F1 %1,%0
3887 ldw%M1 %1,%0
3888 fstw%F0 %r1,%0
3889 stw%M0 %r1,%0"
3890 [(set_attr "type" "fpalu,move,fpload,load,fpstore,store")
3891 (set_attr "pa_combine_type" "addmove")
3892 (set_attr "length" "4,4,4,4,4,4")])
3893
3894 (define_insn ""
3895 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand"
3896 "=r,r,Q")
3897 (match_operand:SF 1 "reg_or_0_or_nonsymb_mem_operand"
3898 "rG,RQ,rG"))]
3899 "(register_operand (operands[0], SFmode)
3900 || reg_or_0_operand (operands[1], SFmode))
3901 && TARGET_SOFT_FLOAT"
3902 "@
3903 copy %r1,%0
3904 ldw%M1 %1,%0
3905 stw%M0 %r1,%0"
3906 [(set_attr "type" "move,load,store")
3907 (set_attr "pa_combine_type" "addmove")
3908 (set_attr "length" "4,4,4")])
3909
3910 (define_insn ""
3911 [(set (match_operand:SF 0 "register_operand" "=fx")
3912 (mem:SF (plus:SI (match_operand:SI 1 "basereg_operand" "r")
3913 (match_operand:SI 2 "register_operand" "r"))))]
3914 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3915 "{fldwx|fldw} %2(%1),%0"
3916 [(set_attr "type" "fpload")
3917 (set_attr "length" "4")])
3918
3919 (define_insn ""
3920 [(set (match_operand:SF 0 "register_operand" "=fx")
3921 (mem:SF (plus:SI (match_operand:SI 1 "register_operand" "r")
3922 (match_operand:SI 2 "basereg_operand" "r"))))]
3923 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3924 "{fldwx|fldw} %1(%2),%0"
3925 [(set_attr "type" "fpload")
3926 (set_attr "length" "4")])
3927
3928 (define_insn ""
3929 [(set (mem:SF (plus:SI (match_operand:SI 1 "basereg_operand" "r")
3930 (match_operand:SI 2 "register_operand" "r")))
3931 (match_operand:SF 0 "register_operand" "fx"))]
3932 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3933 "{fstwx|fstw} %0,%2(%1)"
3934 [(set_attr "type" "fpstore")
3935 (set_attr "length" "4")])
3936 \f
3937 (define_insn ""
3938 [(set (mem:SF (plus:SI (match_operand:SI 1 "register_operand" "r")
3939 (match_operand:SI 2 "basereg_operand" "r")))
3940 (match_operand:SF 0 "register_operand" "fx"))]
3941 "! TARGET_DISABLE_INDEXING && ! TARGET_SOFT_FLOAT"
3942 "{fstwx|fstw} %0,%1(%2)"
3943 [(set_attr "type" "fpstore")
3944 (set_attr "length" "4")])
3945 \f
3946
3947 ;;- zero extension instructions
3948 ;; We have define_expand for zero extension patterns to make sure the
3949 ;; operands get loaded into registers. The define_insns accept
3950 ;; memory operands. This gives us better overall code than just
3951 ;; having a pattern that does or does not accept memory operands.
3952
3953 (define_expand "zero_extendhisi2"
3954 [(set (match_operand:SI 0 "register_operand" "")
3955 (zero_extend:SI
3956 (match_operand:HI 1 "register_operand" "")))]
3957 ""
3958 "")
3959
3960 (define_insn ""
3961 [(set (match_operand:SI 0 "register_operand" "=r,r")
3962 (zero_extend:SI
3963 (match_operand:HI 1 "move_operand" "r,RQ")))]
3964 "GET_CODE (operands[1]) != CONST_INT"
3965 "@
3966 {extru|extrw,u} %1,31,16,%0
3967 ldh%M1 %1,%0"
3968 [(set_attr "type" "shift,load")
3969 (set_attr "length" "4,4")])
3970
3971 (define_expand "zero_extendqihi2"
3972 [(set (match_operand:HI 0 "register_operand" "")
3973 (zero_extend:HI
3974 (match_operand:QI 1 "register_operand" "")))]
3975 ""
3976 "")
3977
3978 (define_insn ""
3979 [(set (match_operand:HI 0 "register_operand" "=r,r")
3980 (zero_extend:HI
3981 (match_operand:QI 1 "move_operand" "r,RQ")))]
3982 "GET_CODE (operands[1]) != CONST_INT"
3983 "@
3984 {extru|extrw,u} %1,31,8,%0
3985 ldb%M1 %1,%0"
3986 [(set_attr "type" "shift,load")
3987 (set_attr "length" "4,4")])
3988
3989 (define_expand "zero_extendqisi2"
3990 [(set (match_operand:SI 0 "register_operand" "")
3991 (zero_extend:SI
3992 (match_operand:QI 1 "register_operand" "")))]
3993 ""
3994 "")
3995
3996 (define_insn ""
3997 [(set (match_operand:SI 0 "register_operand" "=r,r")
3998 (zero_extend:SI
3999 (match_operand:QI 1 "move_operand" "r,RQ")))]
4000 "GET_CODE (operands[1]) != CONST_INT"
4001 "@
4002 {extru|extrw,u} %1,31,8,%0
4003 ldb%M1 %1,%0"
4004 [(set_attr "type" "shift,load")
4005 (set_attr "length" "4,4")])
4006
4007 (define_insn "zero_extendqidi2"
4008 [(set (match_operand:DI 0 "register_operand" "=r")
4009 (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
4010 "TARGET_64BIT"
4011 "extrd,u %1,63,8,%0"
4012 [(set_attr "type" "shift")
4013 (set_attr "length" "4")])
4014
4015 (define_insn "zero_extendhidi2"
4016 [(set (match_operand:DI 0 "register_operand" "=r")
4017 (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
4018 "TARGET_64BIT"
4019 "extrd,u %1,63,16,%0"
4020 [(set_attr "type" "shift")
4021 (set_attr "length" "4")])
4022
4023 (define_insn "zero_extendsidi2"
4024 [(set (match_operand:DI 0 "register_operand" "=r")
4025 (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
4026 "TARGET_64BIT"
4027 "extrd,u %1,63,32,%0"
4028 [(set_attr "type" "shift")
4029 (set_attr "length" "4")])
4030
4031 ;;- sign extension instructions
4032
4033 (define_insn "extendhisi2"
4034 [(set (match_operand:SI 0 "register_operand" "=r")
4035 (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))]
4036 ""
4037 "{extrs|extrw,s} %1,31,16,%0"
4038 [(set_attr "type" "shift")
4039 (set_attr "length" "4")])
4040
4041 (define_insn "extendqihi2"
4042 [(set (match_operand:HI 0 "register_operand" "=r")
4043 (sign_extend:HI (match_operand:QI 1 "register_operand" "r")))]
4044 ""
4045 "{extrs|extrw,s} %1,31,8,%0"
4046 [(set_attr "type" "shift")
4047 (set_attr "length" "4")])
4048
4049 (define_insn "extendqisi2"
4050 [(set (match_operand:SI 0 "register_operand" "=r")
4051 (sign_extend:SI (match_operand:QI 1 "register_operand" "r")))]
4052 ""
4053 "{extrs|extrw,s} %1,31,8,%0"
4054 [(set_attr "type" "shift")
4055 (set_attr "length" "4")])
4056
4057 (define_insn "extendqidi2"
4058 [(set (match_operand:DI 0 "register_operand" "=r")
4059 (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))]
4060 "TARGET_64BIT"
4061 "extrd,s %1,63,8,%0"
4062 [(set_attr "type" "shift")
4063 (set_attr "length" "4")])
4064
4065 (define_insn "extendhidi2"
4066 [(set (match_operand:DI 0 "register_operand" "=r")
4067 (sign_extend:DI (match_operand:HI 1 "register_operand" "r")))]
4068 "TARGET_64BIT"
4069 "extrd,s %1,63,16,%0"
4070 [(set_attr "type" "shift")
4071 (set_attr "length" "4")])
4072
4073 (define_insn "extendsidi2"
4074 [(set (match_operand:DI 0 "register_operand" "=r")
4075 (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))]
4076 "TARGET_64BIT"
4077 "extrd,s %1,63,32,%0"
4078 [(set_attr "type" "shift")
4079 (set_attr "length" "4")])
4080
4081 \f
4082 ;; Conversions between float and double.
4083
4084 (define_insn "extendsfdf2"
4085 [(set (match_operand:DF 0 "register_operand" "=f")
4086 (float_extend:DF
4087 (match_operand:SF 1 "register_operand" "f")))]
4088 "! TARGET_SOFT_FLOAT"
4089 "{fcnvff|fcnv},sgl,dbl %1,%0"
4090 [(set_attr "type" "fpalu")
4091 (set_attr "length" "4")])
4092
4093 (define_insn "truncdfsf2"
4094 [(set (match_operand:SF 0 "register_operand" "=f")
4095 (float_truncate:SF
4096 (match_operand:DF 1 "register_operand" "f")))]
4097 "! TARGET_SOFT_FLOAT"
4098 "{fcnvff|fcnv},dbl,sgl %1,%0"
4099 [(set_attr "type" "fpalu")
4100 (set_attr "length" "4")])
4101
4102 ;; Conversion between fixed point and floating point.
4103 ;; Note that among the fix-to-float insns
4104 ;; the ones that start with SImode come first.
4105 ;; That is so that an operand that is a CONST_INT
4106 ;; (and therefore lacks a specific machine mode)
4107 ;; will be recognized as SImode (which is always valid)
4108 ;; rather than as QImode or HImode.
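;; For example (illustration only), the combiner may hand us
;;   (set (reg:SF ...) (float:SF (const_int 3)))
;; and since (const_int 3) carries no mode, pattern order decides that it
;; is treated as SImode; the const_int pattern just below then forces the
;; 3 into memory so it can be loaded with fldw and converted.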
4109
4110 ;; This pattern forces (set (reg:SF ...) (float:SF (const_int ...)))
4111 ;; to be reloaded by putting the constant into memory.
4112 ;; It must come before the more general floatsisf2 pattern.
4113 (define_insn ""
4114 [(set (match_operand:SF 0 "register_operand" "=f")
4115 (float:SF (match_operand:SI 1 "const_int_operand" "m")))]
4116 "! TARGET_SOFT_FLOAT"
4117 "fldw%F1 %1,%0\;{fcnvxf,sgl,sgl|fcnv,w,sgl} %0,%0"
4118 [(set_attr "type" "fpalu")
4119 (set_attr "length" "8")])
4120
4121 (define_insn "floatsisf2"
4122 [(set (match_operand:SF 0 "register_operand" "=f")
4123 (float:SF (match_operand:SI 1 "register_operand" "f")))]
4124 "! TARGET_SOFT_FLOAT"
4125 "{fcnvxf,sgl,sgl|fcnv,w,sgl} %1,%0"
4126 [(set_attr "type" "fpalu")
4127 (set_attr "length" "4")])
4128
4129 ;; This pattern forces (set (reg:DF ...) (float:DF (const_int ...)))
4130 ;; to be reloaded by putting the constant into memory.
4131 ;; It must come before the more general floatsidf2 pattern.
4132 (define_insn ""
4133 [(set (match_operand:DF 0 "register_operand" "=f")
4134 (float:DF (match_operand:SI 1 "const_int_operand" "m")))]
4135 "! TARGET_SOFT_FLOAT"
4136 "fldw%F1 %1,%0\;{fcnvxf,sgl,dbl|fcnv,w,dbl} %0,%0"
4137 [(set_attr "type" "fpalu")
4138 (set_attr "length" "8")])
4139
4140 (define_insn "floatsidf2"
4141 [(set (match_operand:DF 0 "register_operand" "=f")
4142 (float:DF (match_operand:SI 1 "register_operand" "f")))]
4143 "! TARGET_SOFT_FLOAT"
4144 "{fcnvxf,sgl,dbl|fcnv,w,dbl} %1,%0"
4145 [(set_attr "type" "fpalu")
4146 (set_attr "length" "4")])
4147
4148 (define_expand "floatunssisf2"
4149 [(set (subreg:SI (match_dup 2) 4)
4150 (match_operand:SI 1 "register_operand" ""))
4151 (set (subreg:SI (match_dup 2) 0)
4152 (const_int 0))
4153 (set (match_operand:SF 0 "register_operand" "")
4154 (float:SF (match_dup 2)))]
4155 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4156 "
4157 {
4158 if (TARGET_PA_20)
4159 {
4160 emit_insn (gen_floatunssisf2_pa20 (operands[0], operands[1]));
4161 DONE;
4162 }
4163 operands[2] = gen_reg_rtx (DImode);
4164 }")
4165
4166 (define_expand "floatunssidf2"
4167 [(set (subreg:SI (match_dup 2) 4)
4168 (match_operand:SI 1 "register_operand" ""))
4169 (set (subreg:SI (match_dup 2) 0)
4170 (const_int 0))
4171 (set (match_operand:DF 0 "register_operand" "")
4172 (float:DF (match_dup 2)))]
4173 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4174 "
4175 {
4176 if (TARGET_PA_20)
4177 {
4178 emit_insn (gen_floatunssidf2_pa20 (operands[0], operands[1]));
4179 DONE;
4180 }
4181 operands[2] = gen_reg_rtx (DImode);
4182 }")
4183
4184 (define_insn "floatdisf2"
4185 [(set (match_operand:SF 0 "register_operand" "=f")
4186 (float:SF (match_operand:DI 1 "register_operand" "f")))]
4187 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4188 "{fcnvxf,dbl,sgl|fcnv,dw,sgl} %1,%0"
4189 [(set_attr "type" "fpalu")
4190 (set_attr "length" "4")])
4191
4192 (define_insn "floatdidf2"
4193 [(set (match_operand:DF 0 "register_operand" "=f")
4194 (float:DF (match_operand:DI 1 "register_operand" "f")))]
4195 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4196 "{fcnvxf,dbl,dbl|fcnv,dw,dbl} %1,%0"
4197 [(set_attr "type" "fpalu")
4198 (set_attr "length" "4")])
4199
4200 ;; Convert a float to an actual integer.
4201 ;; Truncation is performed as part of the conversion.
4202
4203 (define_insn "fix_truncsfsi2"
4204 [(set (match_operand:SI 0 "register_operand" "=f")
4205 (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
4206 "! TARGET_SOFT_FLOAT"
4207 "{fcnvfxt,sgl,sgl|fcnv,t,sgl,w} %1,%0"
4208 [(set_attr "type" "fpalu")
4209 (set_attr "length" "4")])
4210
4211 (define_insn "fix_truncdfsi2"
4212 [(set (match_operand:SI 0 "register_operand" "=f")
4213 (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
4214 "! TARGET_SOFT_FLOAT"
4215 "{fcnvfxt,dbl,sgl|fcnv,t,dbl,w} %1,%0"
4216 [(set_attr "type" "fpalu")
4217 (set_attr "length" "4")])
4218
4219 (define_insn "fix_truncsfdi2"
4220 [(set (match_operand:DI 0 "register_operand" "=f")
4221 (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
4222 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4223 "{fcnvfxt,sgl,dbl|fcnv,t,sgl,dw} %1,%0"
4224 [(set_attr "type" "fpalu")
4225 (set_attr "length" "4")])
4226
4227 (define_insn "fix_truncdfdi2"
4228 [(set (match_operand:DI 0 "register_operand" "=f")
4229 (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
4230 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT"
4231 "{fcnvfxt,dbl,dbl|fcnv,t,dbl,dw} %1,%0"
4232 [(set_attr "type" "fpalu")
4233 (set_attr "length" "4")])
4234
4235 (define_insn "floatunssidf2_pa20"
4236 [(set (match_operand:DF 0 "register_operand" "=f")
4237 (unsigned_float:DF (match_operand:SI 1 "register_operand" "f")))]
4238 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4239 "fcnv,uw,dbl %1,%0"
4240 [(set_attr "type" "fpalu")
4241 (set_attr "length" "4")])
4242
4243 (define_insn "floatunssisf2_pa20"
4244 [(set (match_operand:SF 0 "register_operand" "=f")
4245 (unsigned_float:SF (match_operand:SI 1 "register_operand" "f")))]
4246 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4247 "fcnv,uw,sgl %1,%0"
4248 [(set_attr "type" "fpalu")
4249 (set_attr "length" "4")])
4250
4251 (define_insn "floatunsdisf2"
4252 [(set (match_operand:SF 0 "register_operand" "=f")
4253 (unsigned_float:SF (match_operand:DI 1 "register_operand" "f")))]
4254 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4255 "fcnv,udw,sgl %1,%0"
4256 [(set_attr "type" "fpalu")
4257 (set_attr "length" "4")])
4258
4259 (define_insn "floatunsdidf2"
4260 [(set (match_operand:DF 0 "register_operand" "=f")
4261 (unsigned_float:DF (match_operand:DI 1 "register_operand" "f")))]
4262 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4263 "fcnv,udw,dbl %1,%0"
4264 [(set_attr "type" "fpalu")
4265 (set_attr "length" "4")])
4266
4267 (define_insn "fixuns_truncsfsi2"
4268 [(set (match_operand:SI 0 "register_operand" "=f")
4269 (unsigned_fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
4270 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4271 "fcnv,t,sgl,uw %1,%0"
4272 [(set_attr "type" "fpalu")
4273 (set_attr "length" "4")])
4274
4275 (define_insn "fixuns_truncdfsi2"
4276 [(set (match_operand:SI 0 "register_operand" "=f")
4277 (unsigned_fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
4278 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4279 "fcnv,t,dbl,uw %1,%0"
4280 [(set_attr "type" "fpalu")
4281 (set_attr "length" "4")])
4282
4283 (define_insn "fixuns_truncsfdi2"
4284 [(set (match_operand:DI 0 "register_operand" "=f")
4285 (unsigned_fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
4286 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4287 "fcnv,t,sgl,udw %1,%0"
4288 [(set_attr "type" "fpalu")
4289 (set_attr "length" "4")])
4290
4291 (define_insn "fixuns_truncdfdi2"
4292 [(set (match_operand:DI 0 "register_operand" "=f")
4293 (unsigned_fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
4294 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
4295 "fcnv,t,dbl,udw %1,%0"
4296 [(set_attr "type" "fpalu")
4297 (set_attr "length" "4")])
4298 \f
4299 ;;- arithmetic instructions
4300
4301 (define_expand "adddi3"
4302 [(set (match_operand:DI 0 "register_operand" "")
4303 (plus:DI (match_operand:DI 1 "register_operand" "")
4304 (match_operand:DI 2 "adddi3_operand" "")))]
4305 ""
4306 "")
4307
4308 (define_insn ""
4309 [(set (match_operand:DI 0 "register_operand" "=r")
4310 (plus:DI (match_operand:DI 1 "register_operand" "%r")
4311 (match_operand:DI 2 "arith11_operand" "rI")))]
4312 "!TARGET_64BIT"
4313 "*
4314 {
4315 if (GET_CODE (operands[2]) == CONST_INT)
4316 {
4317 if (INTVAL (operands[2]) >= 0)
4318 return \"addi %2,%R1,%R0\;{addc|add,c} %1,%%r0,%0\";
4319 else
4320 return \"addi %2,%R1,%R0\;{subb|sub,b} %1,%%r0,%0\";
4321 }
4322 else
4323 return \"add %R2,%R1,%R0\;{addc|add,c} %2,%1,%0\";
4324 }"
4325 [(set_attr "type" "binary")
4326 (set_attr "length" "8")])
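;; How the 32-bit double-word add above works (my summary, not from pa.c):
;; the low words are added first and the carry is folded into the high
;; words with "add,c".  For a negative 11-bit immediate the implicit high
;; word is all ones, so the high half must become %1 - 1 + carry, which is
;; what "{subb|sub,b} %1,%%r0,%0" (subtract zero with borrow) computes.
;; E.g. adding -5 to the pair (hi = 1, lo = 3): lo becomes 0xfffffffe with
;; no carry out, so hi becomes 1 - 1 + 0 = 0.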
4327
4328 (define_insn ""
4329 [(set (match_operand:DI 0 "register_operand" "=r,r")
4330 (plus:DI (match_operand:DI 1 "register_operand" "%r,r")
4331 (match_operand:DI 2 "arith_operand" "r,J")))]
4332 "TARGET_64BIT"
4333 "@
4334 {addl|add,l} %1,%2,%0
4335 ldo %2(%1),%0"
4336 [(set_attr "type" "binary,binary")
4337 (set_attr "pa_combine_type" "addmove")
4338 (set_attr "length" "4,4")])
4339
4340 (define_insn ""
4341 [(set (match_operand:DI 0 "register_operand" "=r")
4342 (plus:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4343 (match_operand:DI 2 "register_operand" "r")))]
4344 "TARGET_64BIT"
4345 "uaddcm %2,%1,%0"
4346 [(set_attr "type" "binary")
4347 (set_attr "length" "4")])
4348
4349 (define_insn ""
4350 [(set (match_operand:SI 0 "register_operand" "=r")
4351 (plus:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
4352 (match_operand:SI 2 "register_operand" "r")))]
4353 ""
4354 "uaddcm %2,%1,%0"
4355 [(set_attr "type" "binary")
4356 (set_attr "length" "4")])
4357
4358 ;; define_splits to optimize cases of adding a constant integer
4359 ;; to a register when the constant does not fit in 14 bits.
4360 (define_split
4361 [(set (match_operand:SI 0 "register_operand" "")
4362 (plus:SI (match_operand:SI 1 "register_operand" "")
4363 (match_operand:SI 2 "const_int_operand" "")))
4364 (clobber (match_operand:SI 4 "register_operand" ""))]
4365 "! cint_ok_for_move (INTVAL (operands[2]))
4366 && VAL_14_BITS_P (INTVAL (operands[2]) >> 1)"
4367 [(set (match_dup 4) (plus:SI (match_dup 1) (match_dup 2)))
4368 (set (match_dup 0) (plus:SI (match_dup 4) (match_dup 3)))]
4369 "
4370 {
4371 int val = INTVAL (operands[2]);
4372 int low = (val < 0) ? -0x2000 : 0x1fff;
4373 int rest = val - low;
4374
4375 operands[2] = GEN_INT (rest);
4376 operands[3] = GEN_INT (low);
4377 }")
4378
4379 (define_split
4380 [(set (match_operand:SI 0 "register_operand" "")
4381 (plus:SI (match_operand:SI 1 "register_operand" "")
4382 (match_operand:SI 2 "const_int_operand" "")))
4383 (clobber (match_operand:SI 4 "register_operand" ""))]
4384 "! cint_ok_for_move (INTVAL (operands[2]))"
4385 [(set (match_dup 4) (match_dup 2))
4386 (set (match_dup 0) (plus:SI (mult:SI (match_dup 4) (match_dup 3))
4387 (match_dup 1)))]
4388 "
4389 {
4390 HOST_WIDE_INT intval = INTVAL (operands[2]);
4391
4392 /* Try dividing the constant by 2, then 4, and finally 8 to see
4393 if we can get a constant which can be loaded into a register
4394 in a single instruction (cint_ok_for_move).
4395
4396 If that fails, try to negate the constant and subtract it
4397 from our input operand. */
4398 if (intval % 2 == 0 && cint_ok_for_move (intval / 2))
4399 {
4400 operands[2] = GEN_INT (intval / 2);
4401 operands[3] = GEN_INT (2);
4402 }
4403 else if (intval % 4 == 0 && cint_ok_for_move (intval / 4))
4404 {
4405 operands[2] = GEN_INT (intval / 4);
4406 operands[3] = GEN_INT (4);
4407 }
4408 else if (intval % 8 == 0 && cint_ok_for_move (intval / 8))
4409 {
4410 operands[2] = GEN_INT (intval / 8);
4411 operands[3] = GEN_INT (8);
4412 }
4413 else if (cint_ok_for_move (-intval))
4414 {
4415 emit_insn (gen_rtx_SET (VOIDmode, operands[4], GEN_INT (-intval)));
4416 emit_insn (gen_subsi3 (operands[0], operands[1], operands[4]));
4417 DONE;
4418 }
4419 else
4420 FAIL;
4421 }")
4422
4423 (define_insn "addsi3"
4424 [(set (match_operand:SI 0 "register_operand" "=r,r")
4425 (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
4426 (match_operand:SI 2 "arith_operand" "r,J")))]
4427 ""
4428 "@
4429 {addl|add,l} %1,%2,%0
4430 ldo %2(%1),%0"
4431 [(set_attr "type" "binary,binary")
4432 (set_attr "pa_combine_type" "addmove")
4433 (set_attr "length" "4,4")])
4434
4435 (define_expand "subdi3"
4436 [(set (match_operand:DI 0 "register_operand" "")
4437 (minus:DI (match_operand:DI 1 "register_operand" "")
4438 (match_operand:DI 2 "register_operand" "")))]
4439 ""
4440 "")
4441
4442 (define_insn ""
4443 [(set (match_operand:DI 0 "register_operand" "=r")
4444 (minus:DI (match_operand:DI 1 "register_operand" "r")
4445 (match_operand:DI 2 "register_operand" "r")))]
4446 "!TARGET_64BIT"
4447 "sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0"
4448 [(set_attr "type" "binary")
4449 (set_attr "length" "8")])
4450
4451 (define_insn ""
4452 [(set (match_operand:DI 0 "register_operand" "=r,r,!q")
4453 (minus:DI (match_operand:DI 1 "arith11_operand" "r,I,!U")
4454 (match_operand:DI 2 "register_operand" "r,r,!r")))]
4455 "TARGET_64BIT"
4456 "@
4457 sub %1,%2,%0
4458 subi %1,%2,%0
4459 mtsarcm %2"
4460 [(set_attr "type" "binary,binary,move")
4461 (set_attr "length" "4,4,4")])
4462
4463 (define_expand "subsi3"
4464 [(set (match_operand:SI 0 "register_operand" "")
4465 (minus:SI (match_operand:SI 1 "arith11_operand" "")
4466 (match_operand:SI 2 "register_operand" "")))]
4467 ""
4468 "")
4469
4470 (define_insn ""
4471 [(set (match_operand:SI 0 "register_operand" "=r,r")
4472 (minus:SI (match_operand:SI 1 "arith11_operand" "r,I")
4473 (match_operand:SI 2 "register_operand" "r,r")))]
4474 "!TARGET_PA_20"
4475 "@
4476 sub %1,%2,%0
4477 subi %1,%2,%0"
4478 [(set_attr "type" "binary,binary")
4479 (set_attr "length" "4,4")])
4480
4481 (define_insn ""
4482 [(set (match_operand:SI 0 "register_operand" "=r,r,!q")
4483 (minus:SI (match_operand:SI 1 "arith11_operand" "r,I,!S")
4484 (match_operand:SI 2 "register_operand" "r,r,!r")))]
4485 "TARGET_PA_20"
4486 "@
4487 sub %1,%2,%0
4488 subi %1,%2,%0
4489 mtsarcm %2"
4490 [(set_attr "type" "binary,binary,move")
4491 (set_attr "length" "4,4,4")])
4492
4493 ;; Clobbering a "register_operand" instead of a match_scratch
4494 ;; in operand3 of millicode calls avoids spilling %r1 and
4495 ;; produces better code.
4496
4497 ;; The mulsi3 insns set up registers for the millicode call.
4498 (define_expand "mulsi3"
4499 [(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
4500 (set (reg:SI 25) (match_operand:SI 2 "move_operand" ""))
4501 (parallel [(set (reg:SI 29) (mult:SI (reg:SI 26) (reg:SI 25)))
4502 (clobber (match_dup 3))
4503 (clobber (reg:SI 26))
4504 (clobber (reg:SI 25))
4505 (clobber (match_dup 4))])
4506 (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
4507 ""
4508 "
4509 {
4510 operands[4] = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
4511 if (TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT)
4512 {
4513 rtx scratch = gen_reg_rtx (DImode);
4514 operands[1] = force_reg (SImode, operands[1]);
4515 operands[2] = force_reg (SImode, operands[2]);
4516 emit_insn (gen_umulsidi3 (scratch, operands[1], operands[2]));
4517 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4518 gen_rtx_SUBREG (SImode, scratch, GET_MODE_SIZE (SImode))));
4519 DONE;
4520 }
4521 operands[3] = gen_reg_rtx (SImode);
4522 }")
4523
4524 (define_insn "umulsidi3"
4525 [(set (match_operand:DI 0 "nonimmediate_operand" "=f")
4526 (mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "f"))
4527 (zero_extend:DI (match_operand:SI 2 "nonimmediate_operand" "f"))))]
4528 "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT"
4529 "xmpyu %1,%2,%0"
4530 [(set_attr "type" "fpmuldbl")
4531 (set_attr "length" "4")])
4532
4533 (define_insn ""
4534 [(set (match_operand:DI 0 "nonimmediate_operand" "=f")
4535 (mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "f"))
4536 (match_operand:DI 2 "uint32_operand" "f")))]
4537 "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT && !TARGET_64BIT"
4538 "xmpyu %1,%R2,%0"
4539 [(set_attr "type" "fpmuldbl")
4540 (set_attr "length" "4")])
4541
4542 (define_insn ""
4543 [(set (match_operand:DI 0 "nonimmediate_operand" "=f")
4544 (mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "f"))
4545 (match_operand:DI 2 "uint32_operand" "f")))]
4546 "TARGET_PA_11 && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT && TARGET_64BIT"
4547 "xmpyu %1,%2R,%0"
4548 [(set_attr "type" "fpmuldbl")
4549 (set_attr "length" "4")])
4550
4551 (define_insn ""
4552 [(set (reg:SI 29) (mult:SI (reg:SI 26) (reg:SI 25)))
4553 (clobber (match_operand:SI 0 "register_operand" "=a"))
4554 (clobber (reg:SI 26))
4555 (clobber (reg:SI 25))
4556 (clobber (reg:SI 31))]
4557 "!TARGET_64BIT"
4558 "* return output_mul_insn (0, insn);"
4559 [(set_attr "type" "milli")
4560 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4561
4562 (define_insn ""
4563 [(set (reg:SI 29) (mult:SI (reg:SI 26) (reg:SI 25)))
4564 (clobber (match_operand:SI 0 "register_operand" "=a"))
4565 (clobber (reg:SI 26))
4566 (clobber (reg:SI 25))
4567 (clobber (reg:SI 2))]
4568 "TARGET_64BIT"
4569 "* return output_mul_insn (0, insn);"
4570 [(set_attr "type" "milli")
4571 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4572
4573 (define_expand "muldi3"
4574 [(set (match_operand:DI 0 "register_operand" "")
4575 (mult:DI (match_operand:DI 1 "register_operand" "")
4576 (match_operand:DI 2 "register_operand" "")))]
4577 "TARGET_64BIT && ! TARGET_DISABLE_FPREGS && ! TARGET_SOFT_FLOAT"
4578 "
4579 {
4580 rtx low_product = gen_reg_rtx (DImode);
4581 rtx cross_product1 = gen_reg_rtx (DImode);
4582 rtx cross_product2 = gen_reg_rtx (DImode);
4583 rtx cross_scratch = gen_reg_rtx (DImode);
4584 rtx cross_product = gen_reg_rtx (DImode);
4585 rtx op1l, op1r, op2l, op2r;
4586 rtx op1shifted, op2shifted;
4587
4588 op1shifted = gen_reg_rtx (DImode);
4589 op2shifted = gen_reg_rtx (DImode);
4590 op1l = gen_reg_rtx (SImode);
4591 op1r = gen_reg_rtx (SImode);
4592 op2l = gen_reg_rtx (SImode);
4593 op2r = gen_reg_rtx (SImode);
4594
4595 emit_move_insn (op1shifted, gen_rtx_LSHIFTRT (DImode, operands[1],
4596 GEN_INT (32)));
4597 emit_move_insn (op2shifted, gen_rtx_LSHIFTRT (DImode, operands[2],
4598 GEN_INT (32)));
4599 op1r = gen_rtx_SUBREG (SImode, operands[1], 4);
4600 op2r = gen_rtx_SUBREG (SImode, operands[2], 4);
4601 op1l = gen_rtx_SUBREG (SImode, op1shifted, 4);
4602 op2l = gen_rtx_SUBREG (SImode, op2shifted, 4);
4603
4604 /* Emit multiplies for the cross products. */
4605 emit_insn (gen_umulsidi3 (cross_product1, op2r, op1l));
4606 emit_insn (gen_umulsidi3 (cross_product2, op2l, op1r));
4607
4608 /* Emit a multiply for the low sub-word. */
4609 emit_insn (gen_umulsidi3 (low_product, copy_rtx (op2r), copy_rtx (op1r)));
4610
4611 /* Sum the cross products and shift them into proper position. */
4612 emit_insn (gen_adddi3 (cross_scratch, cross_product1, cross_product2));
4613 emit_insn (gen_ashldi3 (cross_product, cross_scratch, GEN_INT (32)));
4614
4615 /* Add the cross product to the low product and store the result
4616 into the output operand.  */
4617 emit_insn (gen_adddi3 (operands[0], cross_product, low_product));
4618 DONE;
4619 }")
4620
4621 ;;; Division and mod.
4622 (define_expand "divsi3"
4623 [(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
4624 (set (reg:SI 25) (match_operand:SI 2 "move_operand" ""))
4625 (parallel [(set (reg:SI 29) (div:SI (reg:SI 26) (reg:SI 25)))
4626 (clobber (match_dup 3))
4627 (clobber (match_dup 4))
4628 (clobber (reg:SI 26))
4629 (clobber (reg:SI 25))
4630 (clobber (match_dup 5))])
4631 (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
4632 ""
4633 "
4634 {
4635 operands[3] = gen_reg_rtx (SImode);
4636 if (TARGET_64BIT)
4637 {
4638 operands[5] = gen_rtx_REG (SImode, 2);
4639 operands[4] = operands[5];
4640 }
4641 else
4642 {
4643 operands[5] = gen_rtx_REG (SImode, 31);
4644 operands[4] = gen_reg_rtx (SImode);
4645 }
4646 if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 0))
4647 DONE;
4648 }")
4649
4650 (define_insn ""
4651 [(set (reg:SI 29)
4652 (div:SI (reg:SI 26) (match_operand:SI 0 "div_operand" "")))
4653 (clobber (match_operand:SI 1 "register_operand" "=a"))
4654 (clobber (match_operand:SI 2 "register_operand" "=&r"))
4655 (clobber (reg:SI 26))
4656 (clobber (reg:SI 25))
4657 (clobber (reg:SI 31))]
4658 "!TARGET_64BIT"
4659 "*
4660 return output_div_insn (operands, 0, insn);"
4661 [(set_attr "type" "milli")
4662 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4663
4664 (define_insn ""
4665 [(set (reg:SI 29)
4666 (div:SI (reg:SI 26) (match_operand:SI 0 "div_operand" "")))
4667 (clobber (match_operand:SI 1 "register_operand" "=a"))
4668 (clobber (match_operand:SI 2 "register_operand" "=&r"))
4669 (clobber (reg:SI 26))
4670 (clobber (reg:SI 25))
4671 (clobber (reg:SI 2))]
4672 "TARGET_64BIT"
4673 "*
4674 return output_div_insn (operands, 0, insn);"
4675 [(set_attr "type" "milli")
4676 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4677
4678 (define_expand "udivsi3"
4679 [(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
4680 (set (reg:SI 25) (match_operand:SI 2 "move_operand" ""))
4681 (parallel [(set (reg:SI 29) (udiv:SI (reg:SI 26) (reg:SI 25)))
4682 (clobber (match_dup 3))
4683 (clobber (match_dup 4))
4684 (clobber (reg:SI 26))
4685 (clobber (reg:SI 25))
4686 (clobber (match_dup 5))])
4687 (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
4688 ""
4689 "
4690 {
4691 operands[3] = gen_reg_rtx (SImode);
4692
4693 if (TARGET_64BIT)
4694 {
4695 operands[5] = gen_rtx_REG (SImode, 2);
4696 operands[4] = operands[5];
4697 }
4698 else
4699 {
4700 operands[5] = gen_rtx_REG (SImode, 31);
4701 operands[4] = gen_reg_rtx (SImode);
4702 }
4703 if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 1))
4704 DONE;
4705 }")
4706
4707 (define_insn ""
4708 [(set (reg:SI 29)
4709 (udiv:SI (reg:SI 26) (match_operand:SI 0 "div_operand" "")))
4710 (clobber (match_operand:SI 1 "register_operand" "=a"))
4711 (clobber (match_operand:SI 2 "register_operand" "=&r"))
4712 (clobber (reg:SI 26))
4713 (clobber (reg:SI 25))
4714 (clobber (reg:SI 31))]
4715 "!TARGET_64BIT"
4716 "*
4717 return output_div_insn (operands, 1, insn);"
4718 [(set_attr "type" "milli")
4719 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4720
4721 (define_insn ""
4722 [(set (reg:SI 29)
4723 (udiv:SI (reg:SI 26) (match_operand:SI 0 "div_operand" "")))
4724 (clobber (match_operand:SI 1 "register_operand" "=a"))
4725 (clobber (match_operand:SI 2 "register_operand" "=&r"))
4726 (clobber (reg:SI 26))
4727 (clobber (reg:SI 25))
4728 (clobber (reg:SI 2))]
4729 "TARGET_64BIT"
4730 "*
4731 return output_div_insn (operands, 1, insn);"
4732 [(set_attr "type" "milli")
4733 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4734
4735 (define_expand "modsi3"
4736 [(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
4737 (set (reg:SI 25) (match_operand:SI 2 "move_operand" ""))
4738 (parallel [(set (reg:SI 29) (mod:SI (reg:SI 26) (reg:SI 25)))
4739 (clobber (match_dup 3))
4740 (clobber (match_dup 4))
4741 (clobber (reg:SI 26))
4742 (clobber (reg:SI 25))
4743 (clobber (match_dup 5))])
4744 (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
4745 ""
4746 "
4747 {
4748 if (TARGET_64BIT)
4749 {
4750 operands[5] = gen_rtx_REG (SImode, 2);
4751 operands[4] = operands[5];
4752 }
4753 else
4754 {
4755 operands[5] = gen_rtx_REG (SImode, 31);
4756 operands[4] = gen_reg_rtx (SImode);
4757 }
4758 operands[3] = gen_reg_rtx (SImode);
4759 }")
4760
4761 (define_insn ""
4762 [(set (reg:SI 29) (mod:SI (reg:SI 26) (reg:SI 25)))
4763 (clobber (match_operand:SI 0 "register_operand" "=a"))
4764 (clobber (match_operand:SI 1 "register_operand" "=&r"))
4765 (clobber (reg:SI 26))
4766 (clobber (reg:SI 25))
4767 (clobber (reg:SI 31))]
4768 "!TARGET_64BIT"
4769 "*
4770 return output_mod_insn (0, insn);"
4771 [(set_attr "type" "milli")
4772 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4773
4774 (define_insn ""
4775 [(set (reg:SI 29) (mod:SI (reg:SI 26) (reg:SI 25)))
4776 (clobber (match_operand:SI 0 "register_operand" "=a"))
4777 (clobber (match_operand:SI 1 "register_operand" "=&r"))
4778 (clobber (reg:SI 26))
4779 (clobber (reg:SI 25))
4780 (clobber (reg:SI 2))]
4781 "TARGET_64BIT"
4782 "*
4783 return output_mod_insn (0, insn);"
4784 [(set_attr "type" "milli")
4785 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4786
4787 (define_expand "umodsi3"
4788 [(set (reg:SI 26) (match_operand:SI 1 "move_operand" ""))
4789 (set (reg:SI 25) (match_operand:SI 2 "move_operand" ""))
4790 (parallel [(set (reg:SI 29) (umod:SI (reg:SI 26) (reg:SI 25)))
4791 (clobber (match_dup 3))
4792 (clobber (match_dup 4))
4793 (clobber (reg:SI 26))
4794 (clobber (reg:SI 25))
4795 (clobber (match_dup 5))])
4796 (set (match_operand:SI 0 "general_operand" "") (reg:SI 29))]
4797 ""
4798 "
4799 {
4800 if (TARGET_64BIT)
4801 {
4802 operands[5] = gen_rtx_REG (SImode, 2);
4803 operands[4] = operands[5];
4804 }
4805 else
4806 {
4807 operands[5] = gen_rtx_REG (SImode, 31);
4808 operands[4] = gen_reg_rtx (SImode);
4809 }
4810 operands[3] = gen_reg_rtx (SImode);
4811 }")
4812
4813 (define_insn ""
4814 [(set (reg:SI 29) (umod:SI (reg:SI 26) (reg:SI 25)))
4815 (clobber (match_operand:SI 0 "register_operand" "=a"))
4816 (clobber (match_operand:SI 1 "register_operand" "=&r"))
4817 (clobber (reg:SI 26))
4818 (clobber (reg:SI 25))
4819 (clobber (reg:SI 31))]
4820 "!TARGET_64BIT"
4821 "*
4822 return output_mod_insn (1, insn);"
4823 [(set_attr "type" "milli")
4824 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4825
4826 (define_insn ""
4827 [(set (reg:SI 29) (umod:SI (reg:SI 26) (reg:SI 25)))
4828 (clobber (match_operand:SI 0 "register_operand" "=a"))
4829 (clobber (match_operand:SI 1 "register_operand" "=&r"))
4830 (clobber (reg:SI 26))
4831 (clobber (reg:SI 25))
4832 (clobber (reg:SI 2))]
4833 "TARGET_64BIT"
4834 "*
4835 return output_mod_insn (1, insn);"
4836 [(set_attr "type" "milli")
4837 (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
4838
4839 ;;- and instructions
4840 ;; We define DImode `and` so that, combined with DImode `not`, the
4841 ;; combiner can form DImode `andn`.  Other combinations are possible.
4842
4843 (define_expand "anddi3"
4844 [(set (match_operand:DI 0 "register_operand" "")
4845 (and:DI (match_operand:DI 1 "arith_double_operand" "")
4846 (match_operand:DI 2 "arith_double_operand" "")))]
4847 ""
4848 "
4849 {
4850 if (! register_operand (operands[1], DImode)
4851 || ! register_operand (operands[2], DImode))
4852 /* Let GCC break this into word-at-a-time operations. */
4853 FAIL;
4854 }")
4855
4856 (define_insn ""
4857 [(set (match_operand:DI 0 "register_operand" "=r")
4858 (and:DI (match_operand:DI 1 "register_operand" "%r")
4859 (match_operand:DI 2 "register_operand" "r")))]
4860 "!TARGET_64BIT"
4861 "and %1,%2,%0\;and %R1,%R2,%R0"
4862 [(set_attr "type" "binary")
4863 (set_attr "length" "8")])
4864
4865 (define_insn ""
4866 [(set (match_operand:DI 0 "register_operand" "=r,r")
4867 (and:DI (match_operand:DI 1 "register_operand" "%?r,0")
4868 (match_operand:DI 2 "and_operand" "rO,P")))]
4869 "TARGET_64BIT"
4870 "* return output_64bit_and (operands); "
4871 [(set_attr "type" "binary")
4872 (set_attr "length" "4")])
4873
4874 ; The ? for op1 makes reload prefer zdepi instead of loading a huge
4875 ; constant with ldil;ldo.
4876 (define_insn "andsi3"
4877 [(set (match_operand:SI 0 "register_operand" "=r,r")
4878 (and:SI (match_operand:SI 1 "register_operand" "%?r,0")
4879 (match_operand:SI 2 "and_operand" "rO,P")))]
4880 ""
4881 "* return output_and (operands); "
4882 [(set_attr "type" "binary,shift")
4883 (set_attr "length" "4,4")])
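;; Example of the trade-off described above (illustration only): for
;;
;;   unsigned int low_clear (unsigned int x) { return x & 0xffffff00; }
;;
;; the mask is a single run of zeros, so the second alternative applies
;; and output_and can emit roughly
;;
;;   copy %r26,%r28
;;   {depi|depwi} 0,31,8,%r28
;;
;; instead of materializing 0xffffff00 with ldil;ldo and using a plain
;; "and" through the first alternative.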
4884
4885 (define_insn ""
4886 [(set (match_operand:DI 0 "register_operand" "=r")
4887 (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4888 (match_operand:DI 2 "register_operand" "r")))]
4889 "!TARGET_64BIT"
4890 "andcm %2,%1,%0\;andcm %R2,%R1,%R0"
4891 [(set_attr "type" "binary")
4892 (set_attr "length" "8")])
4893
4894 (define_insn ""
4895 [(set (match_operand:DI 0 "register_operand" "=r")
4896 (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4897 (match_operand:DI 2 "register_operand" "r")))]
4898 "TARGET_64BIT"
4899 "andcm %2,%1,%0"
4900 [(set_attr "type" "binary")
4901 (set_attr "length" "4")])
4902
4903 (define_insn ""
4904 [(set (match_operand:SI 0 "register_operand" "=r")
4905 (and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
4906 (match_operand:SI 2 "register_operand" "r")))]
4907 ""
4908 "andcm %2,%1,%0"
4909 [(set_attr "type" "binary")
4910 (set_attr "length" "4")])
4911
4912 (define_expand "iordi3"
4913 [(set (match_operand:DI 0 "register_operand" "")
4914 (ior:DI (match_operand:DI 1 "arith_double_operand" "")
4915 (match_operand:DI 2 "arith_double_operand" "")))]
4916 ""
4917 "
4918 {
4919 if (! register_operand (operands[1], DImode)
4920 || ! register_operand (operands[2], DImode))
4921 /* Let GCC break this into word-at-a-time operations. */
4922 FAIL;
4923 }")
4924
4925 (define_insn ""
4926 [(set (match_operand:DI 0 "register_operand" "=r")
4927 (ior:DI (match_operand:DI 1 "register_operand" "%r")
4928 (match_operand:DI 2 "register_operand" "r")))]
4929 "!TARGET_64BIT"
4930 "or %1,%2,%0\;or %R1,%R2,%R0"
4931 [(set_attr "type" "binary")
4932 (set_attr "length" "8")])
4933
4934 (define_insn ""
4935 [(set (match_operand:DI 0 "register_operand" "=r,r")
4936 (ior:DI (match_operand:DI 1 "register_operand" "0,0")
4937 (match_operand:DI 2 "ior_operand" "M,i")))]
4938 "TARGET_64BIT"
4939 "* return output_64bit_ior (operands); "
4940 [(set_attr "type" "binary,shift")
4941 (set_attr "length" "4,4")])
4942
4943 (define_insn ""
4944 [(set (match_operand:DI 0 "register_operand" "=r")
4945 (ior:DI (match_operand:DI 1 "register_operand" "%r")
4946 (match_operand:DI 2 "register_operand" "r")))]
4947 "TARGET_64BIT"
4948 "or %1,%2,%0"
4949 [(set_attr "type" "binary")
4950 (set_attr "length" "4")])
4951
4952 ;; Need a define_expand because we've run out of CONST_OK... characters.
4953 (define_expand "iorsi3"
4954 [(set (match_operand:SI 0 "register_operand" "")
4955 (ior:SI (match_operand:SI 1 "register_operand" "")
4956 (match_operand:SI 2 "arith32_operand" "")))]
4957 ""
4958 "
4959 {
4960 if (! (ior_operand (operands[2], SImode)
4961 || register_operand (operands[2], SImode)))
4962 operands[2] = force_reg (SImode, operands[2]);
4963 }")
4964
4965 (define_insn ""
4966 [(set (match_operand:SI 0 "register_operand" "=r,r")
4967 (ior:SI (match_operand:SI 1 "register_operand" "0,0")
4968 (match_operand:SI 2 "ior_operand" "M,i")))]
4969 ""
4970 "* return output_ior (operands); "
4971 [(set_attr "type" "binary,shift")
4972 (set_attr "length" "4,4")])
4973
4974 (define_insn ""
4975 [(set (match_operand:SI 0 "register_operand" "=r")
4976 (ior:SI (match_operand:SI 1 "register_operand" "%r")
4977 (match_operand:SI 2 "register_operand" "r")))]
4978 ""
4979 "or %1,%2,%0"
4980 [(set_attr "type" "binary")
4981 (set_attr "length" "4")])
4982
4983 (define_expand "xordi3"
4984 [(set (match_operand:DI 0 "register_operand" "")
4985 (xor:DI (match_operand:DI 1 "arith_double_operand" "")
4986 (match_operand:DI 2 "arith_double_operand" "")))]
4987 ""
4988 "
4989 {
4990 if (! register_operand (operands[1], DImode)
4991 || ! register_operand (operands[2], DImode))
4992 /* Let GCC break this into word-at-a-time operations. */
4993 FAIL;
4994 }")
4995
4996 (define_insn ""
4997 [(set (match_operand:DI 0 "register_operand" "=r")
4998 (xor:DI (match_operand:DI 1 "register_operand" "%r")
4999 (match_operand:DI 2 "register_operand" "r")))]
5000 "!TARGET_64BIT"
5001 "xor %1,%2,%0\;xor %R1,%R2,%R0"
5002 [(set_attr "type" "binary")
5003 (set_attr "length" "8")])
5004
5005 (define_insn ""
5006 [(set (match_operand:DI 0 "register_operand" "=r")
5007 (xor:DI (match_operand:DI 1 "register_operand" "%r")
5008 (match_operand:DI 2 "register_operand" "r")))]
5009 "TARGET_64BIT"
5010 "xor %1,%2,%0"
5011 [(set_attr "type" "binary")
5012 (set_attr "length" "4")])
5013
5014 (define_insn "xorsi3"
5015 [(set (match_operand:SI 0 "register_operand" "=r")
5016 (xor:SI (match_operand:SI 1 "register_operand" "%r")
5017 (match_operand:SI 2 "register_operand" "r")))]
5018 ""
5019 "xor %1,%2,%0"
5020 [(set_attr "type" "binary")
5021 (set_attr "length" "4")])
5022
5023 (define_expand "negdi2"
5024 [(set (match_operand:DI 0 "register_operand" "")
5025 (neg:DI (match_operand:DI 1 "register_operand" "")))]
5026 ""
5027 "")
5028
5029 (define_insn ""
5030 [(set (match_operand:DI 0 "register_operand" "=r")
5031 (neg:DI (match_operand:DI 1 "register_operand" "r")))]
5032 "!TARGET_64BIT"
5033 "sub %%r0,%R1,%R0\;{subb|sub,b} %%r0,%1,%0"
5034 [(set_attr "type" "unary")
5035 (set_attr "length" "8")])
5036
5037 (define_insn ""
5038 [(set (match_operand:DI 0 "register_operand" "=r")
5039 (neg:DI (match_operand:DI 1 "register_operand" "r")))]
5040 "TARGET_64BIT"
5041 "sub %%r0,%1,%0"
5042 [(set_attr "type" "unary")
5043 (set_attr "length" "4")])
5044
5045 (define_insn "negsi2"
5046 [(set (match_operand:SI 0 "register_operand" "=r")
5047 (neg:SI (match_operand:SI 1 "register_operand" "r")))]
5048 ""
5049 "sub %%r0,%1,%0"
5050 [(set_attr "type" "unary")
5051 (set_attr "length" "4")])
5052
5053 (define_expand "one_cmpldi2"
5054 [(set (match_operand:DI 0 "register_operand" "")
5055 (not:DI (match_operand:DI 1 "arith_double_operand" "")))]
5056 ""
5057 "
5058 {
5059 if (! register_operand (operands[1], DImode))
5060 FAIL;
5061 }")
5062
5063 (define_insn ""
5064 [(set (match_operand:DI 0 "register_operand" "=r")
5065 (not:DI (match_operand:DI 1 "register_operand" "r")))]
5066 "!TARGET_64BIT"
5067 "uaddcm %%r0,%1,%0\;uaddcm %%r0,%R1,%R0"
5068 [(set_attr "type" "unary")
5069 (set_attr "length" "8")])
5070
5071 (define_insn ""
5072 [(set (match_operand:DI 0 "register_operand" "=r")
5073 (not:DI (match_operand:DI 1 "register_operand" "r")))]
5074 "TARGET_64BIT"
5075 "uaddcm %%r0,%1,%0"
5076 [(set_attr "type" "unary")
5077 (set_attr "length" "4")])
5078
5079 (define_insn "one_cmplsi2"
5080 [(set (match_operand:SI 0 "register_operand" "=r")
5081 (not:SI (match_operand:SI 1 "register_operand" "r")))]
5082 ""
5083 "uaddcm %%r0,%1,%0"
5084 [(set_attr "type" "unary")
5085 (set_attr "length" "4")])
5086 \f
5087 ;; Floating point arithmetic instructions.
5088
5089 (define_insn "adddf3"
5090 [(set (match_operand:DF 0 "register_operand" "=f")
5091 (plus:DF (match_operand:DF 1 "register_operand" "f")
5092 (match_operand:DF 2 "register_operand" "f")))]
5093 "! TARGET_SOFT_FLOAT"
5094 "fadd,dbl %1,%2,%0"
5095 [(set_attr "type" "fpalu")
5096 (set_attr "pa_combine_type" "faddsub")
5097 (set_attr "length" "4")])
5098
5099 (define_insn "addsf3"
5100 [(set (match_operand:SF 0 "register_operand" "=f")
5101 (plus:SF (match_operand:SF 1 "register_operand" "f")
5102 (match_operand:SF 2 "register_operand" "f")))]
5103 "! TARGET_SOFT_FLOAT"
5104 "fadd,sgl %1,%2,%0"
5105 [(set_attr "type" "fpalu")
5106 (set_attr "pa_combine_type" "faddsub")
5107 (set_attr "length" "4")])
5108
5109 (define_insn "subdf3"
5110 [(set (match_operand:DF 0 "register_operand" "=f")
5111 (minus:DF (match_operand:DF 1 "register_operand" "f")
5112 (match_operand:DF 2 "register_operand" "f")))]
5113 "! TARGET_SOFT_FLOAT"
5114 "fsub,dbl %1,%2,%0"
5115 [(set_attr "type" "fpalu")
5116 (set_attr "pa_combine_type" "faddsub")
5117 (set_attr "length" "4")])
5118
5119 (define_insn "subsf3"
5120 [(set (match_operand:SF 0 "register_operand" "=f")
5121 (minus:SF (match_operand:SF 1 "register_operand" "f")
5122 (match_operand:SF 2 "register_operand" "f")))]
5123 "! TARGET_SOFT_FLOAT"
5124 "fsub,sgl %1,%2,%0"
5125 [(set_attr "type" "fpalu")
5126 (set_attr "pa_combine_type" "faddsub")
5127 (set_attr "length" "4")])
5128
5129 (define_insn "muldf3"
5130 [(set (match_operand:DF 0 "register_operand" "=f")
5131 (mult:DF (match_operand:DF 1 "register_operand" "f")
5132 (match_operand:DF 2 "register_operand" "f")))]
5133 "! TARGET_SOFT_FLOAT"
5134 "fmpy,dbl %1,%2,%0"
5135 [(set_attr "type" "fpmuldbl")
5136 (set_attr "pa_combine_type" "fmpy")
5137 (set_attr "length" "4")])
5138
5139 (define_insn "mulsf3"
5140 [(set (match_operand:SF 0 "register_operand" "=f")
5141 (mult:SF (match_operand:SF 1 "register_operand" "f")
5142 (match_operand:SF 2 "register_operand" "f")))]
5143 "! TARGET_SOFT_FLOAT"
5144 "fmpy,sgl %1,%2,%0"
5145 [(set_attr "type" "fpmulsgl")
5146 (set_attr "pa_combine_type" "fmpy")
5147 (set_attr "length" "4")])
5148
5149 (define_insn "divdf3"
5150 [(set (match_operand:DF 0 "register_operand" "=f")
5151 (div:DF (match_operand:DF 1 "register_operand" "f")
5152 (match_operand:DF 2 "register_operand" "f")))]
5153 "! TARGET_SOFT_FLOAT"
5154 "fdiv,dbl %1,%2,%0"
5155 [(set_attr "type" "fpdivdbl")
5156 (set_attr "length" "4")])
5157
5158 (define_insn "divsf3"
5159 [(set (match_operand:SF 0 "register_operand" "=f")
5160 (div:SF (match_operand:SF 1 "register_operand" "f")
5161 (match_operand:SF 2 "register_operand" "f")))]
5162 "! TARGET_SOFT_FLOAT"
5163 "fdiv,sgl %1,%2,%0"
5164 [(set_attr "type" "fpdivsgl")
5165 (set_attr "length" "4")])
5166
5167 ;; Processors prior to PA 2.0 don't have a fneg instruction. Fast
5168 ;; negation can be done by subtracting from plus zero. However, this
5169 ;; violates the IEEE standard when negating plus and minus zero.
5170 (define_expand "negdf2"
5171 [(parallel [(set (match_operand:DF 0 "register_operand" "")
5172 (neg:DF (match_operand:DF 1 "register_operand" "")))
5173 (use (match_dup 2))])]
5174 "! TARGET_SOFT_FLOAT"
5175 {
5176 if (TARGET_PA_20 || flag_unsafe_math_optimizations)
5177 emit_insn (gen_negdf2_fast (operands[0], operands[1]));
5178 else
5179 {
5180 operands[2] = force_reg (DFmode,
5181 CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, DFmode));
5182 emit_insn (gen_muldf3 (operands[0], operands[1], operands[2]));
5183 }
5184 DONE;
5185 })
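
;; Illustrative sketch only (assumes IEEE semantics; not part of the port):
;; subtracting from +0.0 gives the wrong sign when negating +0.0, which is
;; why the expander above multiplies by -1.0 unless the PA 2.0 fneg insn is
;; available or -funsafe-math-optimizations is in effect.
;;
;;   #include <stdio.h>
;;   int main (void)
;;   {
;;     double x = 0.0;
;;     printf ("%g\n", 0.0 - x);   /* prints 0, but -x should be -0 */
;;     printf ("%g\n", -1.0 * x);  /* prints -0 */
;;     return 0;
;;   }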
5186
5187 (define_insn "negdf2_fast"
5188 [(set (match_operand:DF 0 "register_operand" "=f")
5189 (neg:DF (match_operand:DF 1 "register_operand" "f")))]
5190 "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
5191 "*
5192 {
5193 if (TARGET_PA_20)
5194 return \"fneg,dbl %1,%0\";
5195 else
5196 return \"fsub,dbl %%fr0,%1,%0\";
5197 }"
5198 [(set_attr "type" "fpalu")
5199 (set_attr "length" "4")])
5200
5201 (define_expand "negsf2"
5202 [(parallel [(set (match_operand:SF 0 "register_operand" "")
5203 (neg:SF (match_operand:SF 1 "register_operand" "")))
5204 (use (match_dup 2))])]
5205 "! TARGET_SOFT_FLOAT"
5206 {
5207 if (TARGET_PA_20 || flag_unsafe_math_optimizations)
5208 emit_insn (gen_negsf2_fast (operands[0], operands[1]));
5209 else
5210 {
5211 operands[2] = force_reg (SFmode,
5212 CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, SFmode));
5213 emit_insn (gen_mulsf3 (operands[0], operands[1], operands[2]));
5214 }
5215 DONE;
5216 })
5217
5218 (define_insn "negsf2_fast"
5219 [(set (match_operand:SF 0 "register_operand" "=f")
5220 (neg:SF (match_operand:SF 1 "register_operand" "f")))]
5221 "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
5222 "*
5223 {
5224 if (TARGET_PA_20)
5225 return \"fneg,sgl %1,%0\";
5226 else
5227 return \"fsub,sgl %%fr0,%1,%0\";
5228 }"
5229 [(set_attr "type" "fpalu")
5230 (set_attr "length" "4")])
5231
5232 (define_insn "absdf2"
5233 [(set (match_operand:DF 0 "register_operand" "=f")
5234 (abs:DF (match_operand:DF 1 "register_operand" "f")))]
5235 "! TARGET_SOFT_FLOAT"
5236 "fabs,dbl %1,%0"
5237 [(set_attr "type" "fpalu")
5238 (set_attr "length" "4")])
5239
5240 (define_insn "abssf2"
5241 [(set (match_operand:SF 0 "register_operand" "=f")
5242 (abs:SF (match_operand:SF 1 "register_operand" "f")))]
5243 "! TARGET_SOFT_FLOAT"
5244 "fabs,sgl %1,%0"
5245 [(set_attr "type" "fpalu")
5246 (set_attr "length" "4")])
5247
5248 (define_insn "sqrtdf2"
5249 [(set (match_operand:DF 0 "register_operand" "=f")
5250 (sqrt:DF (match_operand:DF 1 "register_operand" "f")))]
5251 "! TARGET_SOFT_FLOAT"
5252 "fsqrt,dbl %1,%0"
5253 [(set_attr "type" "fpsqrtdbl")
5254 (set_attr "length" "4")])
5255
5256 (define_insn "sqrtsf2"
5257 [(set (match_operand:SF 0 "register_operand" "=f")
5258 (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
5259 "! TARGET_SOFT_FLOAT"
5260 "fsqrt,sgl %1,%0"
5261 [(set_attr "type" "fpsqrtsgl")
5262 (set_attr "length" "4")])
5263
5264 ;; PA 2.0 floating point instructions
5265
5266 ; fmpyfadd patterns
5267 (define_insn ""
5268 [(set (match_operand:DF 0 "register_operand" "=f")
5269 (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5270 (match_operand:DF 2 "register_operand" "f"))
5271 (match_operand:DF 3 "register_operand" "f")))]
5272 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5273 "fmpyfadd,dbl %1,%2,%3,%0"
5274 [(set_attr "type" "fpmuldbl")
5275 (set_attr "length" "4")])
5276
5277 (define_insn ""
5278 [(set (match_operand:DF 0 "register_operand" "=f")
5279 (plus:DF (match_operand:DF 1 "register_operand" "f")
5280 (mult:DF (match_operand:DF 2 "register_operand" "f")
5281 (match_operand:DF 3 "register_operand" "f"))))]
5282 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5283 "fmpyfadd,dbl %2,%3,%1,%0"
5284 [(set_attr "type" "fpmuldbl")
5285 (set_attr "length" "4")])
5286
5287 (define_insn ""
5288 [(set (match_operand:SF 0 "register_operand" "=f")
5289 (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5290 (match_operand:SF 2 "register_operand" "f"))
5291 (match_operand:SF 3 "register_operand" "f")))]
5292 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5293 "fmpyfadd,sgl %1,%2,%3,%0"
5294 [(set_attr "type" "fpmulsgl")
5295 (set_attr "length" "4")])
5296
5297 (define_insn ""
5298 [(set (match_operand:SF 0 "register_operand" "=f")
5299 (plus:SF (match_operand:SF 1 "register_operand" "f")
5300 (mult:SF (match_operand:SF 2 "register_operand" "f")
5301 (match_operand:SF 3 "register_operand" "f"))))]
5302 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5303 "fmpyfadd,sgl %2,%3,%1,%0"
5304 [(set_attr "type" "fpmulsgl")
5305 (set_attr "length" "4")])
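
;; Illustrative sketch only (the function name is made up): on PA 2.0
;; hard-float targets, combine may fuse a multiply feeding an add into one
;; of the fmpyfadd patterns above.
;;
;;   double
;;   madd (double a, double b, double c)
;;   {
;;     return a * b + c;   /* may become fmpyfadd,dbl a,b,c,result */
;;   }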
5306
5307 ; fmpynfadd patterns
5308 (define_insn ""
5309 [(set (match_operand:DF 0 "register_operand" "=f")
5310 (minus:DF (match_operand:DF 1 "register_operand" "f")
5311 (mult:DF (match_operand:DF 2 "register_operand" "f")
5312 (match_operand:DF 3 "register_operand" "f"))))]
5313 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5314 "fmpynfadd,dbl %2,%3,%1,%0"
5315 [(set_attr "type" "fpmuldbl")
5316 (set_attr "length" "4")])
5317
5318 (define_insn ""
5319 [(set (match_operand:SF 0 "register_operand" "=f")
5320 (minus:SF (match_operand:SF 1 "register_operand" "f")
5321 (mult:SF (match_operand:SF 2 "register_operand" "f")
5322 (match_operand:SF 3 "register_operand" "f"))))]
5323 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5324 "fmpynfadd,sgl %2,%3,%1,%0"
5325 [(set_attr "type" "fpmulsgl")
5326 (set_attr "length" "4")])
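
;; Illustrative sketch only (the function name is made up): a multiply whose
;; result is subtracted from another operand may match the fmpynfadd
;; patterns above, since c - a*b == -(a*b) + c.
;;
;;   double
;;   msub (double a, double b, double c)
;;   {
;;     return c - a * b;   /* may become fmpynfadd,dbl a,b,c,result */
;;   }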
5327
5328 ; fnegabs patterns
5329 (define_insn ""
5330 [(set (match_operand:DF 0 "register_operand" "=f")
5331 (neg:DF (abs:DF (match_operand:DF 1 "register_operand" "f"))))]
5332 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5333 "fnegabs,dbl %1,%0"
5334 [(set_attr "type" "fpalu")
5335 (set_attr "length" "4")])
5336
5337 (define_insn ""
5338 [(set (match_operand:SF 0 "register_operand" "=f")
5339 (neg:SF (abs:SF (match_operand:SF 1 "register_operand" "f"))))]
5340 "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
5341 "fnegabs,sgl %1,%0"
5342 [(set_attr "type" "fpalu")
5343 (set_attr "length" "4")])
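
;; Illustrative sketch only (the function name is made up): negating an
;; absolute value maps onto the fnegabs patterns above.
;;
;;   #include <math.h>
;;   double
;;   negabs (double x)
;;   {
;;     return -fabs (x);   /* may become fnegabs,dbl x,result */
;;   }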
5344
5345 ;; Generating a fused multiply sequence is a win for this case as it will
5346 ;; reduce the latency for the fused case without impacting the plain
5347 ;; multiply case.
5348 ;;
5349 ;; Similar possibilities exist for fnegabs, shadd and other insns which
5350 ;; perform two operations with the result of the first feeding the second.
5351 (define_insn ""
5352 [(set (match_operand:DF 0 "register_operand" "=f")
5353 (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5354 (match_operand:DF 2 "register_operand" "f"))
5355 (match_operand:DF 3 "register_operand" "f")))
5356 (set (match_operand:DF 4 "register_operand" "=&f")
5357 (mult:DF (match_dup 1) (match_dup 2)))]
5358 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5359 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5360 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5361 "#"
5362 [(set_attr "type" "fpmuldbl")
5363 (set_attr "length" "8")])
5364
5365 ;; We want to split this up during scheduling since we want both insns
5366 ;; to schedule independently.
5367 (define_split
5368 [(set (match_operand:DF 0 "register_operand" "")
5369 (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "")
5370 (match_operand:DF 2 "register_operand" ""))
5371 (match_operand:DF 3 "register_operand" "")))
5372 (set (match_operand:DF 4 "register_operand" "")
5373 (mult:DF (match_dup 1) (match_dup 2)))]
5374 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5375 [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
5376 (set (match_dup 0) (plus:DF (mult:DF (match_dup 1) (match_dup 2))
5377 (match_dup 3)))]
5378 "")
5379
5380 (define_insn ""
5381 [(set (match_operand:SF 0 "register_operand" "=f")
5382 (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5383 (match_operand:SF 2 "register_operand" "f"))
5384 (match_operand:SF 3 "register_operand" "f")))
5385 (set (match_operand:SF 4 "register_operand" "=&f")
5386 (mult:SF (match_dup 1) (match_dup 2)))]
5387 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5388 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5389 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5390 "#"
5391 [(set_attr "type" "fpmuldbl")
5392 (set_attr "length" "8")])
5393
5394 ;; We want to split this up during scheduling since we want both insns
5395 ;; to schedule independently.
5396 (define_split
5397 [(set (match_operand:SF 0 "register_operand" "")
5398 (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "")
5399 (match_operand:SF 2 "register_operand" ""))
5400 (match_operand:SF 3 "register_operand" "")))
5401 (set (match_operand:SF 4 "register_operand" "")
5402 (mult:SF (match_dup 1) (match_dup 2)))]
5403 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5404 [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
5405 (set (match_dup 0) (plus:SF (mult:SF (match_dup 1) (match_dup 2))
5406 (match_dup 3)))]
5407 "")
5408
5409 ;; Negating a multiply can be faked by adding zero in a fused multiply-add
5410 ;; instruction.
5411 (define_insn ""
5412 [(set (match_operand:DF 0 "register_operand" "=f")
5413 (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5414 (match_operand:DF 2 "register_operand" "f"))))]
5415 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5416 "fmpynfadd,dbl %1,%2,%%fr0,%0"
5417 [(set_attr "type" "fpmuldbl")
5418 (set_attr "length" "4")])
5419
5420 (define_insn ""
5421 [(set (match_operand:SF 0 "register_operand" "=f")
5422 (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5423 (match_operand:SF 2 "register_operand" "f"))))]
5424 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5425 "fmpynfadd,sgl %1,%2,%%fr0,%0"
5426 [(set_attr "type" "fpmuldbl")
5427 (set_attr "length" "4")])
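
;; Illustrative sketch only (the function name is made up): a negated
;; multiply matches the patterns above, which add a zero from %fr0 in a
;; fused multiply-add to get the negation for free.
;;
;;   double
;;   negmul (double a, double b)
;;   {
;;     return -(a * b);   /* may become fmpynfadd,dbl a,b,%fr0,result */
;;   }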
5428
5429 (define_insn ""
5430 [(set (match_operand:DF 0 "register_operand" "=f")
5431 (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5432 (match_operand:DF 2 "register_operand" "f"))))
5433 (set (match_operand:DF 3 "register_operand" "=&f")
5434 (mult:DF (match_dup 1) (match_dup 2)))]
5435 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5436 && ! (reg_overlap_mentioned_p (operands[3], operands[1])
5437 || reg_overlap_mentioned_p (operands[3], operands[2])))"
5438 "#"
5439 [(set_attr "type" "fpmuldbl")
5440 (set_attr "length" "8")])
5441
5442 (define_split
5443 [(set (match_operand:DF 0 "register_operand" "")
5444 (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "")
5445 (match_operand:DF 2 "register_operand" ""))))
5446 (set (match_operand:DF 3 "register_operand" "")
5447 (mult:DF (match_dup 1) (match_dup 2)))]
5448 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5449 [(set (match_dup 3) (mult:DF (match_dup 1) (match_dup 2)))
5450 (set (match_dup 0) (neg:DF (mult:DF (match_dup 1) (match_dup 2))))]
5451 "")
5452
5453 (define_insn ""
5454 [(set (match_operand:SF 0 "register_operand" "=f")
5455 (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5456 (match_operand:SF 2 "register_operand" "f"))))
5457 (set (match_operand:SF 3 "register_operand" "=&f")
5458 (mult:SF (match_dup 1) (match_dup 2)))]
5459 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5460 && ! (reg_overlap_mentioned_p (operands[3], operands[1])
5461 || reg_overlap_mentioned_p (operands[3], operands[2])))"
5462 "#"
5463 [(set_attr "type" "fpmuldbl")
5464 (set_attr "length" "8")])
5465
5466 (define_split
5467 [(set (match_operand:SF 0 "register_operand" "")
5468 (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "")
5469 (match_operand:SF 2 "register_operand" ""))))
5470 (set (match_operand:SF 3 "register_operand" "")
5471 (mult:SF (match_dup 1) (match_dup 2)))]
5472 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5473 [(set (match_dup 3) (mult:SF (match_dup 1) (match_dup 2)))
5474 (set (match_dup 0) (neg:SF (mult:SF (match_dup 1) (match_dup 2))))]
5475 "")
5476
5477 ;; Now fused multiplies with the result of the multiply negated.
5478 (define_insn ""
5479 [(set (match_operand:DF 0 "register_operand" "=f")
5480 (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5481 (match_operand:DF 2 "register_operand" "f")))
5482 (match_operand:DF 3 "register_operand" "f")))]
5483 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5484 "fmpynfadd,dbl %1,%2,%3,%0"
5485 [(set_attr "type" "fpmuldbl")
5486 (set_attr "length" "4")])
5487
5488 (define_insn ""
5489 [(set (match_operand:SF 0 "register_operand" "=f")
5490 (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5491 (match_operand:SF 2 "register_operand" "f")))
5492 (match_operand:SF 3 "register_operand" "f")))]
5493 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5494 "fmpynfadd,sgl %1,%2,%3,%0"
5495 [(set_attr "type" "fpmuldbl")
5496 (set_attr "length" "4")])
5497
5498 (define_insn ""
5499 [(set (match_operand:DF 0 "register_operand" "=f")
5500 (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
5501 (match_operand:DF 2 "register_operand" "f")))
5502 (match_operand:DF 3 "register_operand" "f")))
5503 (set (match_operand:DF 4 "register_operand" "=&f")
5504 (mult:DF (match_dup 1) (match_dup 2)))]
5505 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5506 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5507 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5508 "#"
5509 [(set_attr "type" "fpmuldbl")
5510 (set_attr "length" "8")])
5511
5512 (define_split
5513 [(set (match_operand:DF 0 "register_operand" "")
5514 (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "")
5515 (match_operand:DF 2 "register_operand" "")))
5516 (match_operand:DF 3 "register_operand" "")))
5517 (set (match_operand:DF 4 "register_operand" "")
5518 (mult:DF (match_dup 1) (match_dup 2)))]
5519 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5520 [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
5521 (set (match_dup 0) (plus:DF (neg:DF (mult:DF (match_dup 1) (match_dup 2)))
5522 (match_dup 3)))]
5523 "")
5524
5525 (define_insn ""
5526 [(set (match_operand:SF 0 "register_operand" "=f")
5527 (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
5528 (match_operand:SF 2 "register_operand" "f")))
5529 (match_operand:SF 3 "register_operand" "f")))
5530 (set (match_operand:SF 4 "register_operand" "=&f")
5531 (mult:SF (match_dup 1) (match_dup 2)))]
5532 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5533 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5534 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5535 "#"
5536 [(set_attr "type" "fpmuldbl")
5537 (set_attr "length" "8")])
5538
5539 (define_split
5540 [(set (match_operand:SF 0 "register_operand" "")
5541 (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "")
5542 (match_operand:SF 2 "register_operand" "")))
5543 (match_operand:SF 3 "register_operand" "")))
5544 (set (match_operand:SF 4 "register_operand" "")
5545 (mult:SF (match_dup 1) (match_dup 2)))]
5546 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5547 [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
5548 (set (match_dup 0) (plus:SF (neg:SF (mult:SF (match_dup 1) (match_dup 2)))
5549 (match_dup 3)))]
5550 "")
5551
5552 (define_insn ""
5553 [(set (match_operand:DF 0 "register_operand" "=f")
5554 (minus:DF (match_operand:DF 3 "register_operand" "f")
5555 (mult:DF (match_operand:DF 1 "register_operand" "f")
5556 (match_operand:DF 2 "register_operand" "f"))))
5557 (set (match_operand:DF 4 "register_operand" "=&f")
5558 (mult:DF (match_dup 1) (match_dup 2)))]
5559 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5560 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5561 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5562 "#"
5563 [(set_attr "type" "fpmuldbl")
5564 (set_attr "length" "8")])
5565
5566 (define_split
5567 [(set (match_operand:DF 0 "register_operand" "")
5568 (minus:DF (match_operand:DF 3 "register_operand" "")
5569 (mult:DF (match_operand:DF 1 "register_operand" "")
5570 (match_operand:DF 2 "register_operand" ""))))
5571 (set (match_operand:DF 4 "register_operand" "")
5572 (mult:DF (match_dup 1) (match_dup 2)))]
5573 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5574 [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
5575 (set (match_dup 0) (minus:DF (match_dup 3)
5576 (mult:DF (match_dup 1) (match_dup 2))))]
5577 "")
5578
5579 (define_insn ""
5580 [(set (match_operand:SF 0 "register_operand" "=f")
5581 (minus:SF (match_operand:SF 3 "register_operand" "f")
5582 (mult:SF (match_operand:SF 1 "register_operand" "f")
5583 (match_operand:SF 2 "register_operand" "f"))))
5584 (set (match_operand:SF 4 "register_operand" "=&f")
5585 (mult:SF (match_dup 1) (match_dup 2)))]
5586 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5587 && ! (reg_overlap_mentioned_p (operands[4], operands[1])
5588 || reg_overlap_mentioned_p (operands[4], operands[2])))"
5589 "#"
5590 [(set_attr "type" "fpmuldbl")
5591 (set_attr "length" "8")])
5592
5593 (define_split
5594 [(set (match_operand:SF 0 "register_operand" "")
5595 (minus:SF (match_operand:SF 3 "register_operand" "")
5596 (mult:SF (match_operand:SF 1 "register_operand" "")
5597 (match_operand:SF 2 "register_operand" ""))))
5598 (set (match_operand:SF 4 "register_operand" "")
5599 (mult:SF (match_dup 1) (match_dup 2)))]
5600 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5601 [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
5602 (set (match_dup 0) (minus:SF (match_dup 3)
5603 (mult:SF (match_dup 1) (match_dup 2))))]
5604 "")
5605
5606 (define_insn ""
5607 [(set (match_operand:DF 0 "register_operand" "=f")
5608 (neg:DF (abs:DF (match_operand:DF 1 "register_operand" "f"))))
5609 (set (match_operand:DF 2 "register_operand" "=&f") (abs:DF (match_dup 1)))]
5610 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5611 && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
5612 "#"
5613 [(set_attr "type" "fpalu")
5614 (set_attr "length" "8")])
5615
5616 (define_split
5617 [(set (match_operand:DF 0 "register_operand" "")
5618 (neg:DF (abs:DF (match_operand:DF 1 "register_operand" ""))))
5619 (set (match_operand:DF 2 "register_operand" "") (abs:DF (match_dup 1)))]
5620 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5621 [(set (match_dup 2) (abs:DF (match_dup 1)))
5622 (set (match_dup 0) (neg:DF (abs:DF (match_dup 1))))]
5623 "")
5624
5625 (define_insn ""
5626 [(set (match_operand:SF 0 "register_operand" "=f")
5627 (neg:SF (abs:SF (match_operand:SF 1 "register_operand" "f"))))
5628 (set (match_operand:SF 2 "register_operand" "=&f") (abs:SF (match_dup 1)))]
5629 "(! TARGET_SOFT_FLOAT && TARGET_PA_20
5630 && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
5631 "#"
5632 [(set_attr "type" "fpalu")
5633 (set_attr "length" "8")])
5634
5635 (define_split
5636 [(set (match_operand:SF 0 "register_operand" "")
5637 (neg:SF (abs:SF (match_operand:SF 1 "register_operand" ""))))
5638 (set (match_operand:SF 2 "register_operand" "") (abs:SF (match_dup 1)))]
5639 "! TARGET_SOFT_FLOAT && TARGET_PA_20"
5640 [(set (match_dup 2) (abs:SF (match_dup 1)))
5641 (set (match_dup 0) (neg:SF (abs:SF (match_dup 1))))]
5642 "")
5643 \f
5644 ;;- Shift instructions
5645
5646 ;; Optimized special case of shifting.
5647
5648 (define_insn ""
5649 [(set (match_operand:SI 0 "register_operand" "=r")
5650 (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m")
5651 (const_int 24)))]
5652 ""
5653 "ldb%M1 %1,%0"
5654 [(set_attr "type" "load")
5655 (set_attr "length" "4")])
5656
5657 (define_insn ""
5658 [(set (match_operand:SI 0 "register_operand" "=r")
5659 (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m")
5660 (const_int 16)))]
5661 ""
5662 "ldh%M1 %1,%0"
5663 [(set_attr "type" "load")
5664 (set_attr "length" "4")])
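
;; Illustrative sketch only (the function names are made up): on this
;; big-endian target, the high byte or halfword of a word in memory can be
;; loaded directly with ldb/ldh instead of a full-word load followed by a
;; shift, which is what the two patterns above recognize.
;;
;;   unsigned int msb (unsigned int *p) { return *p >> 24; }  /* ldb */
;;   unsigned int msh (unsigned int *p) { return *p >> 16; }  /* ldh */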
5665
5666 (define_insn ""
5667 [(set (match_operand:SI 0 "register_operand" "=r")
5668 (plus:SI (mult:SI (match_operand:SI 2 "register_operand" "r")
5669 (match_operand:SI 3 "shadd_operand" ""))
5670 (match_operand:SI 1 "register_operand" "r")))]
5671 ""
5672 "{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0} "
5673 [(set_attr "type" "binary")
5674 (set_attr "length" "4")])
5675
5676 (define_insn ""
5677 [(set (match_operand:DI 0 "register_operand" "=r")
5678 (plus:DI (mult:DI (match_operand:DI 2 "register_operand" "r")
5679 (match_operand:DI 3 "shadd_operand" ""))
5680 (match_operand:DI 1 "register_operand" "r")))]
5681 "TARGET_64BIT"
5682 "shladd,l %2,%O3,%1,%0"
5683 [(set_attr "type" "binary")
5684 (set_attr "length" "4")])
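
;; Illustrative sketch only (the function name is made up): array indexing
;; with a small element size produces the (plus (mult ...)) form matched by
;; the shift-and-add patterns above, so the address arithmetic takes a
;; single insn.
;;
;;   int
;;   elt (int *p, int i)
;;   {
;;     return p[i];   /* address is p + i*4; may become sh2addl/shladd,l */
;;   }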
5685
5686 (define_expand "ashlsi3"
5687 [(set (match_operand:SI 0 "register_operand" "")
5688 (ashift:SI (match_operand:SI 1 "lhs_lshift_operand" "")
5689 (match_operand:SI 2 "arith32_operand" "")))]
5690 ""
5691 "
5692 {
5693 if (GET_CODE (operands[2]) != CONST_INT)
5694 {
5695 rtx temp = gen_reg_rtx (SImode);
5696 emit_insn (gen_subsi3 (temp, GEN_INT (31), operands[2]));
5697 if (GET_CODE (operands[1]) == CONST_INT)
5698 emit_insn (gen_zvdep_imm32 (operands[0], operands[1], temp));
5699 else
5700 emit_insn (gen_zvdep32 (operands[0], operands[1], temp));
5701 DONE;
5702 }
5703 /* Make sure both inputs are not constants;
5704 there are no patterns for that. */
5705 operands[1] = force_reg (SImode, operands[1]);
5706 }")
5707
5708 (define_insn ""
5709 [(set (match_operand:SI 0 "register_operand" "=r")
5710 (ashift:SI (match_operand:SI 1 "register_operand" "r")
5711 (match_operand:SI 2 "const_int_operand" "n")))]
5712 ""
5713 "{zdep|depw,z} %1,%P2,%L2,%0"
5714 [(set_attr "type" "shift")
5715 (set_attr "length" "4")])
5716
5717 ; Match cases where op1 is a CONST_INT that zvdep_imm32 doesn't handle.
5718 ; Doing it like this makes slightly better code since reload can
5719 ; replace a register known to hold a value in the range -16..15 with a
5720 ; constant. Ideally, we would like to merge zvdep32 and zvdep_imm32,
5721 ; but since we have no more CONST_OK... characters, that is not
5722 ; possible.
5723 (define_insn "zvdep32"
5724 [(set (match_operand:SI 0 "register_operand" "=r,r")
5725 (ashift:SI (match_operand:SI 1 "arith5_operand" "r,L")
5726 (minus:SI (const_int 31)
5727 (match_operand:SI 2 "register_operand" "q,q"))))]
5728 ""
5729 "@
5730 {zvdep %1,32,%0|depw,z %1,%%sar,32,%0}
5731 {zvdepi %1,32,%0|depwi,z %1,%%sar,32,%0}"
5732 [(set_attr "type" "shift,shift")
5733 (set_attr "length" "4,4")])
5734
5735 (define_insn "zvdep_imm32"
5736 [(set (match_operand:SI 0 "register_operand" "=r")
5737 (ashift:SI (match_operand:SI 1 "lhs_lshift_cint_operand" "")
5738 (minus:SI (const_int 31)
5739 (match_operand:SI 2 "register_operand" "q"))))]
5740 ""
5741 "*
5742 {
5743 int x = INTVAL (operands[1]);
5744 operands[2] = GEN_INT (4 + exact_log2 ((x >> 4) + 1));
5745 operands[1] = GEN_INT ((x & 0xf) - 0x10);
5746 return \"{zvdepi %1,%2,%0|depwi,z %1,%%sar,%2,%0}\";
5747 }"
5748 [(set_attr "type" "shift")
5749 (set_attr "length" "4")])
5750
5751 (define_insn "vdepi_ior"
5752 [(set (match_operand:SI 0 "register_operand" "=r")
5753 (ior:SI (ashift:SI (match_operand:SI 1 "const_int_operand" "")
5754 (minus:SI (const_int 31)
5755 (match_operand:SI 2 "register_operand" "q")))
5756 (match_operand:SI 3 "register_operand" "0")))]
5757 ; accept ...0001...1, can this be generalized?
5758 "exact_log2 (INTVAL (operands[1]) + 1) >= 0"
5759 "*
5760 {
5761 int x = INTVAL (operands[1]);
5762 operands[2] = GEN_INT (exact_log2 (x + 1));
5763 return \"{vdepi -1,%2,%0|depwi -1,%%sar,%2,%0}\";
5764 }"
5765 [(set_attr "type" "shift")
5766 (set_attr "length" "4")])
5767
5768 (define_insn "vdepi_and"
5769 [(set (match_operand:SI 0 "register_operand" "=r")
5770 (and:SI (rotate:SI (match_operand:SI 1 "const_int_operand" "")
5771 (minus:SI (const_int 31)
5772 (match_operand:SI 2 "register_operand" "q")))
5773 (match_operand:SI 3 "register_operand" "0")))]
5774 ; this can be generalized...!
5775 "INTVAL (operands[1]) == -2"
5776 "*
5777 {
5778 int x = INTVAL (operands[1]);
5779 operands[2] = GEN_INT (exact_log2 ((~x) + 1));
5780 return \"{vdepi 0,%2,%0|depwi 0,%%sar,%2,%0}\";
5781 }"
5782 [(set_attr "type" "shift")
5783 (set_attr "length" "4")])
5784
5785 (define_expand "ashldi3"
5786 [(set (match_operand:DI 0 "register_operand" "")
5787 (ashift:DI (match_operand:DI 1 "lhs_lshift_operand" "")
5788 (match_operand:DI 2 "arith32_operand" "")))]
5789 "TARGET_64BIT"
5790 "
5791 {
5792 if (GET_CODE (operands[2]) != CONST_INT)
5793 {
5794 rtx temp = gen_reg_rtx (DImode);
5795 emit_insn (gen_subdi3 (temp, GEN_INT (63), operands[2]));
5796 if (GET_CODE (operands[1]) == CONST_INT)
5797 emit_insn (gen_zvdep_imm64 (operands[0], operands[1], temp));
5798 else
5799 emit_insn (gen_zvdep64 (operands[0], operands[1], temp));
5800 DONE;
5801 }
5802 /* Make sure both inputs are not constants;
5803 there are no patterns for that. */
5804 operands[1] = force_reg (DImode, operands[1]);
5805 }")
5806
5807 (define_insn ""
5808 [(set (match_operand:DI 0 "register_operand" "=r")
5809 (ashift:DI (match_operand:DI 1 "register_operand" "r")
5810 (match_operand:DI 2 "const_int_operand" "n")))]
5811 "TARGET_64BIT"
5812 "depd,z %1,%p2,%Q2,%0"
5813 [(set_attr "type" "shift")
5814 (set_attr "length" "4")])
5815
5816 ; Match cases where op1 is a CONST_INT that zvdep_imm64 doesn't handle.
5817 ; Doing it like this makes slightly better code since reload can
5818 ; replace a register known to hold a value in the range -16..15 with a
5819 ; constant. Ideally, we would like to merge zvdep64 and zvdep_imm64,
5820 ; but since we have no more CONST_OK... characters, that is not
5821 ; possible.
5822 (define_insn "zvdep64"
5823 [(set (match_operand:DI 0 "register_operand" "=r,r")
5824 (ashift:DI (match_operand:DI 1 "arith5_operand" "r,L")
5825 (minus:DI (const_int 63)
5826 (match_operand:DI 2 "register_operand" "q,q"))))]
5827 "TARGET_64BIT"
5828 "@
5829 depd,z %1,%%sar,64,%0
5830 depdi,z %1,%%sar,64,%0"
5831 [(set_attr "type" "shift,shift")
5832 (set_attr "length" "4,4")])
5833
5834 (define_insn "zvdep_imm64"
5835 [(set (match_operand:DI 0 "register_operand" "=r")
5836 (ashift:DI (match_operand:DI 1 "lhs_lshift_cint_operand" "")
5837 (minus:DI (const_int 63)
5838 (match_operand:DI 2 "register_operand" "q"))))]
5839 "TARGET_64BIT"
5840 "*
5841 {
5842 int x = INTVAL (operands[1]);
5843 operands[2] = GEN_INT (4 + exact_log2 ((x >> 4) + 1));
5844 operands[1] = GEN_INT ((x & 0x1f) - 0x20);
5845 return \"depdi,z %1,%%sar,%2,%0\";
5846 }"
5847 [(set_attr "type" "shift")
5848 (set_attr "length" "4")])
5849
5850 (define_insn ""
5851 [(set (match_operand:DI 0 "register_operand" "=r")
5852 (ior:DI (ashift:DI (match_operand:DI 1 "const_int_operand" "")
5853 (minus:DI (const_int 63)
5854 (match_operand:DI 2 "register_operand" "q")))
5855 (match_operand:DI 3 "register_operand" "0")))]
5856 ; accept ...0001...1, can this be generalized?
5857 "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) >= 0"
5858 "*
5859 {
5860 int x = INTVAL (operands[1]);
5861 operands[2] = GEN_INT (exact_log2 (x + 1));
5862 return \"depdi -1,%%sar,%2,%0\";
5863 }"
5864 [(set_attr "type" "shift")
5865 (set_attr "length" "4")])
5866
5867 (define_insn ""
5868 [(set (match_operand:DI 0 "register_operand" "=r")
5869 (and:DI (rotate:DI (match_operand:DI 1 "const_int_operand" "")
5870 (minus:DI (const_int 63)
5871 (match_operand:DI 2 "register_operand" "q")))
5872 (match_operand:DI 3 "register_operand" "0")))]
5873 ; this can be generalized...!
5874 "TARGET_64BIT && INTVAL (operands[1]) == -2"
5875 "*
5876 {
5877 int x = INTVAL (operands[1]);
5878 operands[2] = GEN_INT (exact_log2 ((~x) + 1));
5879 return \"depdi 0,%%sar,%2,%0\";
5880 }"
5881 [(set_attr "type" "shift")
5882 (set_attr "length" "4")])
5883
5884 (define_expand "ashrsi3"
5885 [(set (match_operand:SI 0 "register_operand" "")
5886 (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
5887 (match_operand:SI 2 "arith32_operand" "")))]
5888 ""
5889 "
5890 {
5891 if (GET_CODE (operands[2]) != CONST_INT)
5892 {
5893 rtx temp = gen_reg_rtx (SImode);
5894 emit_insn (gen_subsi3 (temp, GEN_INT (31), operands[2]));
5895 emit_insn (gen_vextrs32 (operands[0], operands[1], temp));
5896 DONE;
5897 }
5898 }")
5899
5900 (define_insn ""
5901 [(set (match_operand:SI 0 "register_operand" "=r")
5902 (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
5903 (match_operand:SI 2 "const_int_operand" "n")))]
5904 ""
5905 "{extrs|extrw,s} %1,%P2,%L2,%0"
5906 [(set_attr "type" "shift")
5907 (set_attr "length" "4")])
5908
5909 (define_insn "vextrs32"
5910 [(set (match_operand:SI 0 "register_operand" "=r")
5911 (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
5912 (minus:SI (const_int 31)
5913 (match_operand:SI 2 "register_operand" "q"))))]
5914 ""
5915 "{vextrs %1,32,%0|extrw,s %1,%%sar,32,%0}"
5916 [(set_attr "type" "shift")
5917 (set_attr "length" "4")])
5918
5919 (define_expand "ashrdi3"
5920 [(set (match_operand:DI 0 "register_operand" "")
5921 (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
5922 (match_operand:DI 2 "arith32_operand" "")))]
5923 "TARGET_64BIT"
5924 "
5925 {
5926 if (GET_CODE (operands[2]) != CONST_INT)
5927 {
5928 rtx temp = gen_reg_rtx (DImode);
5929 emit_insn (gen_subdi3 (temp, GEN_INT (63), operands[2]));
5930 emit_insn (gen_vextrs64 (operands[0], operands[1], temp));
5931 DONE;
5932 }
5933 }")
5934
5935 (define_insn ""
5936 [(set (match_operand:DI 0 "register_operand" "=r")
5937 (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
5938 (match_operand:DI 2 "const_int_operand" "n")))]
5939 "TARGET_64BIT"
5940 "extrd,s %1,%p2,%Q2,%0"
5941 [(set_attr "type" "shift")
5942 (set_attr "length" "4")])
5943
5944 (define_insn "vextrs64"
5945 [(set (match_operand:DI 0 "register_operand" "=r")
5946 (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
5947 (minus:DI (const_int 63)
5948 (match_operand:DI 2 "register_operand" "q"))))]
5949 "TARGET_64BIT"
5950 "extrd,s %1,%%sar,64,%0"
5951 [(set_attr "type" "shift")
5952 (set_attr "length" "4")])
5953
5954 (define_insn "lshrsi3"
5955 [(set (match_operand:SI 0 "register_operand" "=r,r")
5956 (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
5957 (match_operand:SI 2 "arith32_operand" "q,n")))]
5958 ""
5959 "@
5960 {vshd %%r0,%1,%0|shrpw %%r0,%1,%%sar,%0}
5961 {extru|extrw,u} %1,%P2,%L2,%0"
5962 [(set_attr "type" "shift")
5963 (set_attr "length" "4")])
5964
5965 (define_insn "lshrdi3"
5966 [(set (match_operand:DI 0 "register_operand" "=r,r")
5967 (lshiftrt:DI (match_operand:DI 1 "register_operand" "r,r")
5968 (match_operand:DI 2 "arith32_operand" "q,n")))]
5969 "TARGET_64BIT"
5970 "@
5971 shrpd %%r0,%1,%%sar,%0
5972 extrd,u %1,%p2,%Q2,%0"
5973 [(set_attr "type" "shift")
5974 (set_attr "length" "4")])
5975
5976 (define_insn "rotrsi3"
5977 [(set (match_operand:SI 0 "register_operand" "=r,r")
5978 (rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
5979 (match_operand:SI 2 "arith32_operand" "q,n")))]
5980 ""
5981 "*
5982 {
5983 if (GET_CODE (operands[2]) == CONST_INT)
5984 {
5985 operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
5986 return \"{shd|shrpw} %1,%1,%2,%0\";
5987 }
5988 else
5989 return \"{vshd %1,%1,%0|shrpw %1,%1,%%sar,%0}\";
5990 }"
5991 [(set_attr "type" "shift")
5992 (set_attr "length" "4")])
5993
5994 (define_expand "rotlsi3"
5995 [(set (match_operand:SI 0 "register_operand" "")
5996 (rotate:SI (match_operand:SI 1 "register_operand" "")
5997 (match_operand:SI 2 "arith32_operand" "")))]
5998 ""
5999 "
6000 {
6001 if (GET_CODE (operands[2]) != CONST_INT)
6002 {
6003 rtx temp = gen_reg_rtx (SImode);
6004 emit_insn (gen_subsi3 (temp, GEN_INT (32), operands[2]));
6005 emit_insn (gen_rotrsi3 (operands[0], operands[1], temp));
6006 DONE;
6007 }
6008 /* Else expand normally. */
6009 }")
6010
6011 (define_insn ""
6012 [(set (match_operand:SI 0 "register_operand" "=r")
6013 (rotate:SI (match_operand:SI 1 "register_operand" "r")
6014 (match_operand:SI 2 "const_int_operand" "n")))]
6015 ""
6016 "*
6017 {
6018 operands[2] = GEN_INT ((32 - INTVAL (operands[2])) & 31);
6019 return \"{shd|shrpw} %1,%1,%2,%0\";
6020 }"
6021 [(set_attr "type" "shift")
6022 (set_attr "length" "4")])
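
;; Illustrative sketch only (the function name is made up): a left rotate is
;; synthesized as a right rotate by (32 - n), as the expander and the
;; constant-count insn above show.
;;
;;   unsigned int
;;   rotl8 (unsigned int x)
;;   {
;;     return (x << 8) | (x >> 24);   /* rotate left 8 == rotate right 24 */
;;   }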
6023
6024 (define_insn ""
6025 [(set (match_operand:SI 0 "register_operand" "=r")
6026 (match_operator:SI 5 "plus_xor_ior_operator"
6027 [(ashift:SI (match_operand:SI 1 "register_operand" "r")
6028 (match_operand:SI 3 "const_int_operand" "n"))
6029 (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
6030 (match_operand:SI 4 "const_int_operand" "n"))]))]
6031 "INTVAL (operands[3]) + INTVAL (operands[4]) == 32"
6032 "{shd|shrpw} %1,%2,%4,%0"
6033 [(set_attr "type" "shift")
6034 (set_attr "length" "4")])
6035
6036 (define_insn ""
6037 [(set (match_operand:SI 0 "register_operand" "=r")
6038 (match_operator:SI 5 "plus_xor_ior_operator"
6039 [(lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
6040 (match_operand:SI 4 "const_int_operand" "n"))
6041 (ashift:SI (match_operand:SI 1 "register_operand" "r")
6042 (match_operand:SI 3 "const_int_operand" "n"))]))]
6043 "INTVAL (operands[3]) + INTVAL (operands[4]) == 32"
6044 "{shd|shrpw} %1,%2,%4,%0"
6045 [(set_attr "type" "shift")
6046 (set_attr "length" "4")])
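
;; Illustrative sketch only (the function name is made up): a funnel shift
;; whose two shift counts sum to 32 matches the shd/shrpw patterns above.
;;
;;   unsigned int
;;   funnel (unsigned int hi, unsigned int lo)
;;   {
;;     return (hi << 8) | (lo >> 24);   /* may become shd hi,lo,24,result */
;;   }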
6047
6048 (define_insn ""
6049 [(set (match_operand:SI 0 "register_operand" "=r")
6050 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
6051 (match_operand:SI 2 "const_int_operand" ""))
6052 (match_operand:SI 3 "const_int_operand" "")))]
6053 "exact_log2 (1 + (INTVAL (operands[3]) >> (INTVAL (operands[2]) & 31))) >= 0"
6054 "*
6055 {
6056 int cnt = INTVAL (operands[2]) & 31;
6057 operands[3] = GEN_INT (exact_log2 (1 + (INTVAL (operands[3]) >> cnt)));
6058 operands[2] = GEN_INT (31 - cnt);
6059 return \"{zdep|depw,z} %1,%2,%3,%0\";
6060 }"
6061 [(set_attr "type" "shift")
6062 (set_attr "length" "4")])
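
;; Illustrative sketch only (the function name is made up): a shift followed
;; by a mask of contiguous bits is a single deposit, as the pattern above
;; checks with exact_log2.
;;
;;   unsigned int
;;   field (unsigned int x)
;;   {
;;     return (x << 4) & 0xff0u;   /* may become zdep/depw,z of 8 bits */
;;   }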
6063 \f
6064 ;; Unconditional and other jump instructions.
6065
6066 ;; This can only be used in a leaf function, so we do
6067 ;; not need to use the PIC register when generating PIC code.
6068 (define_insn "return"
6069 [(return)
6070 (use (reg:SI 2))
6071 (const_int 0)]
6072 "hppa_can_use_return_insn_p ()"
6073 "*
6074 {
6075 if (TARGET_PA_20)
6076 return \"bve%* (%%r2)\";
6077 return \"bv%* %%r0(%%r2)\";
6078 }"
6079 [(set_attr "type" "branch")
6080 (set_attr "length" "4")])
6081
6082 ;; Emit a different pattern for functions which have non-trivial
6083 ;; epilogues so as not to confuse jump and reorg.
6084 (define_insn "return_internal"
6085 [(return)
6086 (use (reg:SI 2))
6087 (const_int 1)]
6088 ""
6089 "*
6090 {
6091 if (TARGET_PA_20)
6092 return \"bve%* (%%r2)\";
6093 return \"bv%* %%r0(%%r2)\";
6094 }"
6095 [(set_attr "type" "branch")
6096 (set_attr "length" "4")])
6097
6098 ;; This is used for eh returns which bypass the return stub.
6099 (define_insn "return_external_pic"
6100 [(return)
6101 (clobber (reg:SI 1))
6102 (use (reg:SI 2))]
6103 "!TARGET_NO_SPACE_REGS
6104 && !TARGET_PA_20
6105 && flag_pic && current_function_calls_eh_return"
6106 "ldsid (%%sr0,%%r2),%%r1\;mtsp %%r1,%%sr0\;be%* 0(%%sr0,%%r2)"
6107 [(set_attr "type" "branch")
6108 (set_attr "length" "12")])
6109
6110 (define_expand "prologue"
6111 [(const_int 0)]
6112 ""
6113 "hppa_expand_prologue ();DONE;")
6114
6115 (define_expand "sibcall_epilogue"
6116 [(return)]
6117 ""
6118 "
6119 {
6120 hppa_expand_epilogue ();
6121 DONE;
6122 }")
6123
6124 (define_expand "epilogue"
6125 [(return)]
6126 ""
6127 "
6128 {
6129 /* Try to use the trivial return first. Else use the full
6130 epilogue. */
6131 if (hppa_can_use_return_insn_p ())
6132 emit_jump_insn (gen_return ());
6133 else
6134 {
6135 rtx x;
6136
6137 hppa_expand_epilogue ();
6138
6139 /* EH returns bypass the normal return stub. Thus, we must do an
6140 interspace branch to return from functions that call eh_return.
6141 This is only a problem for returns from shared code on ports
6142 using space registers. */
6143 if (!TARGET_NO_SPACE_REGS
6144 && !TARGET_PA_20
6145 && flag_pic && current_function_calls_eh_return)
6146 x = gen_return_external_pic ();
6147 else
6148 x = gen_return_internal ();
6149
6150 emit_jump_insn (x);
6151 }
6152 DONE;
6153 }")
6154
6155 ; Used by hppa_profile_hook to load the starting address of the current
6156 ; function. Operand 1 contains the address of the label in operand 3.
6157 (define_insn "load_offset_label_address"
6158 [(set (match_operand:SI 0 "register_operand" "=r")
6159 (plus:SI (match_operand:SI 1 "register_operand" "r")
6160 (minus:SI (match_operand:SI 2 "" "")
6161 (label_ref:SI (match_operand 3 "" "")))))]
6162 ""
6163 "ldo %2-%l3(%1),%0"
6164 [(set_attr "type" "multi")
6165 (set_attr "length" "4")])
6166
6167 ; Output a code label and load its address.
6168 (define_insn "lcla1"
6169 [(set (match_operand:SI 0 "register_operand" "=r")
6170 (label_ref:SI (match_operand 1 "" "")))
6171 (const_int 0)]
6172 "!TARGET_PA_20"
6173 "*
6174 {
6175 output_asm_insn (\"bl .+8,%0\;depi 0,31,2,%0\", operands);
6176 (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
6177 CODE_LABEL_NUMBER (operands[1]));
6178 return \"\";
6179 }"
6180 [(set_attr "type" "multi")
6181 (set_attr "length" "8")])
6182
6183 (define_insn "lcla2"
6184 [(set (match_operand:SI 0 "register_operand" "=r")
6185 (label_ref:SI (match_operand 1 "" "")))
6186 (const_int 0)]
6187 "TARGET_PA_20"
6188 "*
6189 {
6190 (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
6191 CODE_LABEL_NUMBER (operands[1]));
6192 return \"mfia %0\";
6193 }"
6194 [(set_attr "type" "move")
6195 (set_attr "length" "4")])
6196
6197 (define_insn "blockage"
6198 [(unspec_volatile [(const_int 2)] 0)]
6199 ""
6200 ""
6201 [(set_attr "length" "0")])
6202
6203 (define_insn "jump"
6204 [(set (pc) (label_ref (match_operand 0 "" "")))]
6205 ""
6206 "*
6207 {
6208 /* An unconditional branch which can reach its target. */
6209 if (get_attr_length (insn) != 24
6210 && get_attr_length (insn) != 16)
6211 return \"b%* %l0\";
6212
6213 return output_lbranch (operands[0], insn);
6214 }"
6215 [(set_attr "type" "uncond_branch")
6216 (set_attr "pa_combine_type" "uncond_branch")
6217 (set (attr "length")
6218 (cond [(eq (symbol_ref "jump_in_call_delay (insn)") (const_int 1))
6219 (if_then_else (lt (abs (minus (match_dup 0)
6220 (plus (pc) (const_int 8))))
6221 (const_int 8184))
6222 (const_int 4)
6223 (const_int 8))
6224 (ge (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
6225 (const_int 262100))
6226 (if_then_else (eq (symbol_ref "flag_pic") (const_int 0))
6227 (const_int 16)
6228 (const_int 24))]
6229 (const_int 4)))])
6230
6231 ;;; Hope this is only within a function...
6232 (define_insn "indirect_jump"
6233 [(set (pc) (match_operand 0 "register_operand" "r"))]
6234 "GET_MODE (operands[0]) == word_mode"
6235 "bv%* %%r0(%0)"
6236 [(set_attr "type" "branch")
6237 (set_attr "length" "4")])
6238
6239 ;;; This jump is used in branch tables where the insn length is fixed.
6240 ;;; The length of this insn is adjusted if the delay slot is not filled.
6241 (define_insn "short_jump"
6242 [(set (pc) (label_ref (match_operand 0 "" "")))
6243 (const_int 0)]
6244 ""
6245 "b%* %l0%#"
6246 [(set_attr "type" "btable_branch")
6247 (set_attr "length" "4")])
6248
6249 ;; Subroutines of "casesi".
6250 ;; operand 0 is index
6251 ;; operand 1 is the minimum bound
6252 ;; operand 2 is the maximum bound - minimum bound + 1
6253 ;; operand 3 is CODE_LABEL for the table;
6254 ;; operand 4 is the CODE_LABEL to go to if index out of range.
6255
6256 (define_expand "casesi"
6257 [(match_operand:SI 0 "general_operand" "")
6258 (match_operand:SI 1 "const_int_operand" "")
6259 (match_operand:SI 2 "const_int_operand" "")
6260 (match_operand 3 "" "")
6261 (match_operand 4 "" "")]
6262 ""
6263 "
6264 {
6265 if (GET_CODE (operands[0]) != REG)
6266 operands[0] = force_reg (SImode, operands[0]);
6267
6268 if (operands[1] != const0_rtx)
6269 {
6270 rtx index = gen_reg_rtx (SImode);
6271
6272 operands[1] = GEN_INT (-INTVAL (operands[1]));
6273 if (!INT_14_BITS (operands[1]))
6274 operands[1] = force_reg (SImode, operands[1]);
6275 emit_insn (gen_addsi3 (index, operands[0], operands[1]));
6276 operands[0] = index;
6277 }
6278
6279 /* In 64-bit mode we must make sure to wipe the upper bits of the register
6280 just in case the addition overflowed or we had random bits in the
6281 high part of the register. */
6282 if (TARGET_64BIT)
6283 {
6284 rtx index = gen_reg_rtx (DImode);
6285
6286 emit_insn (gen_extendsidi2 (index, operands[0]));
6287 operands[0] = gen_rtx_SUBREG (SImode, index, 4);
6288 }
6289
6290 if (!INT_5_BITS (operands[2]))
6291 operands[2] = force_reg (SImode, operands[2]);
6292
6293 /* This branch prevents us from finding an insn for the delay slot of the
6294 following vectored branch. It might be possible to use the delay
6295 slot if an index value of -1 was used to transfer to the out-of-range
6296 label. In order to do this, we would have to output the -1 vector
6297 element after the delay insn. The casesi output code would have to
6298 check if the casesi insn is in a delay branch sequence and output
6299 the delay insn if one is found. If this were done, it might then
6300 be worthwhile to split the casesi patterns to improve scheduling.
6301 However, it's not clear that all this extra complexity is worth
6302 the effort. */
6303 emit_insn (gen_cmpsi (operands[0], operands[2]));
6304 emit_jump_insn (gen_bgtu (operands[4]));
6305
6306 if (TARGET_BIG_SWITCH)
6307 {
6308 if (TARGET_64BIT)
6309 {
6310 rtx tmp1 = gen_reg_rtx (DImode);
6311 rtx tmp2 = gen_reg_rtx (DImode);
6312
6313 emit_jump_insn (gen_casesi64p (operands[0], operands[3],
6314 tmp1, tmp2));
6315 }
6316 else
6317 {
6318 rtx tmp1 = gen_reg_rtx (SImode);
6319
6320 if (flag_pic)
6321 {
6322 rtx tmp2 = gen_reg_rtx (SImode);
6323
6324 emit_jump_insn (gen_casesi32p (operands[0], operands[3],
6325 tmp1, tmp2));
6326 }
6327 else
6328 emit_jump_insn (gen_casesi32 (operands[0], operands[3], tmp1));
6329 }
6330 }
6331 else
6332 emit_jump_insn (gen_casesi0 (operands[0], operands[3]));
6333 DONE;
6334 }")
6335
6336 ;;; The rtl for this pattern doesn't accurately describe what the insn
6337 ;;; actually does, particularly when case-vector elements are exploded
6338 ;;; in pa_reorg. However, the initial SET in these patterns must show
6339 ;;; the connection of the insn to the following jump table.
6340 (define_insn "casesi0"
6341 [(set (pc) (mem:SI (plus:SI
6342 (mult:SI (match_operand:SI 0 "register_operand" "r")
6343 (const_int 4))
6344 (label_ref (match_operand 1 "" "")))))]
6345 ""
6346 "blr,n %0,%%r0\;nop"
6347 [(set_attr "type" "multi")
6348 (set_attr "length" "8")])
6349
6350 ;;; 32-bit code, absolute branch table.
6351 (define_insn "casesi32"
6352 [(set (pc) (mem:SI (plus:SI
6353 (mult:SI (match_operand:SI 0 "register_operand" "r")
6354 (const_int 4))
6355 (label_ref (match_operand 1 "" "")))))
6356 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
6357 "!TARGET_64BIT && TARGET_BIG_SWITCH"
6358 "ldil L'%l1,%2\;ldo R'%l1(%2),%2\;{ldwx|ldw},s %0(%2),%2\;bv,n %%r0(%2)"
6359 [(set_attr "type" "multi")
6360 (set_attr "length" "16")])
6361
6362 ;;; 32-bit code, relative branch table.
6363 (define_insn "casesi32p"
6364 [(set (pc) (mem:SI (plus:SI
6365 (mult:SI (match_operand:SI 0 "register_operand" "r")
6366 (const_int 4))
6367 (label_ref (match_operand 1 "" "")))))
6368 (clobber (match_operand:SI 2 "register_operand" "=&a"))
6369 (clobber (match_operand:SI 3 "register_operand" "=&r"))]
6370 "!TARGET_64BIT && TARGET_BIG_SWITCH"
6371 "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {16|20}(%2),%2\;\
6372 {ldwx|ldw},s %0(%2),%3\;{addl|add,l} %2,%3,%3\;bv,n %%r0(%3)"
6373 [(set_attr "type" "multi")
6374 (set (attr "length")
6375 (if_then_else (ne (symbol_ref "TARGET_PA_20") (const_int 0))
6376 (const_int 20)
6377 (const_int 24)))])
6378
6379 ;;; 64-bit code, 32-bit relative branch table.
6380 (define_insn "casesi64p"
6381 [(set (pc) (mem:DI (plus:DI
6382 (mult:DI (sign_extend:DI
6383 (match_operand:SI 0 "register_operand" "r"))
6384 (const_int 8))
6385 (label_ref (match_operand 1 "" "")))))
6386 (clobber (match_operand:DI 2 "register_operand" "=&r"))
6387 (clobber (match_operand:DI 3 "register_operand" "=&r"))]
6388 "TARGET_64BIT && TARGET_BIG_SWITCH"
6389 "mfia %2\;ldo 24(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
6390 add,l %2,%3,%3\;bv,n %%r0(%3)"
6391 [(set_attr "type" "multi")
6392 (set_attr "length" "24")])
6393
6394
6395 ;; Call patterns.
6396 ;;- jump to subroutine
6397
6398 (define_expand "call"
6399 [(parallel [(call (match_operand:SI 0 "" "")
6400 (match_operand 1 "" ""))
6401 (clobber (reg:SI 2))])]
6402 ""
6403 "
6404 {
6405 rtx op, call_insn;
6406 rtx nb = operands[1];
6407
6408 if (TARGET_PORTABLE_RUNTIME)
6409 op = force_reg (SImode, XEXP (operands[0], 0));
6410 else
6411 op = XEXP (operands[0], 0);
6412
6413 if (TARGET_64BIT)
6414 {
6415 if (!virtuals_instantiated)
6416 emit_move_insn (arg_pointer_rtx,
6417 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
6418 GEN_INT (64)));
6419 else
6420 {
6421 /* The loop pass can generate new libcalls after the virtual
6422 registers are instantiated when fpregs are disabled because
6423 the only method that we have for doing DImode multiplication
6424 is with a libcall. This could be trouble if we haven't
6425 allocated enough space for the outgoing arguments. */
6426 if (INTVAL (nb) > current_function_outgoing_args_size)
6427 abort ();
6428
6429 emit_move_insn (arg_pointer_rtx,
6430 gen_rtx_PLUS (word_mode, stack_pointer_rtx,
6431 GEN_INT (STACK_POINTER_OFFSET + 64)));
6432 }
6433 }
6434
6435 /* Use two different patterns for calls to explicitly named functions
6436 and calls through function pointers. This is necessary as these two
6437 types of calls use different calling conventions, and CSE might try
6438 to change the named call into an indirect call in some cases (using
6439 two patterns keeps CSE from performing this optimization).
6440
6441 We now use even more call patterns as there was a subtle bug in
6442 attempting to restore the pic register after a call using a simple
6443 move insn. During reload, an instruction involving a pseudo register
6444 with no explicit dependence on the PIC register can be converted
6445 to an equivalent load from memory using the PIC register. If we
6446 emit a simple move to restore the PIC register in the initial rtl
6447 generation, then it can potentially be repositioned during scheduling,
6448 and an instruction that eventually uses the PIC register may end up
6449 between the call and the PIC register restore.
6450
6451 This only worked because there is a post call group of instructions
6452 that are scheduled with the call. These instructions are included
6453 in the same basic block as the call. However, calls can throw in
6454 C++ code and a basic block has to terminate at the call if the call
6455 can throw. This results in the PIC register restore being scheduled
6456 independently from the call. So, we now hide the save and restore
6457 of the PIC register in the call pattern until after reload. Then,
6458 we split the moves out. A small side benefit is that we now don't
6459 need to have a use of the PIC register in the return pattern and
6460 the final save/restore operation is not needed.
6461
6462 I elected to just clobber %r4 in the PIC patterns and use it instead
6463 of trying to force hppa_pic_save_rtx () to a callee saved register.
6464 This might have required a new register class and constraint. It
6465 was also simpler to just handle the restore from a register than a
6466 generic pseudo. */
6467 if (TARGET_64BIT)
6468 {
6469 if (GET_CODE (op) == SYMBOL_REF)
6470 call_insn = emit_call_insn (gen_call_symref_64bit (op, nb));
6471 else
6472 {
6473 op = force_reg (word_mode, op);
6474 call_insn = emit_call_insn (gen_call_reg_64bit (op, nb));
6475 }
6476 }
6477 else
6478 {
6479 if (GET_CODE (op) == SYMBOL_REF)
6480 {
6481 if (flag_pic)
6482 call_insn = emit_call_insn (gen_call_symref_pic (op, nb));
6483 else
6484 call_insn = emit_call_insn (gen_call_symref (op, nb));
6485 }
6486 else
6487 {
6488 rtx tmpreg = gen_rtx_REG (word_mode, 22);
6489
6490 emit_move_insn (tmpreg, force_reg (word_mode, op));
6491 if (flag_pic)
6492 call_insn = emit_call_insn (gen_call_reg_pic (nb));
6493 else
6494 call_insn = emit_call_insn (gen_call_reg (nb));
6495 }
6496 }
6497
6498 DONE;
6499 }")
6500
6501 ;; We use function calls to set the attribute length of calls and millicode
6502 ;; calls. This is necessary because of the large variety of call sequences.
6503 ;; Implementing the calculation in rtl is difficult as well as ugly. As
6504 ;; we need the same calculation in several places, maintenance becomes a
6505 ;; nightmare.
6506 ;;
6507 ;; However, this has a subtle impact on branch shortening. When the
6508 ;; expression used to set the length attribute of an instruction depends
6509 ;; on a relative address (e.g., pc or a branch address), genattrtab
6510 ;; notes that the insn's length is variable, and attempts to determine a
6511 ;; worst-case default length and code to compute an insn's current length.
6512
6513 ;; The use of a function call hides the variable dependence of our calls
6514 ;; and millicode calls. The result is genattrtab doesn't treat the operation
6515 ;; as variable and it only generates code for the default case using our
6516 ;; function call. Because of this, calls and millicode calls have a fixed
6517 ;; length in the branch shortening pass, and some branches will use a longer
6518 ;; code sequence than necessary. However, the length of any given call
6519 ;; will still reflect its final code location and it may be shorter than
6520 ;; the initial length estimate.
6521
6522 ;; It's possible to trick genattrtab by adding an expression involving `pc'
6523 ;; in the set. However, when genattrtab hits a function call in its attempt
6524 ;; to compute the default length, it marks the result as unknown and sets
6525 ;; the default result to MAX_INT ;-( One possible fix that would allow
6526 ;; calls to participate in branch shortening would be to make the call to
6527 ;; insn_default_length a target option. Then, we could massage unknown
6528 ;; results. Another fix might be to change genattrtab so that it just does
6529 ;; the call in the variable case as it already does for the fixed case.
6530
6531 (define_insn "call_symref"
6532 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6533 (match_operand 1 "" "i"))
6534 (clobber (reg:SI 1))
6535 (clobber (reg:SI 2))
6536 (use (const_int 0))]
6537 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
6538 "*
6539 {
6540 output_arg_descriptor (insn);
6541 return output_call (insn, operands[0], 0);
6542 }"
6543 [(set_attr "type" "call")
6544 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
6545
6546 (define_insn "call_symref_pic"
6547 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6548 (match_operand 1 "" "i"))
6549 (clobber (reg:SI 1))
6550 (clobber (reg:SI 2))
6551 (clobber (reg:SI 4))
6552 (use (reg:SI 19))
6553 (use (const_int 0))]
6554 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
6555 "*
6556 {
6557 output_arg_descriptor (insn);
6558 return output_call (insn, operands[0], 0);
6559 }"
6560 [(set_attr "type" "call")
6561 (set (attr "length")
6562 (plus (symbol_ref "attr_length_call (insn, 0)")
6563 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
6564
6565 ;; Split out the PIC register save and restore after reload. This is
6566 ;; done only if the function returns. As the split is done after reload,
6567 ;; there are some situations in which we unnecessarily save and restore
6568 ;; %r4. This happens when there is a single call and the PIC register
6569 ;; is "dead" after the call. This isn't easy to fix as the usage of
6570 ;; the PIC register isn't completely determined until the reload pass.
6571 (define_split
6572 [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6573 (match_operand 1 "" ""))
6574 (clobber (reg:SI 1))
6575 (clobber (reg:SI 2))
6576 (clobber (reg:SI 4))
6577 (use (reg:SI 19))
6578 (use (const_int 0))])]
6579 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
6580 && reload_completed
6581 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
6582 [(set (reg:SI 4) (reg:SI 19))
6583 (parallel [(call (mem:SI (match_dup 0))
6584 (match_dup 1))
6585 (clobber (reg:SI 1))
6586 (clobber (reg:SI 2))
6587 (use (reg:SI 19))
6588 (use (const_int 0))])
6589 (set (reg:SI 19) (reg:SI 4))]
6590 "")
6591
6592 ;; Remove the clobber of register 4 when optimizing. This has to be
6593 ;; done with a peephole optimization rather than a split because the
6594 ;; split sequence for a call must be longer than one instruction.
6595 (define_peephole2
6596 [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6597 (match_operand 1 "" ""))
6598 (clobber (reg:SI 1))
6599 (clobber (reg:SI 2))
6600 (clobber (reg:SI 4))
6601 (use (reg:SI 19))
6602 (use (const_int 0))])]
6603 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
6604 [(parallel [(call (mem:SI (match_dup 0))
6605 (match_dup 1))
6606 (clobber (reg:SI 1))
6607 (clobber (reg:SI 2))
6608 (use (reg:SI 19))
6609 (use (const_int 0))])]
6610 "")
6611
6612 (define_insn "*call_symref_pic_post_reload"
6613 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6614 (match_operand 1 "" "i"))
6615 (clobber (reg:SI 1))
6616 (clobber (reg:SI 2))
6617 (use (reg:SI 19))
6618 (use (const_int 0))]
6619 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
6620 "*
6621 {
6622 output_arg_descriptor (insn);
6623 return output_call (insn, operands[0], 0);
6624 }"
6625 [(set_attr "type" "call")
6626 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
6627
6628 ;; This pattern is split if it is necessary to save and restore the
6629 ;; PIC register.
6630 (define_insn "call_symref_64bit"
6631 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6632 (match_operand 1 "" "i"))
6633 (clobber (reg:DI 1))
6634 (clobber (reg:DI 2))
6635 (clobber (reg:DI 4))
6636 (use (reg:DI 27))
6637 (use (reg:DI 29))
6638 (use (const_int 0))]
6639 "TARGET_64BIT"
6640 "*
6641 {
6642 output_arg_descriptor (insn);
6643 return output_call (insn, operands[0], 0);
6644 }"
6645 [(set_attr "type" "call")
6646 (set (attr "length")
6647 (plus (symbol_ref "attr_length_call (insn, 0)")
6648 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
6649
6650 ;; Split out the PIC register save and restore after reload. This is
6651 ;; done only if the function returns. As the split is done after reload,
6652 ;; there are some situations in which we unnecessarily save and restore
6653 ;; %r4. This happens when there is a single call and the PIC register
6654 ;; is "dead" after the call. This isn't easy to fix as the usage of
6655 ;; the PIC register isn't completely determined until the reload pass.
6656 (define_split
6657 [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6658 (match_operand 1 "" ""))
6659 (clobber (reg:DI 1))
6660 (clobber (reg:DI 2))
6661 (clobber (reg:DI 4))
6662 (use (reg:DI 27))
6663 (use (reg:DI 29))
6664 (use (const_int 0))])]
6665 "TARGET_64BIT
6666 && reload_completed
6667 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
6668 [(set (reg:DI 4) (reg:DI 27))
6669 (parallel [(call (mem:SI (match_dup 0))
6670 (match_dup 1))
6671 (clobber (reg:DI 1))
6672 (clobber (reg:DI 2))
6673 (use (reg:DI 27))
6674 (use (reg:DI 29))
6675 (use (const_int 0))])
6676 (set (reg:DI 27) (reg:DI 4))]
6677 "")
6678
6679 ;; Remove the clobber of register 4 when optimizing. This has to be
6680 ;; done with a peephole optimization rather than a split because the
6681 ;; split sequence for a call must be longer than one instruction.
6682 (define_peephole2
6683 [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6684 (match_operand 1 "" ""))
6685 (clobber (reg:DI 1))
6686 (clobber (reg:DI 2))
6687 (clobber (reg:DI 4))
6688 (use (reg:DI 27))
6689 (use (reg:DI 29))
6690 (use (const_int 0))])]
6691 "TARGET_64BIT && reload_completed"
6692 [(parallel [(call (mem:SI (match_dup 0))
6693 (match_dup 1))
6694 (clobber (reg:DI 1))
6695 (clobber (reg:DI 2))
6696 (use (reg:DI 27))
6697 (use (reg:DI 29))
6698 (use (const_int 0))])]
6699 "")
6700
6701 (define_insn "*call_symref_64bit_post_reload"
6702 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
6703 (match_operand 1 "" "i"))
6704 (clobber (reg:DI 1))
6705 (clobber (reg:DI 2))
6706 (use (reg:DI 27))
6707 (use (reg:DI 29))
6708 (use (const_int 0))]
6709 "TARGET_64BIT"
6710 "*
6711 {
6712 output_arg_descriptor (insn);
6713 return output_call (insn, operands[0], 0);
6714 }"
6715 [(set_attr "type" "call")
6716 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
6717
6718 (define_insn "call_reg"
6719 [(call (mem:SI (reg:SI 22))
6720 (match_operand 0 "" "i"))
6721 (clobber (reg:SI 1))
6722 (clobber (reg:SI 2))
6723 (use (const_int 1))]
6724 "!TARGET_64BIT"
6725 "*
6726 {
6727 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
6728 }"
6729 [(set_attr "type" "dyncall")
6730 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
6731
6732 ;; This pattern is split if it is necessary to save and restore the
6733 ;; PIC register.
6734 (define_insn "call_reg_pic"
6735 [(call (mem:SI (reg:SI 22))
6736 (match_operand 0 "" "i"))
6737 (clobber (reg:SI 1))
6738 (clobber (reg:SI 2))
6739 (clobber (reg:SI 4))
6740 (use (reg:SI 19))
6741 (use (const_int 1))]
6742 "!TARGET_64BIT"
6743 "*
6744 {
6745 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
6746 }"
6747 [(set_attr "type" "dyncall")
6748 (set (attr "length")
6749 (plus (symbol_ref "attr_length_indirect_call (insn)")
6750 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
6751
6752 ;; Split out the PIC register save and restore after reload. This is
6753 ;; done only if the function returns. As the split is done after reload,
6754 ;; there are some situations in which we unnecessarily save and restore
6755 ;; %r4. This happens when there is a single call and the PIC register
6756 ;; is "dead" after the call. This isn't easy to fix as the usage of
6757 ;; the PIC register isn't completely determined until the reload pass.
6758 (define_split
6759 [(parallel [(call (mem:SI (reg:SI 22))
6760 (match_operand 0 "" ""))
6761 (clobber (reg:SI 1))
6762 (clobber (reg:SI 2))
6763 (clobber (reg:SI 4))
6764 (use (reg:SI 19))
6765 (use (const_int 1))])]
6766 "!TARGET_64BIT
6767 && reload_completed
6768 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
6769 [(set (reg:SI 4) (reg:SI 19))
6770 (parallel [(call (mem:SI (reg:SI 22))
6771 (match_dup 0))
6772 (clobber (reg:SI 1))
6773 (clobber (reg:SI 2))
6774 (use (reg:SI 19))
6775 (use (const_int 1))])
6776 (set (reg:SI 19) (reg:SI 4))]
6777 "")
6778
6779 ;; Remove the clobber of register 4 when optimizing. This has to be
6780 ;; done with a peephole optimization rather than a split because the
6781 ;; split sequence for a call must be longer than one instruction.
6782 (define_peephole2
6783 [(parallel [(call (mem:SI (reg:SI 22))
6784 (match_operand 0 "" ""))
6785 (clobber (reg:SI 1))
6786 (clobber (reg:SI 2))
6787 (clobber (reg:SI 4))
6788 (use (reg:SI 19))
6789 (use (const_int 1))])]
6790 "!TARGET_64BIT && reload_completed"
6791 [(parallel [(call (mem:SI (reg:SI 22))
6792 (match_dup 0))
6793 (clobber (reg:SI 1))
6794 (clobber (reg:SI 2))
6795 (use (reg:SI 19))
6796 (use (const_int 1))])]
6797 "")
6798
6799 (define_insn "*call_reg_pic_post_reload"
6800 [(call (mem:SI (reg:SI 22))
6801 (match_operand 0 "" "i"))
6802 (clobber (reg:SI 1))
6803 (clobber (reg:SI 2))
6804 (use (reg:SI 19))
6805 (use (const_int 1))]
6806 "!TARGET_64BIT"
6807 "*
6808 {
6809 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
6810 }"
6811 [(set_attr "type" "dyncall")
6812 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
6813
6814 ;; This pattern is split if it is necessary to save and restore the
6815 ;; PIC register.
6816 (define_insn "call_reg_64bit"
6817 [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
6818 (match_operand 1 "" "i"))
6819 (clobber (reg:DI 2))
6820 (clobber (reg:DI 4))
6821 (use (reg:DI 27))
6822 (use (reg:DI 29))
6823 (use (const_int 1))]
6824 "TARGET_64BIT"
6825 "*
6826 {
6827 return output_indirect_call (insn, operands[0]);
6828 }"
6829 [(set_attr "type" "dyncall")
6830 (set (attr "length")
6831 (plus (symbol_ref "attr_length_indirect_call (insn)")
6832 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
6833
6834 ;; Split out the PIC register save and restore after reload. This is
6835 ;; done only if the function returns. As the split is done after reload,
6836 ;; there are some situations in which we unnecessarily save and restore
6837 ;; %r4. This happens when there is a single call and the PIC register
6838 ;; is "dead" after the call. This isn't easy to fix as the usage of
6839 ;; the PIC register isn't completely determined until the reload pass.
6840 (define_split
6841 [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
6842 (match_operand 1 "" ""))
6843 (clobber (reg:DI 2))
6844 (clobber (reg:DI 4))
6845 (use (reg:DI 27))
6846 (use (reg:DI 29))
6847 (use (const_int 1))])]
6848 "TARGET_64BIT
6849 && reload_completed
6850 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
6851 [(set (reg:DI 4) (reg:DI 27))
6852 (parallel [(call (mem:SI (match_dup 0))
6853 (match_dup 1))
6854 (clobber (reg:DI 2))
6855 (use (reg:DI 27))
6856 (use (reg:DI 29))
6857 (use (const_int 1))])
6858 (set (reg:DI 27) (reg:DI 4))]
6859 "")
6860
6861 ;; Remove the clobber of register 4 when optimizing. This has to be
6862 ;; done with a peephole optimization rather than a split because the
6863 ;; split sequence for a call must be longer than one instruction.
6864 (define_peephole2
6865 [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
6866 (match_operand 1 "" ""))
6867 (clobber (reg:DI 2))
6868 (clobber (reg:DI 4))
6869 (use (reg:DI 27))
6870 (use (reg:DI 29))
6871 (use (const_int 1))])]
6872 "TARGET_64BIT && reload_completed"
6873 [(parallel [(call (mem:SI (match_dup 0))
6874 (match_dup 1))
6875 (clobber (reg:DI 2))
6876 (use (reg:DI 27))
6877 (use (reg:DI 29))
6878 (use (const_int 1))])]
6879 "")
6880
6881 (define_insn "*call_reg_64bit_post_reload"
6882 [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
6883 (match_operand 1 "" "i"))
6884 (clobber (reg:DI 2))
6885 (use (reg:DI 27))
6886 (use (reg:DI 29))
6887 (use (const_int 1))]
6888 "TARGET_64BIT"
6889 "*
6890 {
6891 return output_indirect_call (insn, operands[0]);
6892 }"
6893 [(set_attr "type" "dyncall")
6894 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
6895
6896 (define_expand "call_value"
6897 [(parallel [(set (match_operand 0 "" "")
6898 (call (match_operand:SI 1 "" "")
6899 (match_operand 2 "" "")))
6900 (clobber (reg:SI 2))])]
6901 ""
6902 "
6903 {
6904 rtx op, call_insn;
6905 rtx dst = operands[0];
6906 rtx nb = operands[2];
6907
6908 if (TARGET_PORTABLE_RUNTIME)
6909 op = force_reg (SImode, XEXP (operands[1], 0));
6910 else
6911 op = XEXP (operands[1], 0);
6912
6913 if (TARGET_64BIT)
6914 {
6915 if (!virtuals_instantiated)
6916 emit_move_insn (arg_pointer_rtx,
6917 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
6918 GEN_INT (64)));
6919 else
6920 {
6921 /* The loop pass can generate new libcalls after the virtual
6922 registers are instantiated when fpregs are disabled because
6923 the only method that we have for doing DImode multiplication
6924 is with a libcall. This could be trouble if we haven't
6925 allocated enough space for the outgoing arguments. */
6926 if (INTVAL (nb) > current_function_outgoing_args_size)
6927 abort ();
6928
6929 emit_move_insn (arg_pointer_rtx,
6930 gen_rtx_PLUS (word_mode, stack_pointer_rtx,
6931 GEN_INT (STACK_POINTER_OFFSET + 64)));
6932 }
6933 }
6934
6935 /* Use two different patterns for calls to explicitly named functions
6936 and calls through function pointers. This is necessary as these two
6937 types of calls use different calling conventions, and CSE might try
6938 to change the named call into an indirect call in some cases (using
6939 two patterns keeps CSE from performing this optimization).
6940
6941 We now use even more call patterns as there was a subtle bug in
6942 attempting to restore the pic register after a call using a simple
6943 move insn. During reload, an instruction involving a pseudo register
6944 with no explicit dependence on the PIC register can be converted
6945 to an equivalent load from memory using the PIC register. If we
6946 emit a simple move to restore the PIC register in the initial rtl
6947 generation, then it can potentially be repositioned during scheduling,
6948 and an instruction that eventually uses the PIC register may end up
6949 between the call and the PIC register restore.
6950
6951 This only worked because there is a post call group of instructions
6952 that are scheduled with the call. These instructions are included
6953 in the same basic block as the call. However, calls can throw in
6954 C++ code and a basic block has to terminate at the call if the call
6955 can throw. This results in the PIC register restore being scheduled
6956 independently from the call. So, we now hide the save and restore
6957 of the PIC register in the call pattern until after reload. Then,
6958 we split the moves out. A small side benefit is that we now don't
6959 need to have a use of the PIC register in the return pattern and
6960 the final save/restore operation is not needed.
6961
6962 I elected to just clobber %r4 in the PIC patterns and use it instead
6963 of trying to force hppa_pic_save_rtx () to a callee saved register.
6964 This might have required a new register class and constraint. It
6965 was also simpler to just handle the restore from a register than a
6966 generic pseudo. */
6967 if (TARGET_64BIT)
6968 {
6969 if (GET_CODE (op) == SYMBOL_REF)
6970 call_insn = emit_call_insn (gen_call_val_symref_64bit (dst, op, nb));
6971 else
6972 {
6973 op = force_reg (word_mode, op);
6974 call_insn = emit_call_insn (gen_call_val_reg_64bit (dst, op, nb));
6975 }
6976 }
6977 else
6978 {
6979 if (GET_CODE (op) == SYMBOL_REF)
6980 {
6981 if (flag_pic)
6982 call_insn = emit_call_insn (gen_call_val_symref_pic (dst, op, nb));
6983 else
6984 call_insn = emit_call_insn (gen_call_val_symref (dst, op, nb));
6985 }
6986 else
6987 {
6988 rtx tmpreg = gen_rtx_REG (word_mode, 22);
6989
6990 emit_move_insn (tmpreg, force_reg (word_mode, op));
6991 if (flag_pic)
6992 call_insn = emit_call_insn (gen_call_val_reg_pic (dst, nb));
6993 else
6994 call_insn = emit_call_insn (gen_call_val_reg (dst, nb));
6995 }
6996 }
6997
6998 DONE;
6999 }")
7000
7001 (define_insn "call_val_symref"
7002 [(set (match_operand 0 "" "")
7003 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7004 (match_operand 2 "" "i")))
7005 (clobber (reg:SI 1))
7006 (clobber (reg:SI 2))
7007 (use (const_int 0))]
7008 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
7009 "*
7010 {
7011 output_arg_descriptor (insn);
7012 return output_call (insn, operands[1], 0);
7013 }"
7014 [(set_attr "type" "call")
7015 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
7016
7017 (define_insn "call_val_symref_pic"
7018 [(set (match_operand 0 "" "")
7019 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7020 (match_operand 2 "" "i")))
7021 (clobber (reg:SI 1))
7022 (clobber (reg:SI 2))
7023 (clobber (reg:SI 4))
7024 (use (reg:SI 19))
7025 (use (const_int 0))]
7026 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
7027 "*
7028 {
7029 output_arg_descriptor (insn);
7030 return output_call (insn, operands[1], 0);
7031 }"
7032 [(set_attr "type" "call")
7033 (set (attr "length")
7034 (plus (symbol_ref "attr_length_call (insn, 0)")
7035 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
7036
7037 ;; Split out the PIC register save and restore after reload. This is
7038 ;; done only if the function returns. As the split is done after reload,
7039 ;; there are some situations in which we unnecessarily save and restore
7040 ;; %r4. This happens when there is a single call and the PIC register
7041 ;; is "dead" after the call. This isn't easy to fix as the usage of
7042 ;; the PIC register isn't completely determined until the reload pass.
7043 (define_split
7044 [(parallel [(set (match_operand 0 "" "")
7045 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7046 (match_operand 2 "" "")))
7047 (clobber (reg:SI 1))
7048 (clobber (reg:SI 2))
7049 (clobber (reg:SI 4))
7050 (use (reg:SI 19))
7051 (use (const_int 0))])]
7052 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
7053 && reload_completed
7054 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
7055 [(set (reg:SI 4) (reg:SI 19))
7056 (parallel [(set (match_dup 0)
7057 (call (mem:SI (match_dup 1))
7058 (match_dup 2)))
7059 (clobber (reg:SI 1))
7060 (clobber (reg:SI 2))
7061 (use (reg:SI 19))
7062 (use (const_int 0))])
7063 (set (reg:SI 19) (reg:SI 4))]
7064 "")
7065
7066 ;; Remove the clobber of register 4 when optimizing. This has to be
7067 ;; done with a peephole optimization rather than a split because the
7068 ;; split sequence for a call must be longer than one instruction.
7069 (define_peephole2
7070 [(parallel [(set (match_operand 0 "" "")
7071 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7072 (match_operand 2 "" "")))
7073 (clobber (reg:SI 1))
7074 (clobber (reg:SI 2))
7075 (clobber (reg:SI 4))
7076 (use (reg:SI 19))
7077 (use (const_int 0))])]
7078 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
7079 [(parallel [(set (match_dup 0)
7080 (call (mem:SI (match_dup 1))
7081 (match_dup 2)))
7082 (clobber (reg:SI 1))
7083 (clobber (reg:SI 2))
7084 (use (reg:SI 19))
7085 (use (const_int 0))])]
7086 "")
7087
7088 (define_insn "*call_val_symref_pic_post_reload"
7089 [(set (match_operand 0 "" "")
7090 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7091 (match_operand 2 "" "i")))
7092 (clobber (reg:SI 1))
7093 (clobber (reg:SI 2))
7094 (use (reg:SI 19))
7095 (use (const_int 0))]
7096 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
7097 "*
7098 {
7099 output_arg_descriptor (insn);
7100 return output_call (insn, operands[1], 0);
7101 }"
7102 [(set_attr "type" "call")
7103 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
7104
7105 ;; This pattern is split if it is necessary to save and restore the
7106 ;; PIC register.
7107 (define_insn "call_val_symref_64bit"
7108 [(set (match_operand 0 "" "")
7109 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7110 (match_operand 2 "" "i")))
7111 (clobber (reg:DI 1))
7112 (clobber (reg:DI 2))
7113 (clobber (reg:DI 4))
7114 (use (reg:DI 27))
7115 (use (reg:DI 29))
7116 (use (const_int 0))]
7117 "TARGET_64BIT"
7118 "*
7119 {
7120 output_arg_descriptor (insn);
7121 return output_call (insn, operands[1], 0);
7122 }"
7123 [(set_attr "type" "call")
7124 (set (attr "length")
7125 (plus (symbol_ref "attr_length_call (insn, 0)")
7126 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
7127
7128 ;; Split out the PIC register save and restore after reload. This is
7129 ;; done only if the function returns. As the split is done after reload,
7130 ;; there are some situations in which we unnecessarily save and restore
7131 ;; %r4. This happens when there is a single call and the PIC register
7132 ;; is "dead" after the call. This isn't easy to fix as the usage of
7133 ;; the PIC register isn't completely determined until the reload pass.
7134 (define_split
7135 [(parallel [(set (match_operand 0 "" "")
7136 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7137 (match_operand 2 "" "")))
7138 (clobber (reg:DI 1))
7139 (clobber (reg:DI 2))
7140 (clobber (reg:DI 4))
7141 (use (reg:DI 27))
7142 (use (reg:DI 29))
7143 (use (const_int 0))])]
7144 "TARGET_64BIT
7145 && reload_completed
7146 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
7147 [(set (reg:DI 4) (reg:DI 27))
7148 (parallel [(set (match_dup 0)
7149 (call (mem:SI (match_dup 1))
7150 (match_dup 2)))
7151 (clobber (reg:DI 1))
7152 (clobber (reg:DI 2))
7153 (use (reg:DI 27))
7154 (use (reg:DI 29))
7155 (use (const_int 0))])
7156 (set (reg:DI 27) (reg:DI 4))]
7157 "")
7158
7159 ;; Remove the clobber of register 4 when optimizing. This has to be
7160 ;; done with a peephole optimization rather than a split because the
7161 ;; split sequence for a call must be longer than one instruction.
7162 (define_peephole2
7163 [(parallel [(set (match_operand 0 "" "")
7164 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7165 (match_operand 2 "" "")))
7166 (clobber (reg:DI 1))
7167 (clobber (reg:DI 2))
7168 (clobber (reg:DI 4))
7169 (use (reg:DI 27))
7170 (use (reg:DI 29))
7171 (use (const_int 0))])]
7172 "TARGET_64BIT && reload_completed"
7173 [(parallel [(set (match_dup 0)
7174 (call (mem:SI (match_dup 1))
7175 (match_dup 2)))
7176 (clobber (reg:DI 1))
7177 (clobber (reg:DI 2))
7178 (use (reg:DI 27))
7179 (use (reg:DI 29))
7180 (use (const_int 0))])]
7181 "")
7182
7183 (define_insn "*call_val_symref_64bit_post_reload"
7184 [(set (match_operand 0 "" "")
7185 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7186 (match_operand 2 "" "i")))
7187 (clobber (reg:DI 1))
7188 (clobber (reg:DI 2))
7189 (use (reg:DI 27))
7190 (use (reg:DI 29))
7191 (use (const_int 0))]
7192 "TARGET_64BIT"
7193 "*
7194 {
7195 output_arg_descriptor (insn);
7196 return output_call (insn, operands[1], 0);
7197 }"
7198 [(set_attr "type" "call")
7199 (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
7200
7201 (define_insn "call_val_reg"
7202 [(set (match_operand 0 "" "")
7203 (call (mem:SI (reg:SI 22))
7204 (match_operand 1 "" "i")))
7205 (clobber (reg:SI 1))
7206 (clobber (reg:SI 2))
7207 (use (const_int 1))]
7208 "!TARGET_64BIT"
7209 "*
7210 {
7211 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
7212 }"
7213 [(set_attr "type" "dyncall")
7214 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
7215
7216 ;; This pattern is split if it is necessary to save and restore the
7217 ;; PIC register.
7218 (define_insn "call_val_reg_pic"
7219 [(set (match_operand 0 "" "")
7220 (call (mem:SI (reg:SI 22))
7221 (match_operand 1 "" "i")))
7222 (clobber (reg:SI 1))
7223 (clobber (reg:SI 2))
7224 (clobber (reg:SI 4))
7225 (use (reg:SI 19))
7226 (use (const_int 1))]
7227 "!TARGET_64BIT"
7228 "*
7229 {
7230 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
7231 }"
7232 [(set_attr "type" "dyncall")
7233 (set (attr "length")
7234 (plus (symbol_ref "attr_length_indirect_call (insn)")
7235 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
7236
7237 ;; Split out the PIC register save and restore after reload. This is
7238 ;; done only if the function returns. As the split is done after reload,
7239 ;; there are some situations in which we unnecessarily save and restore
7240 ;; %r4. This happens when there is a single call and the PIC register
7241 ;; is "dead" after the call. This isn't easy to fix as the usage of
7242 ;; the PIC register isn't completely determined until the reload pass.
7243 (define_split
7244 [(parallel [(set (match_operand 0 "" "")
7245 (call (mem:SI (reg:SI 22))
7246 (match_operand 1 "" "")))
7247 (clobber (reg:SI 1))
7248 (clobber (reg:SI 2))
7249 (clobber (reg:SI 4))
7250 (use (reg:SI 19))
7251 (use (const_int 1))])]
7252 "!TARGET_64BIT
7253 && reload_completed
7254 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
7255 [(set (reg:SI 4) (reg:SI 19))
7256 (parallel [(set (match_dup 0)
7257 (call (mem:SI (reg:SI 22))
7258 (match_dup 1)))
7259 (clobber (reg:SI 1))
7260 (clobber (reg:SI 2))
7261 (use (reg:SI 19))
7262 (use (const_int 1))])
7263 (set (reg:SI 19) (reg:SI 4))]
7264 "")
7265
7266 ;; Remove the clobber of register 4 when optimizing. This has to be
7267 ;; done with a peephole optimization rather than a split because the
7268 ;; split sequence for a call must be longer than one instruction.
7269 (define_peephole2
7270 [(parallel [(set (match_operand 0 "" "")
7271 (call (mem:SI (reg:SI 22))
7272 (match_operand 1 "" "")))
7273 (clobber (reg:SI 1))
7274 (clobber (reg:SI 2))
7275 (clobber (reg:SI 4))
7276 (use (reg:SI 19))
7277 (use (const_int 1))])]
7278 "!TARGET_64BIT && reload_completed"
7279 [(parallel [(set (match_dup 0)
7280 (call (mem:SI (reg:SI 22))
7281 (match_dup 1)))
7282 (clobber (reg:SI 1))
7283 (clobber (reg:SI 2))
7284 (use (reg:SI 19))
7285 (use (const_int 1))])]
7286 "")
7287
7288 (define_insn "*call_val_reg_pic_post_reload"
7289 [(set (match_operand 0 "" "")
7290 (call (mem:SI (reg:SI 22))
7291 (match_operand 1 "" "i")))
7292 (clobber (reg:SI 1))
7293 (clobber (reg:SI 2))
7294 (use (reg:SI 19))
7295 (use (const_int 1))]
7296 "!TARGET_64BIT"
7297 "*
7298 {
7299 return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
7300 }"
7301 [(set_attr "type" "dyncall")
7302 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
7303
7304 ;; This pattern is split if it is necessary to save and restore the
7305 ;; PIC register.
7306 (define_insn "call_val_reg_64bit"
7307 [(set (match_operand 0 "" "")
7308 (call (mem:SI (match_operand:DI 1 "register_operand" "r"))
7309 (match_operand 2 "" "i")))
7310 (clobber (reg:DI 2))
7311 (clobber (reg:DI 4))
7312 (use (reg:DI 27))
7313 (use (reg:DI 29))
7314 (use (const_int 1))]
7315 "TARGET_64BIT"
7316 "*
7317 {
7318 return output_indirect_call (insn, operands[1]);
7319 }"
7320 [(set_attr "type" "dyncall")
7321 (set (attr "length")
7322 (plus (symbol_ref "attr_length_indirect_call (insn)")
7323 (symbol_ref "attr_length_save_restore_dltp (insn)")))])
7324
7325 ;; Split out the PIC register save and restore after reload. This is
7326 ;; done only if the function returns. As the split is done after reload,
7327 ;; there are some situations in which we unnecessarily save and restore
7328 ;; %r4. This happens when there is a single call and the PIC register
7329 ;; is "dead" after the call. This isn't easy to fix as the usage of
7330 ;; the PIC register isn't completely determined until the reload pass.
7331 (define_split
7332 [(parallel [(set (match_operand 0 "" "")
7333 (call (mem:SI (match_operand:DI 1 "register_operand" ""))
7334 (match_operand 2 "" "")))
7335 (clobber (reg:DI 2))
7336 (clobber (reg:DI 4))
7337 (use (reg:DI 27))
7338 (use (reg:DI 29))
7339 (use (const_int 1))])]
7340 "TARGET_64BIT
7341 && reload_completed
7342 && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
7343 [(set (reg:DI 4) (reg:DI 27))
7344 (parallel [(set (match_dup 0)
7345 (call (mem:SI (match_dup 1))
7346 (match_dup 2)))
7347 (clobber (reg:DI 2))
7348 (use (reg:DI 27))
7349 (use (reg:DI 29))
7350 (use (const_int 1))])
7351 (set (reg:DI 27) (reg:DI 4))]
7352 "")
7353
7354 ;; Remove the clobber of register 4 when optimizing. This has to be
7355 ;; done with a peephole optimization rather than a split because the
7356 ;; split sequence for a call must be longer than one instruction.
7357 (define_peephole2
7358 [(parallel [(set (match_operand 0 "" "")
7359 (call (mem:SI (match_operand:DI 1 "register_operand" ""))
7360 (match_operand 2 "" "")))
7361 (clobber (reg:DI 2))
7362 (clobber (reg:DI 4))
7363 (use (reg:DI 27))
7364 (use (reg:DI 29))
7365 (use (const_int 1))])]
7366 "TARGET_64BIT && reload_completed"
7367 [(parallel [(set (match_dup 0)
7368 (call (mem:SI (match_dup 1))
7369 (match_dup 2)))
7370 (clobber (reg:DI 2))
7371 (use (reg:DI 27))
7372 (use (reg:DI 29))
7373 (use (const_int 1))])]
7374 "")
7375
7376 (define_insn "*call_val_reg_64bit_post_reload"
7377 [(set (match_operand 0 "" "")
7378 (call (mem:SI (match_operand:DI 1 "register_operand" "r"))
7379 (match_operand 2 "" "i")))
7380 (clobber (reg:DI 2))
7381 (use (reg:DI 27))
7382 (use (reg:DI 29))
7383 (use (const_int 1))]
7384 "TARGET_64BIT"
7385 "*
7386 {
7387 return output_indirect_call (insn, operands[1]);
7388 }"
7389 [(set_attr "type" "dyncall")
7390 (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
7391
7392 ;; Call subroutine returning any type.
7393
7394 (define_expand "untyped_call"
7395 [(parallel [(call (match_operand 0 "" "")
7396 (const_int 0))
7397 (match_operand 1 "" "")
7398 (match_operand 2 "" "")])]
7399 ""
7400 "
7401 {
7402 int i;
7403
7404 emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
7405
7406 for (i = 0; i < XVECLEN (operands[2], 0); i++)
7407 {
7408 rtx set = XVECEXP (operands[2], 0, i);
7409 emit_move_insn (SET_DEST (set), SET_SRC (set));
7410 }
7411
7412 /* The optimizer does not know that the call sets the function value
7413 registers we stored in the result block. We avoid problems by
7414 claiming that all hard registers are used and clobbered at this
7415 point. */
7416 emit_insn (gen_blockage ());
7417
7418 DONE;
7419 }")
7420
7421 (define_expand "sibcall"
7422 [(call (match_operand:SI 0 "" "")
7423 (match_operand 1 "" ""))]
7424 "!TARGET_PORTABLE_RUNTIME"
7425 "
7426 {
7427 rtx op, call_insn;
7428 rtx nb = operands[1];
7429
7430 op = XEXP (operands[0], 0);
7431
7432 if (TARGET_64BIT)
7433 {
7434 if (!virtuals_instantiated)
7435 emit_move_insn (arg_pointer_rtx,
7436 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
7437 GEN_INT (64)));
7438 else
7439 {
7440 /* The loop pass can generate new libcalls after the virtual
7441 registers are instantiated when fpregs are disabled because
7442 the only method that we have for doing DImode multiplication
7443 is with a libcall. This could be trouble if we haven't
7444 allocated enough space for the outgoing arguments. */
7445 if (INTVAL (nb) > current_function_outgoing_args_size)
7446 abort ();
7447
7448 emit_move_insn (arg_pointer_rtx,
7449 gen_rtx_PLUS (word_mode, stack_pointer_rtx,
7450 GEN_INT (STACK_POINTER_OFFSET + 64)));
7451 }
7452 }
7453
7454 /* Indirect sibling calls are not allowed. */
7455 if (TARGET_64BIT)
7456 call_insn = gen_sibcall_internal_symref_64bit (op, operands[1]);
7457 else
7458 call_insn = gen_sibcall_internal_symref (op, operands[1]);
7459
7460 call_insn = emit_call_insn (call_insn);
7461
7462 if (TARGET_64BIT)
7463 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), arg_pointer_rtx);
7464
7465 /* We don't have to restore the PIC register. */
7466 if (flag_pic)
7467 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
7468
7469 DONE;
7470 }")
7471
7472 (define_insn "sibcall_internal_symref"
7473 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
7474 (match_operand 1 "" "i"))
7475 (clobber (reg:SI 1))
7476 (use (reg:SI 2))
7477 (use (const_int 0))]
7478 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
7479 "*
7480 {
7481 output_arg_descriptor (insn);
7482 return output_call (insn, operands[0], 1);
7483 }"
7484 [(set_attr "type" "call")
7485 (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
7486
7487 (define_insn "sibcall_internal_symref_64bit"
7488 [(call (mem:SI (match_operand 0 "call_operand_address" ""))
7489 (match_operand 1 "" "i"))
7490 (clobber (reg:DI 1))
7491 (use (reg:DI 2))
7492 (use (const_int 0))]
7493 "TARGET_64BIT"
7494 "*
7495 {
7496 output_arg_descriptor (insn);
7497 return output_call (insn, operands[0], 1);
7498 }"
7499 [(set_attr "type" "call")
7500 (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
7501
7502 (define_expand "sibcall_value"
7503 [(set (match_operand 0 "" "")
7504 (call (match_operand:SI 1 "" "")
7505 (match_operand 2 "" "")))]
7506 "!TARGET_PORTABLE_RUNTIME"
7507 "
7508 {
7509 rtx op, call_insn;
7510 rtx nb = operands[2];
7511
7512 op = XEXP (operands[1], 0);
7513
7514 if (TARGET_64BIT)
7515 {
7516 if (!virtuals_instantiated)
7517 emit_move_insn (arg_pointer_rtx,
7518 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
7519 GEN_INT (64)));
7520 else
7521 {
7522 /* The loop pass can generate new libcalls after the virtual
7523 registers are instantiated when fpregs are disabled because
7524 the only method that we have for doing DImode multiplication
7525 is with a libcall. This could be trouble if we haven't
7526 allocated enough space for the outgoing arguments. */
7527 if (INTVAL (nb) > current_function_outgoing_args_size)
7528 abort ();
7529
7530 emit_move_insn (arg_pointer_rtx,
7531 gen_rtx_PLUS (word_mode, stack_pointer_rtx,
7532 GEN_INT (STACK_POINTER_OFFSET + 64)));
7533 }
7534 }
7535
7536 /* Indirect sibling calls are not allowed. */
7537 if (TARGET_64BIT)
7538 call_insn
7539 = gen_sibcall_value_internal_symref_64bit (operands[0], op, operands[2]);
7540 else
7541 call_insn
7542 = gen_sibcall_value_internal_symref (operands[0], op, operands[2]);
7543
7544 call_insn = emit_call_insn (call_insn);
7545
7546 if (TARGET_64BIT)
7547 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), arg_pointer_rtx);
7548
7549 /* We don't have to restore the PIC register. */
7550 if (flag_pic)
7551 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
7552
7553 DONE;
7554 }")
7555
7556 (define_insn "sibcall_value_internal_symref"
7557 [(set (match_operand 0 "" "")
7558 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7559 (match_operand 2 "" "i")))
7560 (clobber (reg:SI 1))
7561 (use (reg:SI 2))
7562 (use (const_int 0))]
7563 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
7564 "*
7565 {
7566 output_arg_descriptor (insn);
7567 return output_call (insn, operands[1], 1);
7568 }"
7569 [(set_attr "type" "call")
7570 (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
7571
7572 (define_insn "sibcall_value_internal_symref_64bit"
7573 [(set (match_operand 0 "" "")
7574 (call (mem:SI (match_operand 1 "call_operand_address" ""))
7575 (match_operand 2 "" "i")))
7576 (clobber (reg:DI 1))
7577 (use (reg:DI 2))
7578 (use (const_int 0))]
7579 "TARGET_64BIT"
7580 "*
7581 {
7582 output_arg_descriptor (insn);
7583 return output_call (insn, operands[1], 1);
7584 }"
7585 [(set_attr "type" "call")
7586 (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
7587
7588 (define_insn "nop"
7589 [(const_int 0)]
7590 ""
7591 "nop"
7592 [(set_attr "type" "move")
7593 (set_attr "length" "4")])
7594
7595 ;; These are just placeholders so we know where branch tables
7596 ;; begin and end.
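;; With GAS, a branch table therefore appears in the assembly output
;; bracketed roughly as follows (a sketch; the entries themselves come
;; from the jump table, not from these patterns):
;;	.begin_brtab
;;	 ... branch table entries ...
;;	.end_brtab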
7597 (define_insn "begin_brtab"
7598 [(const_int 1)]
7599 ""
7600 "*
7601 {
7602 /* Only GAS actually supports this pseudo-op. */
7603 if (TARGET_GAS)
7604 return \".begin_brtab\";
7605 else
7606 return \"\";
7607 }"
7608 [(set_attr "type" "move")
7609 (set_attr "length" "0")])
7610
7611 (define_insn "end_brtab"
7612 [(const_int 2)]
7613 ""
7614 "*
7615 {
7616 /* Only GAS actually supports this pseudo-op. */
7617 if (TARGET_GAS)
7618 return \".end_brtab\";
7619 else
7620 return \"\";
7621 }"
7622 [(set_attr "type" "move")
7623 (set_attr "length" "0")])
7624
7625 ;;; EH does longjmp's from and within the data section. Thus,
7626 ;;; an interspace branch is required for the longjmp implementation.
7627 ;;; Registers r1 and r2 are used as scratch registers for the jump
7628 ;;; when necessary.
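;;; In the most general 32-bit case this expands to the three-instruction
;;; sequence shown in the last !TARGET_64BIT pattern below, roughly
;;; (register numbers illustrative):
;;;	ldsid (%sr0,%r22),%r2	; fetch the space id of the target
;;;	mtsp %r2,%sr0		; load it into a space register
;;;	be 0(%sr0,%r22)		; branch external through that space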
7629 (define_expand "interspace_jump"
7630 [(parallel
7631 [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
7632 (clobber (match_dup 1))])]
7633 ""
7634 "
7635 {
7636 operands[1] = gen_rtx_REG (word_mode, 2);
7637 }")
7638
7639 (define_insn ""
7640 [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
7641 (clobber (reg:SI 2))]
7642 "TARGET_PA_20 && !TARGET_64BIT"
7643 "bve%* (%0)"
7644 [(set_attr "type" "branch")
7645 (set_attr "length" "4")])
7646
7647 (define_insn ""
7648 [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
7649 (clobber (reg:SI 2))]
7650 "TARGET_NO_SPACE_REGS && !TARGET_64BIT"
7651 "be%* 0(%%sr4,%0)"
7652 [(set_attr "type" "branch")
7653 (set_attr "length" "4")])
7654
7655 (define_insn ""
7656 [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
7657 (clobber (reg:SI 2))]
7658 "!TARGET_64BIT"
7659 "ldsid (%%sr0,%0),%%r2\; mtsp %%r2,%%sr0\; be%* 0(%%sr0,%0)"
7660 [(set_attr "type" "branch")
7661 (set_attr "length" "12")])
7662
7663 (define_insn ""
7664 [(set (pc) (match_operand 0 "pmode_register_operand" "a"))
7665 (clobber (reg:DI 2))]
7666 "TARGET_64BIT"
7667 "bve%* (%0)"
7668 [(set_attr "type" "branch")
7669 (set_attr "length" "4")])
7670
7671 (define_expand "builtin_longjmp"
7672 [(unspec_volatile [(match_operand 0 "register_operand" "r")] 3)]
7673 ""
7674 "
7675 {
7676 /* The elements of the buffer are, in order: the saved frame pointer, the address of the receiver label, and the saved stack pointer. */
7677 rtx fp = gen_rtx_MEM (Pmode, operands[0]);
7678 rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0],
7679 POINTER_SIZE / BITS_PER_UNIT));
7680 rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0],
7681 (POINTER_SIZE * 2) / BITS_PER_UNIT));
7682 rtx pv = gen_rtx_REG (Pmode, 1);
7683
7684 /* This bit is the same as expand_builtin_longjmp. */
7685 emit_move_insn (hard_frame_pointer_rtx, fp);
7686 emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
7687 emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
7688 emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
7689
7690 /* Load the label we are jumping through into r1 so that we know
7691 where to look for it when we get back to setjmp's function for
7692 restoring the gp. */
7693 emit_move_insn (pv, lab);
7694
7695 /* Prevent the insns above from being scheduled into the delay slot
7696 of the interspace jump because the space register could change. */
7697 emit_insn (gen_blockage ());
7698
7699 emit_jump_insn (gen_interspace_jump (pv));
7700 emit_barrier ();
7701 DONE;
7702 }")
7703
7704 ;;; Operands 2 and 3 are assumed to be CONST_INTs.
7705 (define_expand "extzv"
7706 [(set (match_operand 0 "register_operand" "")
7707 (zero_extract (match_operand 1 "register_operand" "")
7708 (match_operand 2 "uint32_operand" "")
7709 (match_operand 3 "uint32_operand" "")))]
7710 ""
7711 "
7712 {
7713 HOST_WIDE_INT len = INTVAL (operands[2]);
7714 HOST_WIDE_INT pos = INTVAL (operands[3]);
7715
7716 /* PA extraction insns don't support zero length bitfields or fields
7717 extending beyond the left or right-most bits. Also, we reject lengths
7718 equal to a word as they are better handled by the move patterns. */
7719 if (len <= 0 || len >= BITS_PER_WORD || pos < 0 || pos + len > BITS_PER_WORD)
7720 FAIL;
7721
7722 /* From mips.md: extract_bit_field doesn't verify that our source
7723 matches the predicate, so check it again here. */
7724 if (!register_operand (operands[1], VOIDmode))
7725 FAIL;
7726
7727 if (TARGET_64BIT)
7728 emit_insn (gen_extzv_64 (operands[0], operands[1],
7729 operands[2], operands[3]));
7730 else
7731 emit_insn (gen_extzv_32 (operands[0], operands[1],
7732 operands[2], operands[3]));
7733 DONE;
7734 }")
7735
7736 (define_insn "extzv_32"
7737 [(set (match_operand:SI 0 "register_operand" "=r")
7738 (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
7739 (match_operand:SI 2 "uint5_operand" "")
7740 (match_operand:SI 3 "uint5_operand" "")))]
7741 ""
7742 "{extru|extrw,u} %1,%3+%2-1,%2,%0"
7743 [(set_attr "type" "shift")
7744 (set_attr "length" "4")])
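;; For example (operands illustrative), extracting the low-order byte of
;; a register with the extzv_32 pattern above, i.e. a zero_extract of
;; length 8 at position 24 in PA's most-significant-bit-first numbering,
;; assembles as "extru %r26,31,8,%r28": %3+%2-1 = 24+8-1 = 31 is the
;; rightmost bit of the field.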
7745
7746 (define_insn ""
7747 [(set (match_operand:SI 0 "register_operand" "=r")
7748 (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
7749 (const_int 1)
7750 (match_operand:SI 2 "register_operand" "q")))]
7751 ""
7752 "{vextru %1,1,%0|extrw,u %1,%%sar,1,%0}"
7753 [(set_attr "type" "shift")
7754 (set_attr "length" "4")])
7755
7756 (define_insn "extzv_64"
7757 [(set (match_operand:DI 0 "register_operand" "=r")
7758 (zero_extract:DI (match_operand:DI 1 "register_operand" "r")
7759 (match_operand:DI 2 "uint32_operand" "")
7760 (match_operand:DI 3 "uint32_operand" "")))]
7761 "TARGET_64BIT"
7762 "extrd,u %1,%3+%2-1,%2,%0"
7763 [(set_attr "type" "shift")
7764 (set_attr "length" "4")])
7765
7766 (define_insn ""
7767 [(set (match_operand:DI 0 "register_operand" "=r")
7768 (zero_extract:DI (match_operand:DI 1 "register_operand" "r")
7769 (const_int 1)
7770 (match_operand:DI 2 "register_operand" "q")))]
7771 "TARGET_64BIT"
7772 "extrd,u %1,%%sar,1,%0"
7773 [(set_attr "type" "shift")
7774 (set_attr "length" "4")])
7775
7776 ;;; Operands 2 and 3 are assumed to be CONST_INTs.
7777 (define_expand "extv"
7778 [(set (match_operand 0 "register_operand" "")
7779 (sign_extract (match_operand 1 "register_operand" "")
7780 (match_operand 2 "uint32_operand" "")
7781 (match_operand 3 "uint32_operand" "")))]
7782 ""
7783 "
7784 {
7785 HOST_WIDE_INT len = INTVAL (operands[2]);
7786 HOST_WIDE_INT pos = INTVAL (operands[3]);
7787
7788 /* PA extraction insns don't support zero length bitfields or fields
7789 extending beyond the left or right-most bits. Also, we reject lengths
7790 equal to a word as they are better handled by the move patterns. */
7791 if (len <= 0 || len >= BITS_PER_WORD || pos < 0 || pos + len > BITS_PER_WORD)
7792 FAIL;
7793
7794 /* From mips.md: extract_bit_field doesn't verify that our source
7795 matches the predicate, so check it again here. */
7796 if (!register_operand (operands[1], VOIDmode))
7797 FAIL;
7798
7799 if (TARGET_64BIT)
7800 emit_insn (gen_extv_64 (operands[0], operands[1],
7801 operands[2], operands[3]));
7802 else
7803 emit_insn (gen_extv_32 (operands[0], operands[1],
7804 operands[2], operands[3]));
7805 DONE;
7806 }")
7807
7808 (define_insn "extv_32"
7809 [(set (match_operand:SI 0 "register_operand" "=r")
7810 (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
7811 (match_operand:SI 2 "uint5_operand" "")
7812 (match_operand:SI 3 "uint5_operand" "")))]
7813 ""
7814 "{extrs|extrw,s} %1,%3+%2-1,%2,%0"
7815 [(set_attr "type" "shift")
7816 (set_attr "length" "4")])
7817
7818 (define_insn ""
7819 [(set (match_operand:SI 0 "register_operand" "=r")
7820 (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
7821 (const_int 1)
7822 (match_operand:SI 2 "register_operand" "q")))]
7823 "!TARGET_64BIT"
7824 "{vextrs %1,1,%0|extrw,s %1,%%sar,1,%0}"
7825 [(set_attr "type" "shift")
7826 (set_attr "length" "4")])
7827
7828 (define_insn "extv_64"
7829 [(set (match_operand:DI 0 "register_operand" "=r")
7830 (sign_extract:DI (match_operand:DI 1 "register_operand" "r")
7831 (match_operand:DI 2 "uint32_operand" "")
7832 (match_operand:DI 3 "uint32_operand" "")))]
7833 "TARGET_64BIT"
7834 "extrd,s %1,%3+%2-1,%2,%0"
7835 [(set_attr "type" "shift")
7836 (set_attr "length" "4")])
7837
7838 (define_insn ""
7839 [(set (match_operand:DI 0 "register_operand" "=r")
7840 (sign_extract:DI (match_operand:DI 1 "register_operand" "r")
7841 (const_int 1)
7842 (match_operand:DI 2 "register_operand" "q")))]
7843 "TARGET_64BIT"
7844 "extrd,s %1,%%sar,1,%0"
7845 [(set_attr "type" "shift")
7846 (set_attr "length" "4")])
7847
7848 ;;; Operands 1 and 2 are assumed to be CONST_INTs.
7849 (define_expand "insv"
7850 [(set (zero_extract (match_operand 0 "register_operand" "")
7851 (match_operand 1 "uint32_operand" "")
7852 (match_operand 2 "uint32_operand" ""))
7853 (match_operand 3 "arith5_operand" ""))]
7854 ""
7855 "
7856 {
7857 HOST_WIDE_INT len = INTVAL (operands[1]);
7858 HOST_WIDE_INT pos = INTVAL (operands[2]);
7859
7860 /* PA insertion insns don't support zero length bitfields or fields
7861 extending beyond the left or right-most bits. Also, we reject lengths
7862 equal to a word as they are better handled by the move patterns. */
7863 if (len <= 0 || len >= BITS_PER_WORD || pos < 0 || pos + len > BITS_PER_WORD)
7864 FAIL;
7865
7866 /* From mips.md: insert_bit_field doesn't verify that our destination
7867 matches the predicate, so check it again here. */
7868 if (!register_operand (operands[0], VOIDmode))
7869 FAIL;
7870
7871 if (TARGET_64BIT)
7872 emit_insn (gen_insv_64 (operands[0], operands[1],
7873 operands[2], operands[3]));
7874 else
7875 emit_insn (gen_insv_32 (operands[0], operands[1],
7876 operands[2], operands[3]));
7877 DONE;
7878 }")
7879
7880 (define_insn "insv_32"
7881 [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r,r")
7882 (match_operand:SI 1 "uint5_operand" "")
7883 (match_operand:SI 2 "uint5_operand" ""))
7884 (match_operand:SI 3 "arith5_operand" "r,L"))]
7885 ""
7886 "@
7887 {dep|depw} %3,%2+%1-1,%1,%0
7888 {depi|depwi} %3,%2+%1-1,%1,%0"
7889 [(set_attr "type" "shift,shift")
7890 (set_attr "length" "4,4")])
7891
7892 ;; Optimize insertion of const_int values of type 1...1xxxx.
7893 (define_insn ""
7894 [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
7895 (match_operand:SI 1 "uint5_operand" "")
7896 (match_operand:SI 2 "uint5_operand" ""))
7897 (match_operand:SI 3 "const_int_operand" ""))]
7898 "(INTVAL (operands[3]) & 0x10) != 0 &&
7899 (~INTVAL (operands[3]) & ((1L << INTVAL (operands[1])) - 1) & ~0xf) == 0"
7900 "*
7901 {
7902 operands[3] = GEN_INT ((INTVAL (operands[3]) & 0xf) - 0x10);
7903 return \"{depi|depwi} %3,%2+%1-1,%1,%0\";
7904 }"
7905 [(set_attr "type" "shift")
7906 (set_attr "length" "4")])
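;; For example (operands illustrative), inserting the constant 0xff into
;; an 8-bit field matches the pattern above: bit 4 of the constant is set
;; and every field bit above bit 3 is one, so operands[3] becomes
;; (0xff & 0xf) - 0x10 = -1 and the output is a "depwi -1,...,8,..." whose
;; sign-extended 5-bit immediate deposits ones across the whole field.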
7907
7908 (define_insn "insv_64"
7909 [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r,r")
7910 (match_operand:DI 1 "uint32_operand" "")
7911 (match_operand:DI 2 "uint32_operand" ""))
7912 (match_operand:DI 3 "arith32_operand" "r,L"))]
7913 "TARGET_64BIT"
7914 "@
7915 depd %3,%2+%1-1,%1,%0
7916 depdi %3,%2+%1-1,%1,%0"
7917 [(set_attr "type" "shift,shift")
7918 (set_attr "length" "4,4")])
7919
7920 ;; Optimize insertion of const_int values of type 1...1xxxx.
7921 (define_insn ""
7922 [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r")
7923 (match_operand:DI 1 "uint32_operand" "")
7924 (match_operand:DI 2 "uint32_operand" ""))
7925 (match_operand:DI 3 "const_int_operand" ""))]
7926 "(INTVAL (operands[3]) & 0x10) != 0
7927 && TARGET_64BIT
7928 && (~INTVAL (operands[3]) & ((1L << INTVAL (operands[1])) - 1) & ~0xf) == 0"
7929 "*
7930 {
7931 operands[3] = GEN_INT ((INTVAL (operands[3]) & 0xf) - 0x10);
7932 return \"depdi %3,%2+%1-1,%1,%0\";
7933 }"
7934 [(set_attr "type" "shift")
7935 (set_attr "length" "4")])
7936
7937 (define_insn ""
7938 [(set (match_operand:DI 0 "register_operand" "=r")
7939 (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
7940 (const_int 32)))]
7941 "TARGET_64BIT"
7942 "depd,z %1,31,32,%0"
7943 [(set_attr "type" "shift")
7944 (set_attr "length" "4")])
7945
7946 ;; This insn is used for some loop tests, typically loops reversed when
7947 ;; strength reduction is used. It is actually created when the instruction
7948 ;; combination phase combines the special loop test. Since this insn
7949 ;; is both a jump insn and has an output, it must deal with its own
7950 ;; reloads, hence the `m' constraints. The `!' constraints direct reload
7951 ;; to not choose the register alternatives in the event a reload is needed.
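;; In the register alternative this typically assembles to a single
;; add-immediate-and-branch, e.g. "addib,<> -1,%r26,L$0002" (operands
;; illustrative); the FP-register and memory alternatives need the extra
;; reload instructions accounted for in the length attribute below.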
7952 (define_insn "decrement_and_branch_until_zero"
7953 [(set (pc)
7954 (if_then_else
7955 (match_operator 2 "comparison_operator"
7956 [(plus:SI
7957 (match_operand:SI 0 "reg_before_reload_operand" "+!r,!*f,*m")
7958 (match_operand:SI 1 "int5_operand" "L,L,L"))
7959 (const_int 0)])
7960 (label_ref (match_operand 3 "" ""))
7961 (pc)))
7962 (set (match_dup 0)
7963 (plus:SI (match_dup 0) (match_dup 1)))
7964 (clobber (match_scratch:SI 4 "=X,r,r"))]
7965 ""
7966 "* return output_dbra (operands, insn, which_alternative); "
7967 ;; Do not expect to understand this the first time through.
7968 [(set_attr "type" "cbranch,multi,multi")
7969 (set (attr "length")
7970 (if_then_else (eq_attr "alternative" "0")
7971 ;; Loop counter in register case
7972 ;; Short branch has length of 4
7973 ;; Long branch has length of 8
7974 (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
7975 (const_int 8184))
7976 (const_int 4)
7977 (const_int 8))
7978
7979 ;; Loop counter in FP reg case.
7980 ;; Extra goo to deal with additional reload insns.
7981 (if_then_else (eq_attr "alternative" "1")
7982 (if_then_else (lt (match_dup 3) (pc))
7983 (if_then_else
7984 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
7985 (const_int 8184))
7986 (const_int 24)
7987 (const_int 28))
7988 (if_then_else
7989 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
7990 (const_int 8184))
7991 (const_int 24)
7992 (const_int 28)))
7993 ;; Loop counter in memory case.
7994 ;; Extra goo to deal with additional reload insns.
7995 (if_then_else (lt (match_dup 3) (pc))
7996 (if_then_else
7997 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
7998 (const_int 8184))
7999 (const_int 12)
8000 (const_int 16))
8001 (if_then_else
8002 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8003 (const_int 8184))
8004 (const_int 12)
8005 (const_int 16))))))])
8006
8007 (define_insn ""
8008 [(set (pc)
8009 (if_then_else
8010 (match_operator 2 "movb_comparison_operator"
8011 [(match_operand:SI 1 "register_operand" "r,r,r,r") (const_int 0)])
8012 (label_ref (match_operand 3 "" ""))
8013 (pc)))
8014 (set (match_operand:SI 0 "reg_before_reload_operand" "=!r,!*f,*m,!*q")
8015 (match_dup 1))]
8016 ""
8017 "* return output_movb (operands, insn, which_alternative, 0); "
8018 ;; Do not expect to understand this the first time through.
8019 [(set_attr "type" "cbranch,multi,multi,multi")
8020 (set (attr "length")
8021 (if_then_else (eq_attr "alternative" "0")
8022 ;; Loop counter in register case
8023 ;; Short branch has length of 4
8024 ;; Long branch has length of 8
8025 (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8026 (const_int 8184))
8027 (const_int 4)
8028 (const_int 8))
8029
8030 ;; Loop counter in FP reg case.
8031 ;; Extra goo to deal with additional reload insns.
8032 (if_then_else (eq_attr "alternative" "1")
8033 (if_then_else (lt (match_dup 3) (pc))
8034 (if_then_else
8035 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
8036 (const_int 8184))
8037 (const_int 12)
8038 (const_int 16))
8039 (if_then_else
8040 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8041 (const_int 8184))
8042 (const_int 12)
8043 (const_int 16)))
8044 ;; Loop counter in memory or sar case.
8045 ;; Extra goo to deal with additional reload insns.
8046 (if_then_else
8047 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8048 (const_int 8184))
8049 (const_int 8)
8050 (const_int 12)))))])
8051
8052 ;; Handle negated branch.
8053 (define_insn ""
8054 [(set (pc)
8055 (if_then_else
8056 (match_operator 2 "movb_comparison_operator"
8057 [(match_operand:SI 1 "register_operand" "r,r,r,r") (const_int 0)])
8058 (pc)
8059 (label_ref (match_operand 3 "" ""))))
8060 (set (match_operand:SI 0 "reg_before_reload_operand" "=!r,!*f,*m,!*q")
8061 (match_dup 1))]
8062 ""
8063 "* return output_movb (operands, insn, which_alternative, 1); "
8064 ;; Do not expect to understand this the first time through.
8065 [(set_attr "type" "cbranch,multi,multi,multi")
8066 (set (attr "length")
8067 (if_then_else (eq_attr "alternative" "0")
8068 ;; Loop counter in register case
8069 ;; Short branch has length of 4
8070 ;; Long branch has length of 8
8071 (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8072 (const_int 8184))
8073 (const_int 4)
8074 (const_int 8))
8075
8076 ;; Loop counter in FP reg case.
8077 ;; Extra goo to deal with additional reload insns.
8078 (if_then_else (eq_attr "alternative" "1")
8079 (if_then_else (lt (match_dup 3) (pc))
8080 (if_then_else
8081 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
8082 (const_int 8184))
8083 (const_int 12)
8084 (const_int 16))
8085 (if_then_else
8086 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8087 (const_int 8184))
8088 (const_int 12)
8089 (const_int 16)))
8090 ;; Loop counter in memory or SAR case.
8091 ;; Extra goo to deal with additional reload insns.
8092 (if_then_else
8093 (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8094 (const_int 8184))
8095 (const_int 8)
8096 (const_int 12)))))])
8097
8098 (define_insn ""
8099 [(set (pc) (label_ref (match_operand 3 "" "" )))
8100 (set (match_operand:SI 0 "ireg_operand" "=r")
8101 (plus:SI (match_operand:SI 1 "ireg_operand" "r")
8102 (match_operand:SI 2 "ireg_or_int5_operand" "rL")))]
8103 "(reload_completed && operands[0] == operands[1]) || operands[0] == operands[2]"
8104 "*
8105 {
8106 return output_parallel_addb (operands, get_attr_length (insn));
8107 }"
8108 [(set_attr "type" "parallel_branch")
8109 (set (attr "length")
8110 (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
8111 (const_int 8184))
8112 (const_int 4)
8113 (const_int 8)))])
8114
8115 (define_insn ""
8116 [(set (pc) (label_ref (match_operand 2 "" "" )))
8117 (set (match_operand:SF 0 "ireg_operand" "=r")
8118 (match_operand:SF 1 "ireg_or_int5_operand" "rL"))]
8119 "reload_completed"
8120 "*
8121 {
8122 return output_parallel_movb (operands, get_attr_length (insn));
8123 }"
8124 [(set_attr "type" "parallel_branch")
8125 (set (attr "length")
8126 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
8127 (const_int 8184))
8128 (const_int 4)
8129 (const_int 8)))])
8130
8131 (define_insn ""
8132 [(set (pc) (label_ref (match_operand 2 "" "" )))
8133 (set (match_operand:SI 0 "ireg_operand" "=r")
8134 (match_operand:SI 1 "ireg_or_int5_operand" "rL"))]
8135 "reload_completed"
8136 "*
8137 {
8138 return output_parallel_movb (operands, get_attr_length (insn));
8139 }"
8140 [(set_attr "type" "parallel_branch")
8141 (set (attr "length")
8142 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
8143 (const_int 8184))
8144 (const_int 4)
8145 (const_int 8)))])
8146
8147 (define_insn ""
8148 [(set (pc) (label_ref (match_operand 2 "" "" )))
8149 (set (match_operand:HI 0 "ireg_operand" "=r")
8150 (match_operand:HI 1 "ireg_or_int5_operand" "rL"))]
8151 "reload_completed"
8152 "*
8153 {
8154 return output_parallel_movb (operands, get_attr_length (insn));
8155 }"
8156 [(set_attr "type" "parallel_branch")
8157 (set (attr "length")
8158 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
8159 (const_int 8184))
8160 (const_int 4)
8161 (const_int 8)))])
8162
8163 (define_insn ""
8164 [(set (pc) (label_ref (match_operand 2 "" "" )))
8165 (set (match_operand:QI 0 "ireg_operand" "=r")
8166 (match_operand:QI 1 "ireg_or_int5_operand" "rL"))]
8167 "reload_completed"
8168 "*
8169 {
8170 return output_parallel_movb (operands, get_attr_length (insn));
8171 }"
8172 [(set_attr "type" "parallel_branch")
8173 (set (attr "length")
8174 (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
8175 (const_int 8184))
8176 (const_int 4)
8177 (const_int 8)))])
8178
8179 (define_insn ""
8180 [(set (match_operand 0 "register_operand" "=f")
8181 (mult (match_operand 1 "register_operand" "f")
8182 (match_operand 2 "register_operand" "f")))
8183 (set (match_operand 3 "register_operand" "+f")
8184 (plus (match_operand 4 "register_operand" "f")
8185 (match_operand 5 "register_operand" "f")))]
8186 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT
8187 && reload_completed && fmpyaddoperands (operands)"
8188 "*
8189 {
8190 if (GET_MODE (operands[0]) == DFmode)
8191 {
8192 if (rtx_equal_p (operands[3], operands[5]))
8193 return \"fmpyadd,dbl %1,%2,%0,%4,%3\";
8194 else
8195 return \"fmpyadd,dbl %1,%2,%0,%5,%3\";
8196 }
8197 else
8198 {
8199 if (rtx_equal_p (operands[3], operands[5]))
8200 return \"fmpyadd,sgl %1,%2,%0,%4,%3\";
8201 else
8202 return \"fmpyadd,sgl %1,%2,%0,%5,%3\";
8203 }
8204 }"
8205 [(set_attr "type" "fpalu")
8206 (set_attr "length" "4")])
8207
8208 (define_insn ""
8209 [(set (match_operand 3 "register_operand" "+f")
8210 (plus (match_operand 4 "register_operand" "f")
8211 (match_operand 5 "register_operand" "f")))
8212 (set (match_operand 0 "register_operand" "=f")
8213 (mult (match_operand 1 "register_operand" "f")
8214 (match_operand 2 "register_operand" "f")))]
8215 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT
8216 && reload_completed && fmpyaddoperands (operands)"
8217 "*
8218 {
8219 if (GET_MODE (operands[0]) == DFmode)
8220 {
8221 if (rtx_equal_p (operands[3], operands[5]))
8222 return \"fmpyadd,dbl %1,%2,%0,%4,%3\";
8223 else
8224 return \"fmpyadd,dbl %1,%2,%0,%5,%3\";
8225 }
8226 else
8227 {
8228 if (rtx_equal_p (operands[3], operands[5]))
8229 return \"fmpyadd,sgl %1,%2,%0,%4,%3\";
8230 else
8231 return \"fmpyadd,sgl %1,%2,%0,%5,%3\";
8232 }
8233 }"
8234 [(set_attr "type" "fpalu")
8235 (set_attr "length" "4")])
8236
8237 (define_insn ""
8238 [(set (match_operand 0 "register_operand" "=f")
8239 (mult (match_operand 1 "register_operand" "f")
8240 (match_operand 2 "register_operand" "f")))
8241 (set (match_operand 3 "register_operand" "+f")
8242 (minus (match_operand 4 "register_operand" "f")
8243 (match_operand 5 "register_operand" "f")))]
8244 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT
8245 && reload_completed && fmpysuboperands (operands)"
8246 "*
8247 {
8248 if (GET_MODE (operands[0]) == DFmode)
8249 return \"fmpysub,dbl %1,%2,%0,%5,%3\";
8250 else
8251 return \"fmpysub,sgl %1,%2,%0,%5,%3\";
8252 }"
8253 [(set_attr "type" "fpalu")
8254 (set_attr "length" "4")])
8255
8256 (define_insn ""
8257 [(set (match_operand 3 "register_operand" "+f")
8258 (minus (match_operand 4 "register_operand" "f")
8259 (match_operand 5 "register_operand" "f")))
8260 (set (match_operand 0 "register_operand" "=f")
8261 (mult (match_operand 1 "register_operand" "f")
8262 (match_operand 2 "register_operand" "f")))]
8263 "TARGET_PA_11 && ! TARGET_SOFT_FLOAT
8264 && reload_completed && fmpysuboperands (operands)"
8265 "*
8266 {
8267 if (GET_MODE (operands[0]) == DFmode)
8268 return \"fmpysub,dbl %1,%2,%0,%5,%3\";
8269 else
8270 return \"fmpysub,sgl %1,%2,%0,%5,%3\";
8271 }"
8272 [(set_attr "type" "fpalu")
8273 (set_attr "length" "4")])
8274
8275 ;; Clean up turds left by reload.
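;; Sketch of the first peephole's effect (register names illustrative):
;; a DFmode store such as "fstds %fr4,-16(%r30)" immediately followed by
;; a load of the same slot into %fr5 keeps the store but replaces the
;; memory load with a register copy, "fcpy,dbl %fr4,%fr5"; nothing extra
;; is emitted when the stored and reloaded registers are the same.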
8276 (define_peephole
8277 [(set (match_operand 0 "reg_or_nonsymb_mem_operand" "")
8278 (match_operand 1 "register_operand" "fr"))
8279 (set (match_operand 2 "register_operand" "fr")
8280 (match_dup 0))]
8281 "! TARGET_SOFT_FLOAT
8282 && GET_CODE (operands[0]) == MEM
8283 && ! MEM_VOLATILE_P (operands[0])
8284 && GET_MODE (operands[0]) == GET_MODE (operands[1])
8285 && GET_MODE (operands[0]) == GET_MODE (operands[2])
8286 && GET_MODE (operands[0]) == DFmode
8287 && GET_CODE (operands[1]) == REG
8288 && GET_CODE (operands[2]) == REG
8289 && ! side_effects_p (XEXP (operands[0], 0))
8290 && REGNO_REG_CLASS (REGNO (operands[1]))
8291 == REGNO_REG_CLASS (REGNO (operands[2]))"
8292 "*
8293 {
8294 rtx xoperands[2];
8295
8296 if (FP_REG_P (operands[1]))
8297 output_asm_insn (output_fp_move_double (operands), operands);
8298 else
8299 output_asm_insn (output_move_double (operands), operands);
8300
8301 if (rtx_equal_p (operands[1], operands[2]))
8302 return \"\";
8303
8304 xoperands[0] = operands[2];
8305 xoperands[1] = operands[1];
8306
8307 if (FP_REG_P (xoperands[1]))
8308 output_asm_insn (output_fp_move_double (xoperands), xoperands);
8309 else
8310 output_asm_insn (output_move_double (xoperands), xoperands);
8311
8312 return \"\";
8313 }")
8314
8315 (define_peephole
8316 [(set (match_operand 0 "register_operand" "fr")
8317 (match_operand 1 "reg_or_nonsymb_mem_operand" ""))
8318 (set (match_operand 2 "register_operand" "fr")
8319 (match_dup 1))]
8320 "! TARGET_SOFT_FLOAT
8321 && GET_CODE (operands[1]) == MEM
8322 && ! MEM_VOLATILE_P (operands[1])
8323 && GET_MODE (operands[0]) == GET_MODE (operands[1])
8324 && GET_MODE (operands[0]) == GET_MODE (operands[2])
8325 && GET_MODE (operands[0]) == DFmode
8326 && GET_CODE (operands[0]) == REG
8327 && GET_CODE (operands[2]) == REG
8328 && ! side_effects_p (XEXP (operands[1], 0))
8329 && REGNO_REG_CLASS (REGNO (operands[0]))
8330 == REGNO_REG_CLASS (REGNO (operands[2]))"
8331 "*
8332 {
8333 rtx xoperands[2];
8334
8335 if (FP_REG_P (operands[0]))
8336 output_asm_insn (output_fp_move_double (operands), operands);
8337 else
8338 output_asm_insn (output_move_double (operands), operands);
8339
8340 xoperands[0] = operands[2];
8341 xoperands[1] = operands[0];
8342
8343 if (FP_REG_P (xoperands[1]))
8344 output_asm_insn (output_fp_move_double (xoperands), xoperands);
8345 else
8346 output_asm_insn (output_move_double (xoperands), xoperands);
8347
8348 return \"\";
8349 }")
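
;; A minimal sketch of what the two peepholes above clean up (register
;; and stack-slot choices assumed, for illustration only): reload can
;; leave a DFmode spill store immediately followed by a load of the
;; same slot, or two back-to-back loads of the same slot.  Rather than
;; performing the second memory access, the peepholes emit the first
;; move and then a plain register-to-register copy:
;;
;;   before                         after
;;   store  %fr22 -> slot           store  %fr22 -> slot
;;   load   slot  -> %fr23          copy   %fr22 -> %fr23
;;
;; The second memory access is dropped only when the MEM is not
;; volatile, its address has no side effects, and both registers are
;; in the same register class, as the peephole conditions require.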
8350
8351 ;; Flush the I and D cache lines found at the addresses in operands 0
8352 ;; and 1.  These patterns are used by the trampoline code for nested
8353 ;; functions; so long as the trampoline itself is less than 32 bytes,
8354 ;; this is sufficient.  A usage sketch follows the icacheflush pattern.
8355
8356 (define_insn "dcacheflush"
8357 [(unspec_volatile [(const_int 1)] 0)
8358 (use (mem:SI (match_operand 0 "pmode_register_operand" "r")))
8359 (use (mem:SI (match_operand 1 "pmode_register_operand" "r")))]
8360 ""
8361 "fdc 0(%0)\;fdc 0(%1)\;sync"
8362 [(set_attr "type" "multi")
8363 (set_attr "length" "12")])
8364
8365 (define_insn "icacheflush"
8366 [(unspec_volatile [(const_int 2)] 0)
8367 (use (mem:SI (match_operand 0 "pmode_register_operand" "r")))
8368 (use (mem:SI (match_operand 1 "pmode_register_operand" "r")))
8369 (use (match_operand 2 "pmode_register_operand" "r"))
8370 (clobber (match_operand 3 "pmode_register_operand" "=&r"))
8371 (clobber (match_operand 4 "pmode_register_operand" "=&r"))]
8372 ""
8373 "mfsp %%sr0,%4\;ldsid (%2),%3\;mtsp %3,%%sr0\;fic 0(%%sr0,%0)\;fic 0(%%sr0,%1)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop"
8374 [(set_attr "type" "multi")
8375 (set_attr "length" "52")])
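
;; A minimal sketch of how these two patterns are intended to be used
;; (the real caller is the port's trampoline-initialization code; the
;; variable names below are illustrative, not taken from pa.c/pa.h):
;;
;;   /* start/end are Pmode registers delimiting the freshly written
;;      trampoline; tmp1/tmp2 are scratch registers.  */
;;   emit_insn (gen_dcacheflush (start, end));
;;   emit_insn (gen_icacheflush (start, end, start, tmp1, tmp2));
;;
;; For icacheflush, the third operand is the address whose space
;; identifier is loaded with LDSID before the FIC instructions, and the
;; last two operands are clobbered as scratch registers.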
8376
8377 ;; An out-of-line prologue.
8378 (define_insn "outline_prologue_call"
8379 [(unspec_volatile [(const_int 0)] 0)
8380 (clobber (reg:SI 31))
8381 (clobber (reg:SI 22))
8382 (clobber (reg:SI 21))
8383 (clobber (reg:SI 20))
8384 (clobber (reg:SI 19))
8385 (clobber (reg:SI 1))]
8386 ""
8387 "*
8388 {
8389 extern int frame_pointer_needed;
8390
8391 /* We need two different versions depending on whether or not we
8392 need a frame pointer.  Also note that we return to the instruction
8393 immediately after the branch rather than two instructions after it,
8394 as is normally the case.  */
8395 if (frame_pointer_needed)
8396 {
8397 /* Must import the magic millicode routine(s). */
8398 output_asm_insn (\".IMPORT __outline_prologue_fp,MILLICODE\", NULL);
8399
8400 if (TARGET_PORTABLE_RUNTIME)
8401 {
8402 output_asm_insn (\"ldil L'__outline_prologue_fp,%%r31\", NULL);
8403 output_asm_insn (\"ble,n R'__outline_prologue_fp(%%sr0,%%r31)\",
8404 NULL);
8405 }
8406 else
8407 output_asm_insn (\"{bl|b,l},n __outline_prologue_fp,%%r31\", NULL);
8408 }
8409 else
8410 {
8411 /* Must import the magic millicode routine(s). */
8412 output_asm_insn (\".IMPORT __outline_prologue,MILLICODE\", NULL);
8413
8414 if (TARGET_PORTABLE_RUNTIME)
8415 {
8416 output_asm_insn (\"ldil L'__outline_prologue,%%r31\", NULL);
8417 output_asm_insn (\"ble,n R'__outline_prologue(%%sr0,%%r31)\", NULL);
8418 }
8419 else
8420 output_asm_insn (\"{bl|b,l},n __outline_prologue,%%r31\", NULL);
8421 }
8422 return \"\";
8423 }"
8424 [(set_attr "type" "multi")
8425 (set_attr "length" "8")])
8426
8427 ;; An out-of-line epilogue.
8428 (define_insn "outline_epilogue_call"
8429 [(unspec_volatile [(const_int 1)] 0)
8430 (use (reg:SI 29))
8431 (use (reg:SI 28))
8432 (clobber (reg:SI 31))
8433 (clobber (reg:SI 22))
8434 (clobber (reg:SI 21))
8435 (clobber (reg:SI 20))
8436 (clobber (reg:SI 19))
8437 (clobber (reg:SI 2))
8438 (clobber (reg:SI 1))]
8439 ""
8440 "*
8441 {
8442 extern int frame_pointer_needed;
8443
8444 /* We need two different versions depending on whether or not we
8445 need a frame pointer.  Also note that we return to the instruction
8446 immediately after the branch rather than two instructions after it,
8447 as is normally the case.  */
8448 if (frame_pointer_needed)
8449 {
8450 /* Must import the magic millicode routine. */
8451 output_asm_insn (\".IMPORT __outline_epilogue_fp,MILLICODE\", NULL);
8452
8453 /* The out-of-line prologue will make sure we return to the right
8454 instruction. */
8455 if (TARGET_PORTABLE_RUNTIME)
8456 {
8457 output_asm_insn (\"ldil L'__outline_epilogue_fp,%%r31\", NULL);
8458 output_asm_insn (\"ble,n R'__outline_epilogue_fp(%%sr0,%%r31)\",
8459 NULL);
8460 }
8461 else
8462 output_asm_insn (\"{bl|b,l},n __outline_epilogue_fp,%%r31\", NULL);
8463 }
8464 else
8465 {
8466 /* Must import the magic millicode routine. */
8467 output_asm_insn (\".IMPORT __outline_epilogue,MILLICODE\", NULL);
8468
8469 /* The out-of-line prologue will make sure we return to the right
8470 instruction. */
8471 if (TARGET_PORTABLE_RUNTIME)
8472 {
8473 output_asm_insn (\"ldil L'__outline_epilogue,%%r31\", NULL);
8474 output_asm_insn (\"ble,n R'__outline_epilogue(%%sr0,%%r31)\", NULL);
8475 }
8476 else
8477 output_asm_insn (\"{bl|b,l},n __outline_epilogue,%%r31\", NULL);
8478 }
8479 return \"\";
8480 }"
8481 [(set_attr "type" "multi")
8482 (set_attr "length" "8")])
8483
8484 ;; Given a function pointer, canonicalize it so it can be
8485 ;; reliably compared to another function pointer.
8486 (define_expand "canonicalize_funcptr_for_compare"
8487 [(set (reg:SI 26) (match_operand:SI 1 "register_operand" ""))
8488 (parallel [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] 0))
8489 (clobber (match_dup 2))
8490 (clobber (reg:SI 26))
8491 (clobber (reg:SI 22))
8492 (clobber (reg:SI 31))])
8493 (set (match_operand:SI 0 "register_operand" "")
8494 (reg:SI 29))]
8495 "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
8496 "
8497 {
8498 if (TARGET_ELF32)
8499 {
8500 rtx canonicalize_funcptr_for_compare_libfunc
8501 = init_one_libfunc (CANONICALIZE_FUNCPTR_FOR_COMPARE_LIBCALL);
8502
8503 emit_library_call_value (canonicalize_funcptr_for_compare_libfunc,
8504 operands[0], LCT_NORMAL, Pmode,
8505 1, operands[1], Pmode);
8506 DONE;
8507 }
8508
8509 operands[2] = gen_reg_rtx (SImode);
8510 if (GET_CODE (operands[1]) != REG)
8511 {
8512 rtx tmp = gen_reg_rtx (Pmode);
8513 emit_move_insn (tmp, operands[1]);
8514 operands[1] = tmp;
8515 }
8516 }")
8517
8518 (define_insn ""
8519 [(set (reg:SI 29) (unspec:SI [(reg:SI 26)] 0))
8520 (clobber (match_operand:SI 0 "register_operand" "=a"))
8521 (clobber (reg:SI 26))
8522 (clobber (reg:SI 22))
8523 (clobber (reg:SI 31))]
8524 "!TARGET_64BIT"
8525 "*
8526 {
8527 int length = get_attr_length (insn);
8528 rtx xoperands[2];
8529
8530 xoperands[0] = GEN_INT (length - 8);
8531 xoperands[1] = GEN_INT (length - 16);
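/* The conditional branches emitted below target \".+%0\" and \".+%1\",
   i.e. the end of this insn: the trailing millicode call sequence has
   a variable length, so each target is the insn's total length minus
   the offset of the branch within the insn (8 and 16 bytes).  */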
8532
8533 /* Must import the magic millicode routine. */
8534 output_asm_insn (\".IMPORT $$sh_func_adrs,MILLICODE\", NULL);
8535
8536 /* This is absolutely amazing.
8537
8538 First, copy our input parameter into %r29 just in case we don't
8539 need to call $$sh_func_adrs. */
8540 output_asm_insn (\"copy %%r26,%%r29\", NULL);
8541 output_asm_insn (\"{extru|extrw,u} %%r26,31,2,%%r31\", NULL);
8542
8543 /* Next, examine the low two bits in %r26; if they aren't 0x2, then
8544 we use %r26 unchanged.  */
8545 output_asm_insn (\"{comib|cmpib},<>,n 2,%%r31,.+%0\", xoperands);
8546 output_asm_insn (\"ldi 4096,%%r31\", NULL);
8547
8548 /* Next, compare %r26 with 4096; if %r26 is less than 4096 (unsigned),
8549 then again we use %r26 unchanged.  */
8550 output_asm_insn (\"{comb|cmpb},<<,n %%r26,%%r31,.+%1\", xoperands);
8551
8552 /* Finally, call $$sh_func_adrs to extract the function's real address.  */
8553 return output_millicode_call (insn,
8554 gen_rtx_SYMBOL_REF (SImode,
8555 \"$$sh_func_adrs\"));
8556 }"
8557 [(set_attr "type" "multi")
8558 (set (attr "length")
8559 (plus (symbol_ref "attr_length_millicode_call (insn)")
8560 (const_int 20)))])
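
;; A rough restatement of the check performed above, written as C for
;; clarity only (the function name and its argument type are
;; hypothetical; the actual address extraction is done by the
;; $$sh_func_adrs millicode routine, which is not reproduced here):
;;
;;   unsigned int
;;   canonical_funcptr (unsigned int fptr)
;;   {
;;     /* Pointers whose low two bits are not 0x2, or which point below
;;        4096, are already plain code addresses and compare directly.  */
;;     if ((fptr & 3) != 2 || fptr < 4096)
;;       return fptr;
;;     /* Otherwise ask the millicode helper for the real address.  */
;;     return __sh_func_adrs (fptr);
;;   }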
8561
8562 ;; On the PA, the PIC register is call clobbered, so it must
8563 ;; be saved & restored around calls by the caller. If the call
8564 ;; doesn't return normally (nonlocal goto, or an exception is
8565 ;; thrown), then the code at the exception handler label must
8566 ;; restore the PIC register.
8567 (define_expand "exception_receiver"
8568 [(const_int 4)]
8569 "flag_pic"
8570 "
8571 {
8572 /* On the 64-bit port, we need a blockage because there is
8573 confusion regarding the dependence of the restore on the
8574 frame pointer. As a result, the frame pointer and pic
8575 register restores sometimes are interchanged erroneously. */
8576 if (TARGET_64BIT)
8577 emit_insn (gen_blockage ());
8578 /* Restore the PIC register using hppa_pic_save_rtx (). The
8579 PIC register is not saved in the frame in 64-bit ABI. */
8580 emit_move_insn (pic_offset_table_rtx, hppa_pic_save_rtx ());
8581 emit_insn (gen_blockage ());
8582 DONE;
8583 }")
8584
8585 (define_expand "builtin_setjmp_receiver"
8586 [(label_ref (match_operand 0 "" ""))]
8587 "flag_pic"
8588 "
8589 {
8590 if (TARGET_64BIT)
8591 emit_insn (gen_blockage ());
8592 /* Restore the PIC register. Hopefully, this will always be from
8593 a stack slot. The only registers that are valid after a
8594 builtin_longjmp are the stack and frame pointers. */
8595 emit_move_insn (pic_offset_table_rtx, hppa_pic_save_rtx ());
8596 emit_insn (gen_blockage ());
8597 DONE;
8598 }")
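
;; A minimal illustration (any such function would do) of why the two
;; receivers above must reload the PIC register:
;;
;;   void *buf[5];
;;   extern int g (void);
;;   int f (void)
;;   {
;;     if (__builtin_setjmp (buf))
;;       return g ();            /* PIC-relative call needs %r19.  */
;;     /* ...code that may __builtin_longjmp (buf, 1)...  */
;;     return 0;
;;   }
;;
;; Control can reappear at the setjmp receiver with only the stack and
;; frame pointers intact, so the PIC register is reloaded from the
;; location returned by hppa_pic_save_rtx () before any PIC-relative
;; reference is made.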
8599
8600 ;; Allocate new stack space and update the saved stack pointer in the
8601 ;; frame marker. The HP C compilers also copy additional words in the
8602 ;; frame marker. The 64-bit compiler copies words at -48, -32 and -24.
8603 ;; The 32-bit compiler copies the word at -16 (Static Link). We
8604 ;; currently don't copy these values.
8605 ;;
8606 ;; Since the copy of the frame marker can't be done atomically, I
8607 ;; suspect that using it for unwind purposes may be somewhat unreliable.
8608 ;; The HP compilers appear to raise the stack and copy the frame
8609 ;; marker in a strict instruction sequence. This suggests that the
8610 ;; unwind library may check for an alloca sequence when ALLOCA_FRAME
8611 ;; is set in the callinfo data.  We currently don't set ALLOCA_FRAME,
8612 ;; as GAS doesn't support it, nor do we try to keep the instructions
8613 ;; emitted here in a strict sequence.
8614 (define_expand "allocate_stack"
8615 [(match_operand 0 "" "")
8616 (match_operand 1 "" "")]
8617 ""
8618 "
8619 {
8620 rtx addr;
8621
8622 /* Since the stack grows upward, we need to store virtual_stack_dynamic_rtx
8623 in operand 0 before adjusting the stack. */
8624 emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
8625 anti_adjust_stack (operands[1]);
8626 if (TARGET_HPUX_UNWIND_LIBRARY)
8627 {
8628 addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
8629 GEN_INT (TARGET_64BIT ? -8 : -4));
8630 emit_move_insn (gen_rtx_MEM (word_mode, addr), frame_pointer_rtx);
8631 }
8632 if (!TARGET_64BIT && flag_pic)
8633 {
8634 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
8635 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
8636 }
8637 DONE;
8638 }")
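
;; A minimal sketch of the frame-marker slots the expander above
;; touches, relative to the stack pointer after anti_adjust_stack has
;; raised it (the offsets are exactly those used in the code above;
;; nothing else in the frame marker is written here):
;;
;;   32-bit:  -4(%sp)   copy of frame_pointer_rtx (the saved stack
;;                      pointer slot), when TARGET_HPUX_UNWIND_LIBRARY
;;            -32(%sp)  PIC register (pic_offset_table_rtx), when flag_pic
;;   64-bit:  -8(%sp)   copy of frame_pointer_rtx, when
;;                      TARGET_HPUX_UNWIND_LIBRARY
;;
;; The expander itself is reached from any dynamic stack allocation,
;; e.g. a use of __builtin_alloca (n) in C.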